\documentclass[12pt,titlepage]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}
%----Macros----------
%
% Unresolved issues:
%
% \righttoleftarrow
% \lefttorightarrow
%
% \color{} with HTML colorspec
% \bgcolor
% \array with options (without options, it's equivalent to the matrix environment)
% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
% RGB values match the CSS/HTML named colors (0.502 = 128/255, 0.753 = 192/255).
% NOTE(review): "fuschia" is a misspelling of HTML's "fuchsia", but it is the
% name the itex dialect uses -- kept for compatibility with existing documents.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}
% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.
% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
% \itexspace{ht}{dp}{wd}: an invisible strut of the given height, depth and
% width.  Per the comment above, ht and dp are integers in tenths of an ex,
% wd in tenths of an em; hence the "assign then multiply by 0.1" pattern.
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
% total rule height = requested height + depth
\advance\itex@thd\the\itex@dp%
% an empty box of the requested width containing a zero-width rule that
% forces the requested depth and total height
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother
% \tensor and \multiscript
% Typeset tensors with staggered upper/lower indices, e.g. \tensor{T}{^a_b}.
% Pending superscripts accumulate in the token register \@sups; the flag
% \if@sup records whether any are waiting to be emitted.
\makeatletter
\newif\if@sup
\newtoks\@sups
% Append one token to the pending-superscript register (\edef forces the
% current contents of \@sups to be expanded before reassignment).
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
% Clear the pending-superscript state.
\def\reset@sup{\@supfalse\@sups={}}%
% Scan the script list two tokens at a time; the list is terminated by "_/".
% NOTE(review): the recursion is threaded through \fi with \expandafter and
% is order-sensitive -- do not reformat or "simplify".
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother
% \slash
% Feynman-slash notation: overlay a "/" on the argument, horizontally centred.
\makeatletter
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
% back up by half the slash width plus half the argument width, so the
% slash is centred over the argument
\@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
\copy\slashbox \kern-\@tempdima \box\@tempboxa}
% NOTE(review): this overrides the LaTeX kernel's \slash (a breakable "/");
% intentional in the itex dialect.
\def\slash{\protect\itex@pslash}
\makeatother
% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
% \clap centres its argument in a zero-width box; \mathllap/\mathrlap/\mathclap
% are math-mode analogues of \llap/\rlap/\clap, using \mathpalette so the
% argument is typeset in the current math style.
% NOTE(review): mathtools (loaded above) also defines \mathllap/\mathrlap/
% \mathclap; these \def's silently replace them with equivalent code.
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}
% Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2}
% so that both \root{n}{x} and \sqrt[n]{x} typeset an n-th root via the
% plain-TeX \root ... \of construction.
\let\oldsqrt\sqrt% fixed: the comment promised \oldsqrt, but this \let was missing
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}
% Manually declare the txfonts symbolsC font
% (declared directly so the full txfonts package need not be loaded)
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
\DeclareFontSubstitution{U}{txsyc}{m}{n}
% Manually declare the stmaryrd font
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}
% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
% Map each text-size range to the matching optical size of MnSymbolE.
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
<-6> MnSymbolE5
<6-7> MnSymbolE6
<7-8> MnSymbolE7
<8-9> MnSymbolE8
<9-10> MnSymbolE9
<10-12> MnSymbolE10
<12-> MnSymbolE12}{}
% Declare specific arrows from txfonts without loading the full package
\makeatletter
% \re@DeclareMathSymbol: like \DeclareMathSymbol, but first undefines the
% control sequence so symbols already defined elsewhere (e.g. by amssymb)
% can be re-declared without an "already defined" error.
\def\re@DeclareMathSymbol#1#2#3#4{%
\let#1=\undefined
\DeclareMathSymbol{#1}{#2}{#3}{#4}}
% Diagonal double arrows from symbolsC; each slot gets a long and short name.
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
% \Perp / \Vbar are synonyms (same slot), as are \invamp / \parr below.
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
% Symbols from the stmaryrd font; "xx denotes a hexadecimal slot number.
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother
% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
% Undefine #1 first (when it is a defined command) so kernel delimiters such
% as \lmoustache and \rmoustache can be re-declared without error.
\def\Decl@Mn@Delim#1#2#3#4{%
\if\relax\noexpand#1%
\let#1\undefined
\fi
\DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
% Slot numbers below are octal ('...) positions in the mnomx font.
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother
% Widecheck
% \widecheck{x}: a wide check accent, built by vertically flipping a
% \widehat sized to the argument and overlaying it on the argument.
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
\def\@widecheck#1#2{%
% box 0: the bare argument; box 2: a \widehat over an invisible strut with
% the argument's exact height and width (so the hat alone is measured)
\setbox\z@\hbox{\m@th$#1#2$}%
\setbox\tw@\hbox{\m@th$#1%
\widehat{%
\vrule\@width\z@\@height\ht\z@
\vrule\@height\z@\@width\wd\z@}$}%
\dp\tw@-\ht\z@
% vertical offset used when reflecting the hat into a check
\@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
\setbox\tw@\hbox{%
\raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box
\tw@}}}%
% overlay the flipped hat (now a check) on the argument
{\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother
% \mathraisebox{voffset}[height][depth]{something}
% Math-mode analogue of \raisebox: raises #4 by #1, optionally overriding the
% reported height (#2) and depth (#3).  Uses xparse's \NewDocumentCommand
% (m = mandatory, o = optional) plus \mathpalette so the content is typeset
% in the current math style; \m@th suppresses \mathsurround spacing.
\makeatletter
\NewDocumentCommand\mathraisebox{moom}{%
\IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
\IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
}{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
\mathpalette\@temp{#4}}
% fixed: this line previously read \makeatletter, so the at-letter group
% opened above was never closed by this block
\makeatother
% udots (taken from yhmath)
% Diagonal dots rising from lower-left to upper-right (mirror image of \ddots).
\makeatletter
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother
%% Fix array
% itex's \array (without options) is equivalent to amsmath's matrix.
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}
%% Renaming existing commands
% Aliases mapping itex/MathML names onto standard LaTeX commands.
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
% Arrow aliases.
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
% Relation and mapping aliases (\gt/\lt avoid raw < > in itex input).
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
% Capital Greek letters that coincide with Latin capitals: itex names them
% all, but LaTeX only defines the ones with distinct glyphs.
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
% Delimiter and big-operator aliases.
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
% Integral aliases.
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
% Miscellaneous symbol aliases.
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
% Size commands mapped onto math styles.
% NOTE(review): \scriptsize is a standard text-size command; redefining it to
% \scriptstyle is intentional in the itex dialect, where it only appears in math.
\newcommand{\textsize}{\normalsize}
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
% Interactive itex/MathML constructs degrade to just their second argument.
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}
% Theorem Environments
% Numbered environments plus unnumbered (u-prefixed, starred) variants,
% in each of amsthm's three styles.  Each environment has its own counter.
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}
%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------
\section*{Blog - network theory (part 19)}
This page is a [[Blog articles in progress|blog article in progress]], written by [[John Baez]] and [[Jacob Biamonte]]. To see discussions of this article while it was being written, go to the \href{http://forum.azimuthproject.org/discussion/1015/blog-network-theory-part-19/#Item_1}{Azimuth Forum}. For the final polished version, visit the \href{http://johncarlosbaez.wordpress.com/2012/07/18/network-theory-part-19/}{Azimuth Blog}.
\emph{joint with [[Jacob Biamonte]]}
It's time to resume the series! It's been a long time, so we'll assume you forgot everything we've said before, and make this post as self-contained as possible. Last time we started looking at a simple example: a diatomic gas.
Two atoms can recombine to form a diatomic molecule:
\begin{displaymath}
A + A \to A_2
\end{displaymath}
and conversely, a diatomic molecule can break apart into two atoms:
\begin{displaymath}
A_2 \to A + A
\end{displaymath}
We can draw both these reactions using a Petri net:
where we're writing $B$ instead of $A_2$ to abstract away some detail that's just distracting here. Or, equivalently, we can use a chemical reaction network:
Last time we looked at the rate equation for this chemical reaction network, and found equilibrium solutions of that equation. Now let's look at the master equation, and find equilibrium solutions of that. This will serve as a review of three big theorems.
We'll start from scratch, in case you're just tuning in. The master equation is all about how atoms or molecules or rabbits or wolves or other things interact randomly and turn into other things. So, let's write $\psi_{m,n}$ for the probability that we have $m$ atoms of $A$ and $n$ molecules of $B$ in our container. These probabilities are functions of time, and the master equation will say how they change.
First we need to pick a rate constant for each reaction. Let's say the rate constant for this reaction is some number $\alpha > 0$:
\begin{displaymath}
A + A \to B
\end{displaymath}
while the rate constant for this reaction is some number $\beta > 0$:
\begin{displaymath}
B \to A + A
\end{displaymath}
Before we make it pretty using the ideas we've been explaining, the master equation says
\begin{displaymath}
\displaystyle{ \frac{d}{d t} \psi_{m,n} (t)} = \alpha (m+2)(m+1)\psi_{m+2,n-1}(t) - \alpha m(m-1) \psi_{m,n}(t) +
\beta (n+1) \psi_{m-2,n+1}(t) - \beta n \psi_{m,n}(t)
\end{displaymath}
where we define $\psi_{i,j}$ to be zero if either $i$ or $j$ is negative.
Yuck! Normally we don't show you such nasty equations. Indeed the whole point of our work has been to show you that by packaging the equations in a better way, we can understand them using high-level concepts instead of mucking around with millions of scribbled symbols. But we thought we'd show you what's secretly lying behind our beautiful abstract formalism, just once.
Each term has a meaning. For example, the first one:
\begin{displaymath}
\alpha (m+2)(m+1)\psi_{m+2,n-1}(t)
\end{displaymath}
means that the reaction $A + A \to B$ will tend to increase the probability of there being $m$ atoms of $A$ and $n$ molecules of $B$ if we start with 2 more $A$'s and 1 fewer $B$. This reaction can happen in $(m+2)(m+1)$ ways if we start with $m+2$ atoms of $A$. And it happens at a probabilistic rate proportional to the rate constant for this reaction, $\alpha$.
We won't go through the rest of the terms. It's a good exercise to do so, but there could easily be a typo in the formula, since it's so long and messy. So let us know if you find one!
To simplify this mess, the key trick is to introduce a generating function that summarizes all the probabilities in a single power series:
\begin{displaymath}
\Psi = \sum_{m,n \ge 0} \psi_{m,n} y^m \, z^n
\end{displaymath}
It's a power series in two variables, $y$ and $z$, since we have two chemical species: $A$'s and $B$'s.
Using this trick, the master equation looks like
\begin{displaymath}
\frac{d}{d t} \Psi(t) = H \Psi(t)
\end{displaymath}
where the Hamiltonian $H$ is a sum of terms, one for each reaction. This Hamiltonian is built from operators that annihilate and create $A$'s and $B$'s. The annihilation and creation operators for $A$ atoms are:
\begin{displaymath}
\displaystyle{ a = \frac{\partial}{\partial y} , \qquad a^\dagger = y }
\end{displaymath}
The annihilation operator differentiates our power series with respect to the variable $y$. The creation operator multiplies it by that variable. Similarly, the annihilation and creation operators for $B$ molecules are:
\begin{displaymath}
\displaystyle{ b = \frac{\partial}{\partial z} , \qquad b^\dagger = z }
\end{displaymath}
In an earlier part of this series we explained a recipe that lets us stare at our chemical reaction network and write down this Hamiltonian:
\begin{displaymath}
H = \alpha (b^\dagger a^2 - {a^\dagger}^2 a^2) + \beta ({a^\dagger}^2 b - b^\dagger b)
\end{displaymath}
As promised, there's one term for each reaction. But each term is itself a sum of two: one that increases the probability that our container of chemicals will be in a new state, and another that decreases the probability that it's in its original state. We get a total of four terms, which correspond to the four terms in our previous way of writing the master equation.
\textbf{Puzzle 1.} Show that this new way of writing the master equation is equivalent to the previous one.
Now we will look for all equilibrium solutions of the master equation: in other words, solutions that don't change with time. So, we're trying to solve
\begin{displaymath}
H \Psi = 0
\end{displaymath}
Given the rather complicated form of the Hamiltonian, this seems tough. The challenge looks more concrete but perhaps more scary if we go back to our original formulation. We're looking for probabilities $\psi_{m,n}$, nonnegative numbers that sum to one, such that
\begin{displaymath}
\alpha (m+2)(m+1)\psi_{m+2,n-1}- \alpha m(m-1) \psi_{m,n} + \beta (n+1) \psi_{m-2,n+1} - \beta n \psi_{m,n} = 0
\end{displaymath}
for all $m$ and $n$.
This equation looks rather horrid, but the good news is that it's linear, so a linear combination of solutions is again a solution. This lets us simplify the problem using a conserved quantity.
Clearly, there's a quantity that the reactions here don't change:
What's that? It's the number of $A$'s plus twice the number of $B$'s. After all, a $B$ can turn into two $A$'s, or vice versa.
(Of course the secret reason is that $B$ is a diatomic molecule made of two $A$'s. But you'd be able to follow the logic here even if you didn't know that, just by looking at the chemical reaction network\ldots{} and sometimes this more abstract approach is handy! Indeed, the way chemists first discovered that certain molecules are made of certain atoms is by seeing which reactions were possible and which weren't.)
Suppose we start in a situation where we know that the number of $A$'s plus twice the number of $B$'s equals some number $k$:
\begin{displaymath}
\psi_{m,n} = 0 \; \text{unless} \; m+2n = k
\end{displaymath}
Then we know $\Psi$ is initially of the form
\begin{displaymath}
\Psi = \sum_{m+2n = k} \psi_{m,n} y^m z^n
\end{displaymath}
But since the number of $A$'s plus twice the number of $B$'s is conserved, if $\Psi$ obeys the master equation it will continue to be of this form!
Put a fancier way, we know that if a solution of the master equation starts in this subspace:
\begin{displaymath}
L_k = \{ \Psi: \; \Psi = \sum_{m+2n = k} \psi_{m,n} y^m z^n \; \text{for some} \; \psi_{m,n} \}
\end{displaymath}
it will stay in this subspace. So, because the master equation is linear, we can take any solution $\Psi$ and write it as a linear combination of solutions $\Psi_k$, one in each subspace $L_k$.
In particular, we can do this for an equilibrium solution $\Psi$. And then all the solutions $\Psi_k$ are also equilibrium solutions: they're linearly independent, so if one of them changed with time, $\Psi$ would too.
This means we can just look for equilibrium solutions in the subspaces $L_k$. If we find these, we can get equilibrium solutions by taking linear combinations.
Once we've noticed that, our horrid equation makes a bit more sense:
\begin{displaymath}
\alpha (m+2)(m+1)\psi_{m+2,n-1}- \alpha m(m-1) \psi_{m,n} + \beta (n+1) \psi_{m-2,n+1} - \beta n \psi_{m,n} = 0
\end{displaymath}
Note that if the pair of subscripts $m, n$ obey $m + 2n = k$, the same is true for the other pairs of subscripts here. So our equation relates the values of $\psi_{m,n}$ for all choices of $m,n$ lying on this line segment:
\begin{displaymath}
m+2n = k , \qquad m ,n \ge 0
\end{displaymath}
If you think about it a minute, you'll see that if we know two of these values, we can keep using our equation to recursively work out all the rest. So, there are at most two linearly independent equilibrium solutions of the master equation in each subspace $L_k$.
Why at most two? Why not two? Well, we have to be a bit careful about what happens at the ends of the line segment: remember that $\psi_{m,n}$ is defined to be zero when $m$ or $n$ becomes negative. If we think very hard about this, we'll see there's just one linearly independent equilibrium solution of the master equation in each subspace $L_k$. But this is the sort of nitty-gritty calculation that's not fun to watch someone else do, so we won't bore you with that.
Soon we'll move on to a more high-level approach to this problem. But first, one remark. Our horrid equation
\begin{displaymath}
\alpha (m+2)(m+1)\psi_{m+2,n-1}- \alpha m(m-1) \psi_{m,n} + \beta (n+1) \psi_{m-2,n+1} - \beta n \psi_{m,n} = 0
\end{displaymath}
resembles the usual discretized form of the equation
\begin{displaymath}
\displaystyle {\frac{d^2 \psi}{d x^2} = 0 }
\end{displaymath}
namely:
\begin{displaymath}
\psi_{n-1} - 2 \psi_{n} + \psi_{n+1} = 0
\end{displaymath}
And this makes sense, since we get
\begin{displaymath}
\displaystyle {\frac{d^2 \psi}{d x^2} = 0 }
\end{displaymath}
by taking the heat equation:
\begin{displaymath}
\displaystyle \frac{\partial \psi}{\partial t} = {\frac{\partial^2 \psi}{\partial x^2} }
\end{displaymath}
and assuming $\psi$ doesn't depend on time. So what we're doing is a lot like looking for equilibrium solutions of the heat equation.
This makes perfect sense, since the heat equation describes how heat smears out as little particles of heat randomly move around. True, there don't really exist `little particles of heat', but the heat equation also describes the diffusion of any other kind of particles as they randomly move around undergoing Brownian motion. Similarly, our master equation describes a random walk on this line segment:
\begin{displaymath}
m+2n = k , \qquad m , n \ge 0
\end{displaymath}
or more precisely, the points on this segment with integer coordinates. The equilibrium solutions arise when the probabilities $\psi_{m,n}$ have diffused as much as possible.
If you think about it this way, it should be physically obvious that there's just linearly independent equilibrium solution of the master equation for each value of $k$.
There's a general moral here, too, which we're seeing in a special case: the master equation for a chemical reaction network really describes a bunch of random walks, one for each allowed value of the conserved quantities that can be built as linear combinations of number operators. In our case we have one such conserved quantity, but in general there may be more (or none). Furthermore, these `random walks' are what we've been calling Markov processes.
We simplified our task of finding equilibrium solutions of the master equation by finding a conserved quantity. The idea of simplifying problems using conserved quantities is fundamental to physics: this is why physicists are so enamored with quantities like energy, momentum, angular momentum and so on.
Nowadays physicists often use `Noether's theorem' to get conserved quantities from symmetries. There's a very simple version of Noether's theorem for quantum mechanics, but in an earlier part of this series we saw a version for stochastic mechanics, and it's that version that is relevant now. Here's a paper which explains it in detail:
$\bullet$ John Baez and Brendan Fong, \emph{A Noether theorem for Markov processes}.
We don't really need Noether's theorem now, since we found the conserved quantity and exploited it without even noticing the symmetry. Nonetheless it's interesting to see how it relates to what we're doing.
For the reaction we're looking at now, the idea is that the subspaces $L_k$ are eigenspaces of an operator that commutes with the Hamiltonian $H$. It follows from standard math that a solution of the master equation that starts in one of these subspaces, stays in that subspace.
What is this operator? It's built from `number operators'. The number operator for $A$'s is
\begin{displaymath}
N_A = a^\dagger a
\end{displaymath}
and the number operator for $B$'s is
\begin{displaymath}
N_B = b^\dagger b
\end{displaymath}
A little calculation shows
\begin{displaymath}
N_A \,y^m z^n = m \, y^m z^n, \quad \qquad N_B\, y^m z^n = n \,y^m z^n
\end{displaymath}
so the eigenvalue of $N_A$ is the number of $A$'s, while the eigenvalue of $N_B$ is the number of $B$'s. This is why they're called number operators.
As a consequence, the eigenvalue of the operator $N_A + 2N_B$ is the number of $A$'s plus twice the number of $B$'s:
\begin{displaymath}
(N_A + 2N_B) \, y^m z^n = (m + 2n) \, y^m z^n
\end{displaymath}
Let's call this operator $O$, since it's so important:
\begin{displaymath}
O = N_A + 2N_B
\end{displaymath}
If you think about it, the spaces $L_k$ we saw a minute ago are precisely the eigenspaces of this operator:
\begin{displaymath}
L_k = \{ \Psi : \; O \Psi = k \Psi \}
\end{displaymath}
As we've seen, solutions of the master equation that start in one of these eigenspaces will stay there. This lets us take some techniques that are very familiar in quantum mechanics, and apply them to this stochastic situation.
First of all, time evolution as described by the master equation is given by the operators $\exp(t H)$. In other words,
\begin{displaymath}
\displaystyle{ \frac{d}{d t} \Psi(t) } = H \Psi(t) \quad \text{and} \quad \Psi(0) = \Phi \quad \Rightarrow \quad \Psi(t) = \exp(t H) \Phi
\end{displaymath}
Thus if $\Phi$ is an eigenvector of $O$, so is $\exp(t H) \Phi$, with the same eigenvalue. In other words,
\begin{displaymath}
O \Phi = k \Phi
\end{displaymath}
implies
\begin{displaymath}
O \exp(t H) \Phi = k \exp(t H) \Phi = \exp(t H) O \Phi
\end{displaymath}
But since we can choose a basis consisting of eigenvectors of $O$, we must have
\begin{displaymath}
O \exp(t H) = \exp(t H) O
\end{displaymath}
or, throwing caution to the winds and differentiating:
\begin{displaymath}
O H = H O
\end{displaymath}
So, as we'd expect from Noether's theorem, our conserved quantity commutes with the Hamiltonian! This in turn implies that $H$ commutes with any polynomial in $O$, which in turn suggests that
\begin{displaymath}
\exp(s O) H = H \exp(s O)
\end{displaymath}
and also
\begin{displaymath}
\exp(s O) \exp(t H) = \exp(t H) \exp(s O)
\end{displaymath}
The last equation says that $O$ generates a 1-parameter family of `symmetries' $\exp(s O)$: operators that commute with time evolution. But what do these symmetries actually do? Since
\begin{displaymath}
O y^m z^n = (m + 2n) y^m z^n
\end{displaymath}
we have
\begin{displaymath}
\exp(s O) y^m z^n = e^{s(m + 2n)}\, y^m z^n
\end{displaymath}
So, this symmetry takes any probability distribution $\psi_{m,n}$ and multiplies it by $e^{s(m + 2n)}$.
In other words, our symmetry multiplies the relative probability of finding our container of gas in a given state by a factor of $e^s$ for each $A$ atom, and by a factor of $e^{2s}$ for each $B$ molecule. It might not seem obvious that this operation commutes with time evolution! However, experts on chemical reaction theory are familiar with this fact.
Finally, a couple of technical points. Starting where we said `throwing caution to the winds', our treatment has not been rigorous, since $O$ and $H$ are unbounded operators, and these must be handled with caution. Nonetheless, all the commutation relations we wrote down are true.
It's also true that $\exp(s O)$ is unbounded for positive $s$. It's bounded for negative $s$, but even then doesn't map probability distributions to probability distributions. However, it does map any nonzero vector $\Psi$ with $\psi_{m,n} \ge 0$ to a vector $\exp(s O) \Psi$ with the same properties. So, we can just normalize this vector and get a probability distribution. This normalization is why we introduced the concept of probabilities.
Now we'll actually find solutions of the master equation in closed form. To understand this final section, you really do need to remember some things we've discussed earlier. Last time we considered the same chemical reaction network we're studying today, but we looked at its rate equation. Our notation for the rate constants was different then---sorry---but if we call those constants $\alpha$ and $\beta$ as we're doing now, the rate equation looks like this:
\begin{displaymath}
\displaystyle{ \frac{d}{d t} x_1 = 2 \alpha x_2 - 2 \beta x_1^2}
\end{displaymath}
\begin{displaymath}
\displaystyle{ \frac{d}{d t} x_2 = - \alpha x_2 + \beta x_1^2 }
\end{displaymath}
This describes how the number of $A$'s and $B$'s changes in the limit where there are lots of them and we can treat them as varying continuously, in a deterministic way. The number of $A$'s is $x_1$, and the number of $B$'s is $x_2$.
We saw that the quantity
\begin{displaymath}
x_1 + 2 x_2
\end{displaymath}
is conserved, just as now we're seeing that $N_A + 2 N_B$ is conserved. We saw that the rate equation has one equilibrium solution for each choice of $x_1 + 2 x_2$. And we saw that these equilibrium solutions obey
\begin{displaymath}
\frac{x_1^2}{x_2} = \frac{\alpha}{\beta}
\end{displaymath}
Now the Anderson-Craciun-Kurtz theorem, introduced in an earlier part of this series, is a powerful result that gets equilibrium solutions of the master equation from equilibrium solutions of the rate equation. It only applies to equilibrium solutions that are `complex balanced', but that's okay:
\textbf{Puzzle 2.} Show that the equilibrium solutions of the rate equation for the chemical reaction network
are complex balanced.
So, given any equilibrium solution $(x_1,x_2)$ of our rate equation, we can hit it with the Anderson-Craciun-Kurtz theorem and get an equilibrium solution of the master equation! And it looks like this:
\begin{displaymath}
\displaystyle{ \Psi = e^{-(x_1 + x_2)} \, \sum_{m,n \ge 0} \frac{x_1^m x_2^n} {m! n! } \, y^m z^n }
\end{displaymath}
In this solution, the probability distribution
\begin{displaymath}
\psi_{m,n} = e^{-(x_1 + x_2)} \, \frac{x_1^m x_2^n} {m! n! }
\end{displaymath}
is a product of Poisson distributions. The factor in front is there to make the numbers $\psi_{m,n}$ add up to one. And remember, $x_1, x_2$ are any nonnegative numbers with $x_1^2/x_2 = \alpha/\beta$.
So, using the Anderson-Craciun-Kurtz theorem, we get an explicit closed-form solution of the horrid equation
\begin{displaymath}
\alpha (m+2)(m+1)\psi_{m+2,n-1}- \alpha m(m-1) \psi_{m,n} + \beta (n+1) \psi_{m-2,n+1} - \beta n \psi_{m,n} = 0
\end{displaymath}
with very little work! This is why this theorem is so nice. And of course we're looking at a very simple reaction network: for more complicated ones it becomes even better to use this theorem to avoid painful calculations.
category:blog
\end{document}