Chapter 2
2.1
$$
\begin{pmatrix}
1 \\ -1
\end{pmatrix}
+
\begin{pmatrix}
1 \\ 2
\end{pmatrix}
-
\begin{pmatrix}
2 \\ 1
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix},
$$
so a nontrivial linear combination of the three vectors vanishes; they are linearly dependent.
2.2
$$
\begin{eqnarray}
&A&:V\rightarrow V,\,\, \mathrm{basis}:\left| 0 \right\rangle, \left| 1 \right\rangle \\
&A&\left| 0 \right\rangle=\left| 1 \right\rangle,\,\, A\left| 1 \right\rangle = \left| 0 \right\rangle \\
\end{eqnarray}
$$
that is,
$$
\begin{eqnarray}
&A&\left| 0 \right\rangle = \left| 1 \right\rangle = 0\left| 0 \right\rangle + 1\left| 1 \right\rangle \\
&A&\left| 1 \right\rangle = \left| 0 \right\rangle = 1\left| 0 \right\rangle + 0\left| 1 \right\rangle.
\end{eqnarray}
$$
Thus, with respect to the input and output basis $\left| 0 \right\rangle, \left| 1 \right\rangle$,
$$
\begin{equation}
A =
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}.
\end{equation}
$$
With respect to different input and output bases the matrix representation changes; for example, keeping the input basis $\left| 0 \right\rangle, \left| 1 \right\rangle$ but ordering the output basis as $\left| 1 \right\rangle, \left| 0 \right\rangle$ gives the identity matrix.
2.3
$$
\begin{equation}
A:V\rightarrow W,\,\,B:W\rightarrow X
\end{equation}
$$
From eq.(2.12),
$$
\begin{equation}
A\left| v_j \right\rangle = \sum_i A_{ij} \left| w_i \right\rangle, \,\, B\left| w_i \right\rangle = \sum_k B_{ki} \left| x_k \right\rangle .
\end{equation}
$$
Then,
$$
\begin{eqnarray}
B\left( A\left| v_j \right\rangle \right) &=& B \left( \sum_i A_{ij} \left| w_i \right\rangle \right) \\
&=& \sum_i A_{ij} \sum_k B_{ki} \left| x_k \right\rangle \\
&=& \sum_i \sum_k B_{ki} A_{ij} \left| x_k \right\rangle \\
&=& \sum_k \left( \sum_i B_{ki} A_{ij} \right) \left| x_k \right\rangle \\
&=& \sum_k \left( BA \right)_{kj} \left| x_k \right\rangle
\end{eqnarray}
$$
2.4
$$
\begin{eqnarray}
&A&:V\rightarrow V,\,\, \mathrm{basis}:\left| 0 \right\rangle, \left| 1 \right\rangle \\
&A&\left| 0 \right\rangle=\left| 0 \right\rangle,\,\, A\left| 1 \right\rangle = \left| 1 \right\rangle \\
\end{eqnarray}
$$
that is,
$$
\begin{eqnarray}
&A&\left| 0 \right\rangle = \left| 0 \right\rangle = 1\left| 0 \right\rangle + 0\left| 1 \right\rangle \\
&A&\left| 1 \right\rangle = \left| 1 \right\rangle = 0\left| 0 \right\rangle + 1\left| 1 \right\rangle.
\end{eqnarray}
$$
Thus,
$$
\begin{equation}
A =
\begin{pmatrix}
1 & 0 \\
0 & 1
\end{pmatrix}
\end{equation}
$$
2.5
Put
$$
\begin{eqnarray}
&\left| v \right\rangle = \left( v_1\; v_2\; \cdots\; v_n \right)^T \\
&\left| w \right\rangle = \left( w_1\; w_2\; \cdots\; w_n \right)^T
\end{eqnarray}
$$
For (1),
$$
\begin{eqnarray}
\left( \left| v \right\rangle,\, \sum_i \lambda_i \left| w_i \right\rangle \right) &=& \sum_j v^*_j \left( \sum_i \lambda_i \left( w_i \right)_j \right) \\
&=& \sum_i \lambda_i \left( \sum_j v^{*}_j \left( w_i \right)_j \right) \\
&=& \sum_i \lambda_i \left( \left| v \right\rangle,\, \left| w_i \right\rangle \right).
\end{eqnarray}
$$
For (2),
$$
\begin{eqnarray}
\left( \left| v \right\rangle,\, \left| w \right\rangle \right) &=& \sum_i v^*_i w_i \\
&=& \sum_i \left( w^{*}_i v_i\right)^{*} \\
&=& \left( \left| w \right\rangle,\, \left| v \right\rangle \right)^{*}.
\end{eqnarray}
$$
For (3),
$$
\begin{eqnarray}
\left( \left| v \right\rangle,\, \left| v \right\rangle \right) &=& \sum_i v^*_i v_i \\
&=& \sum_i |v_i|^2 \ge 0.
\end{eqnarray}
$$
2.6
$$
\begin{eqnarray}
\left( \sum_i \lambda_i \left| w_i \right\rangle,\, \left| v \right\rangle \right) &=& \left( \left| v \right\rangle,\, \sum_i \lambda_i \left| w_i \right\rangle \right)^* \\
&=& \left( \sum_i \lambda_i \left( \left| v \right\rangle,\, \left| w_i \right\rangle \right) \right)^* \\
&=& \sum_i \lambda^*_i \left( \left| v \right\rangle,\, \left| w_i \right\rangle \right)^* \\
&=& \sum_i \lambda^*_i \left( \left| w_i \right\rangle,\, \left| v \right\rangle \right)
\end{eqnarray}
$$
2.7
$$
\begin{equation}
\left( \left| v \right\rangle,\, \left| w \right\rangle \right) = \left\langle v | w \right\rangle = 1\times 1 + 1\times (-1) = 0.
\end{equation}
$$
The normalized forms of $\left| v \right\rangle$ and $\left| w \right\rangle$ are,
$$
\begin{eqnarray}
&{}&\frac{\left| v \right\rangle}{\left| \left| v \right\rangle \right|} = \frac{\left| v \right\rangle}{\sqrt{\left\langle v | v \right\rangle}} = \frac{\left| v \right\rangle}{\sqrt{2}},\\
&{}&\frac{\left| w \right\rangle}{\left| \left| w \right\rangle \right|} = \frac{\left| w \right\rangle}{\sqrt{\left\langle w | w \right\rangle}} = \frac{\left| w \right\rangle}{\sqrt{2}}.
\end{eqnarray}
$$
2.8
We prove this by mathematical induction.
For $k = 1$,
$$
\begin{eqnarray}
\left| v_1 \right\rangle &=& \frac{\left| w_1 \right\rangle}{\left| \left| w_1 \right\rangle \right|}\\
\left| v_2 \right\rangle &=& \frac{\left| w_2 \right\rangle - \left\langle v_1 | w_2 \right\rangle \left| v_1 \right\rangle}{\left| \left| w_2 \right\rangle - \left\langle v_1 | w_2 \right\rangle \left| v_1 \right\rangle \right|} \\
\left\langle v_1 | v_2 \right\rangle &=& \frac{\left\langle v_1 | w_2 \right\rangle - \left\langle v_1 | w_2 \right\rangle \left\langle v_1 | v_1 \right\rangle}{\left| \left| w_2 \right\rangle - \left\langle v_1 | w_2 \right\rangle \left| v_1 \right\rangle \right|} = 0
\end{eqnarray}
$$
Thus, $\left| v_1 \right\rangle \perp \left| v_2 \right\rangle$.
Assume $\left| v_1 \right\rangle, \ldots, \left| v_{k'} \right\rangle$ are orthonormal for some $k' \le d-1$. Then, for $k=k'+1$ and any $m \le k'$,
$$
\begin{eqnarray}
\left| v_{k'+1} \right\rangle &=& \frac{\left| w_{k'+1} \right\rangle - \sum^{k'}_{i=1} \left\langle v_i | w_{k'+1} \right\rangle \left| v_i \right\rangle}{\left| \left| w_{k'+1} \right\rangle - \sum^{k'}_{i=1} \left\langle v_i | w_{k'+1} \right\rangle \left| v_i \right\rangle \right|} \\
\left\langle v_{m} | v_{k'+1} \right\rangle &=& \frac{\left\langle v_{m} | w_{k'+1} \right\rangle - \sum^{k'}_{i=1} \left\langle v_i | w_{k'+1} \right\rangle \left\langle v_{m} | v_i \right\rangle}{\left| \left| w_{k'+1} \right\rangle - \sum^{k'}_{i=1} \left\langle v_i | w_{k'+1} \right\rangle \left| v_i \right\rangle \right|} = \frac{\left\langle v_{m} | w_{k'+1} \right\rangle - \left\langle v_{m} | w_{k'+1} \right\rangle}{\left| \left| w_{k'+1} \right\rangle - \sum^{k'}_{i=1} \left\langle v_i | w_{k'+1} \right\rangle \left| v_i \right\rangle \right|} = 0.
\end{eqnarray}
$$
Thus, $\left| v_{k'+1} \right\rangle$ is orthogonal to $\left| v_1 \right\rangle, \ldots, \left| v_{k'} \right\rangle$, and it is normalized by construction.
Therefore, the Gram-Schmidt procedure produces an orthonormal basis for $V$.
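This procedure is easy to check numerically. Below is a small numpy sketch (the function name and the test vectors are just illustrative): it orthonormalizes three linearly independent vectors and verifies that their Gram matrix is the identity.
```python
import numpy as np

def gram_schmidt(vectors):
    """Orthonormalize linearly independent vectors |w_1>, ..., |w_d>."""
    basis = []
    for w in vectors:
        v = w.astype(complex)
        # Subtract the components along the previously constructed |v_i>.
        for u in basis:
            v = v - np.vdot(u, v) * u
        basis.append(v / np.linalg.norm(v))
    return np.array(basis)

# Illustrative linearly independent (but not orthogonal) vectors.
ws = [np.array([1.0, 1.0, 0.0]), np.array([1.0, 0.0, 1.0]), np.array([0.0, 1.0, 1.0])]
vs = gram_schmidt(ws)
print(np.round(vs @ vs.conj().T, 10))  # identity matrix: the |v_k> are orthonormal
```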
2.9
$$
\begin{eqnarray}
\sigma_0 &=& \sum^1_{i,j=0} \left\langle j | \sigma_0 | i \right\rangle \left| j \right\rangle \left\langle i \right| = \left| 0 \right\rangle \left\langle 0 \right| + \left| 1 \right\rangle \left\langle 1 \right| \\
\sigma_1 &=& \left| 0 \right\rangle \left\langle 1 \right| + \left| 1 \right\rangle \left\langle 0 \right| \\
\sigma_2 &=& -i \left| 0 \right\rangle \left\langle 1 \right| + i\left| 1 \right\rangle \left\langle 0 \right| \\
\sigma_3 &=& \left| 0 \right\rangle \left\langle 0 \right| - \left| 1 \right\rangle \left\langle 1 \right|
\end{eqnarray}
$$
2.10
Define the operator $T \equiv \left| v_j \right\rangle \left\langle v_k \right|$, so that
$$
T\left( \left| v_i \right\rangle \right) = \left| v_j \right\rangle \left\langle v_k \middle| v_i \right\rangle = \delta_{ki} \left| v_j \right\rangle .
$$
Then $T\left( \left| v_1 \right\rangle \right), T\left( \left| v_2 \right\rangle \right), \cdots, T\left( \left| v_d \right\rangle \right)$ are described by
$$
\left( T\left( \left| v_1 \right\rangle \right),\, T\left( \left| v_2 \right\rangle \right),\, \cdots,\, T\left( \left| v_d \right\rangle \right) \right) = \left( \left| v_1 \right\rangle,\, \left| v_2 \right\rangle,\, \cdots,\, \left| v_d \right\rangle \right)A ,
$$
where $A$ is the matrix representation of $T$ in the basis $\left| v_i \right\rangle$.
Therefore, the only nonzero entry of $A$ is a $1$ in row $j$, column $k$:
$$
A_{lm} = \delta_{lj} \delta_{mk} .
$$
2.11
(1): X
$$
\mathrm{det}\left( X - \lambda I \right) =
\begin{vmatrix}
-\lambda & 1 \\
1 & -\lambda
\end{vmatrix}
= \lambda^2-1 = 0\;\;\;\;\therefore \lambda = \pm 1
$$
For $\lambda = -1$,
$$
\begin{pmatrix}
1 & 1 \\
1 & 1
\end{pmatrix}
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_{-1} \right\rangle =
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
= \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 \\ -1
\end{pmatrix}
= \frac{1}{\sqrt{2}} \left( \left| 0 \right\rangle - \left| 1 \right\rangle \right).
$$
For $\lambda = 1$,
$$
\begin{pmatrix}
-1 & 1 \\
1 & -1
\end{pmatrix}
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_1 \right\rangle =
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
= \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
= \frac{1}{\sqrt{2}} \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right).
$$
Therefore,
$$
\begin{eqnarray}
X &=& \lambda_{-1} \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| + \lambda_1 \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| \\
&=& \frac{1}{2} \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right) \left( \left\langle 0 \right| + \left\langle 1 \right|\right) - \frac{1}{2} \left( \left| 0 \right\rangle - \left| 1 \right\rangle \right) \left( \left\langle 0 \right| - \left\langle 1 \right| \right)
\end{eqnarray}
$$
(2): Y
$$
\mathrm{det}\left( Y - \lambda I \right) =
\begin{vmatrix}
-\lambda & -i \\
i & -\lambda
\end{vmatrix}
= \lambda^2-1 = 0\;\;\;\;\therefore \lambda = \pm 1
$$
For $\lambda = -1$,
$$
\begin{pmatrix}
1 & -i \\
i & 1
\end{pmatrix}
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_{-1} \right\rangle =
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
= \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 \\ -i
\end{pmatrix}
= \frac{1}{\sqrt{2}} \left( \left| 0 \right\rangle - i \left| 1 \right\rangle \right).
$$
For $\lambda = 1$,
$$
\begin{pmatrix}
-1 & -i \\
i & -1
\end{pmatrix}
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_1 \right\rangle =
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
= \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 \\ i
\end{pmatrix}
= \frac{1}{\sqrt{2}} \left( \left| 0 \right\rangle + i \left| 1 \right\rangle \right).
$$
Therefore,
$$
\begin{eqnarray}
Y &=& \lambda_{-1} \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| + \lambda_1 \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| \\
&=& \frac{1}{2} \left( \left| 0 \right\rangle + i \left| 1 \right\rangle \right) \left( \left\langle 0 \right| - i \left\langle 1 \right| \right) - \frac{1}{2} \left( \left| 0 \right\rangle -i \left| 1 \right\rangle \right) \left( \left\langle 0 \right| +i \left\langle 1 \right| \right)
\end{eqnarray}
$$
(3): Z
$$
\mathrm{det} \left( Z - \lambda I \right) =
\begin{vmatrix}
1-\lambda & 0 \\
0 & -1-\lambda
\end{vmatrix}
= -\left( \lambda^2 -1 \right) = 0\;\;\;\;\therefore \lambda = \pm 1.
$$
For $\lambda = -1$,
$$
\begin{pmatrix}
2 & 0 \\
0 & 0
\end{pmatrix}
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_{-1} \right\rangle =
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 1
\end{pmatrix}
= \left| 1 \right\rangle.
$$
For $\lambda = 1$,
$$
\begin{pmatrix}
0 & 0 \\
0 & -2
\end{pmatrix}
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_1 \right\rangle =
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
=
\begin{pmatrix}
1 \\ 0
\end{pmatrix}
= \left| 0 \right\rangle.
$$
Therefore,
$$
\begin{eqnarray}
Z &=& \lambda_{-1} \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| + \lambda_1 \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| \\
&=& \left| 0 \right\rangle \left\langle 0 \right| - \left| 1 \right\rangle \left\langle 1 \right|.
\end{eqnarray}
$$
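A quick numpy check of these spectral decompositions, rebuilding each Pauli matrix from its eigenvalues and eigenvectors:
```python
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

for name, M in [("X", X), ("Y", Y), ("Z", Z)]:
    # eigh returns eigenvalues and orthonormal eigenvectors of a Hermitian matrix.
    evals, evecs = np.linalg.eigh(M)
    rebuilt = sum(lam * np.outer(v, v.conj()) for lam, v in zip(evals, evecs.T))
    print(name, np.allclose(rebuilt, M))  # True for each Pauli matrix
```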
2.12
Put $A = \begin{pmatrix} 1 & 0 \\ 1& 1\end{pmatrix}$. Then,
$$
\mathrm{det} \left( A- \lambda I \right)=
\begin{vmatrix}
1-\lambda & 0 \\
1 & 1-\lambda
\end{vmatrix}
= \left(1-\lambda \right)^2 = 0\;\;\;\;\therefore \lambda = 1.
$$
Now find the eigenvector $\left| \lambda_1 \right\rangle$.
$$
\begin{pmatrix}
0 & 0 \\
1 & 0
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
Thus,
$$
\left| \lambda_1 \right\rangle =
\begin{pmatrix}
0 \\ 1
\end{pmatrix}.
$$
Since $A = \begin{pmatrix} 1 & 0 \\ 1& 1\end{pmatrix}$ has only one linearly independent eigenvector (the eigenvalue $\lambda = 1$ has algebraic multiplicity 2 but geometric multiplicity 1), it is not diagonalizable.
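A quick numpy check (the tiny numbers below are just floating-point noise): the eigenvalue $1$ is repeated and the returned eigenvectors are linearly dependent, so there is no eigenbasis.
```python
import numpy as np

A = np.array([[1.0, 0.0], [1.0, 1.0]])
evals, evecs = np.linalg.eig(A)
print(evals)                        # both eigenvalues are (numerically) 1
print(abs(np.linalg.det(evecs)))    # ~0: the returned eigenvectors are linearly
                                    # dependent, so A has no basis of eigenvectors
```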
2.13
$$
\begin{eqnarray}
\left( \left| l \right\rangle,\, \left( \left| w \right\rangle \left\langle v \right| \right)^{\dagger} \left| m \right\rangle \right) &=& \left( \left( \left| w \right\rangle \left\langle v \right| \right) \left| l \right\rangle,\, \left| m \right\rangle \right) \\
&=& \left( \left\langle v | l \right\rangle \left| w \right\rangle,\, \left| m \right\rangle \right) \\
&=& \left( \left\langle v | l \right\rangle \right)^* \left\langle w | m \right\rangle \\
&=& \left\langle l | v \right\rangle \left\langle w | m \right\rangle \\
&=& \left\langle l \right| \left( \left| v \right\rangle \left\langle w \right| \right) \left| m \right\rangle \\
&=& \left( \left| l \right\rangle,\, \left( \left| v \right\rangle \left\langle w \right| \right) \left| m \right\rangle \right)
\end{eqnarray}
$$
2.14
$$
\begin{eqnarray}
\left( \left( \sum_i a_i A_i \right)^{\dagger} \left| v \right\rangle, \left| w \right\rangle \right) &=& \left( \left| v \right\rangle, \sum_i a_i A_i \left| w \right\rangle \right) \\
&=& \sum_i a_i \left( A^{\dagger}_i \left| v \right\rangle, \left| w \right\rangle \right) \\
&=& \left( \sum_i a^*_i A^{\dagger}_i \left| v \right\rangle, \left| w \right\rangle \right)
\end{eqnarray}
$$
2.15
$$
\begin{eqnarray}
\left( \left( A^{\dagger} \right)^{\dagger} \left| v \right\rangle, \left| w \right\rangle \right) &=& \left( \left| v \right\rangle, A^{\dagger} \left| w \right\rangle \right) \\
&=& \left( A^{\dagger} \left| w \right\rangle, \left| v \right\rangle \right)^* \\
&=& \left( \left| w \right\rangle, A \left| v \right\rangle \right)^* \\
&=& \left( A\left| v \right\rangle, \left| w \right\rangle \right)
\end{eqnarray}
$$
2.16
$$
\begin{eqnarray}
P^2 &=& \left( \sum^k_{i=1} \left| i \right\rangle \left\langle i \right| \right) \left( \sum^k_{j=1} \left| j \right\rangle \left\langle j \right| \right) \\
&=& \sum^k_{i=1} \sum^k_{j=1} \left| i \right\rangle \left\langle i | j \right\rangle \left\langle j \right| \\
&=& \sum^k_{i=1} \sum^k_{j=1} \left| i \right\rangle \left\langle j \right| \delta_{ij} \\
&=& \sum^k_{i=1} \left| i \right\rangle \left\langle i \right| \\
&=& P
\end{eqnarray}
$$
2.17
A normal matrix $A$ is diagonalized by a unitary matrix $U$, such that
$$
A = U^{\dagger} D U,
$$
where $D$ is a diagonal matrix whose entries are the eigenvalues of $A$.
The Hermitian conjugate of $A$ is
$$
A^{\dagger} = U^{\dagger}D^{\dagger}U.
$$
If all eigenvalues are real, $D=D^{\dagger}$. Then,
$$
A^{\dagger} = U^{\dagger}D^{\dagger}U = U^{\dagger}DU = A.
$$
Conversely, if $A$ is Hermitian, then $U^{\dagger}DU = A = A^{\dagger} = U^{\dagger}D^{\dagger}U$ implies $D = D^{\dagger}$, so all eigenvalues are real.
2.18
$$
\begin{eqnarray}
U \left| v \right\rangle &=& \lambda \left| v \right\rangle \;\;\;\; &(1) \\
\left\langle v \right| U^{\dagger} &=& \lambda^* \left\langle v \right| \;\;\;\; &(2)
\end{eqnarray}
$$
$(2) \times (1)$,
$$
\left\langle v \middle| U^{\dagger} U \middle| v \right\rangle = \left\langle v \middle| v \right\rangle = \lambda^* \lambda \left\langle v \middle| v \right\rangle\;\;\;\; \therefore \left| \lambda \right|^2 = 1 \rightarrow \lambda = \mathrm{e}^{i\theta}
$$
2.19
$$
\begin{eqnarray}
\sigma^{\dagger}_0 &=&
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
= \sigma_0 \\
\sigma^{\dagger}_0 \sigma_0 &=&
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
= I
\end{eqnarray}
$$
$$
\begin{eqnarray}
\sigma_1^{\dagger} &=&
\begin{pmatrix}
0 & 1 \\ 1 & 0
\end{pmatrix}
= \sigma_1 \\
\sigma^{\dagger}_1 \sigma_1 &=&
\begin{pmatrix}
0 & 1 \\ 1 & 0
\end{pmatrix}
\begin{pmatrix}
0 & 1 \\ 1 & 0
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
= I
\end{eqnarray}
$$
$$
\begin{eqnarray}
\sigma^{\dagger}_2 &=&
\begin{pmatrix}
0 & -i \\ i & 0
\end{pmatrix}
= \sigma_2 \\
\sigma^{\dagger}_2 \sigma_2 &=&
\begin{pmatrix}
0 & -i \\ i & 0
\end{pmatrix}
\begin{pmatrix}
0 & -i \\ i & 0
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
= I
\end{eqnarray}
$$
$$
\begin{eqnarray}
\sigma^{\dagger}_3 &=&
\begin{pmatrix}
1 & 0 \\ 0 & -1
\end{pmatrix}
= \sigma_3 \\
\sigma^{\dagger}_3 \sigma_3 &=&
\begin{pmatrix}
1 & 0 \\ 0 & -1
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\ 0 & -1
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
= I
\end{eqnarray}
$$
2.20
$$
\begin{eqnarray}
A^{'}_{ij} &=& \left\langle v_i \middle| A \middle| v_j \right\rangle \\
&=& \sum_k \left\langle v_i \middle| w_k \right\rangle \left\langle w_k \middle| A \middle| v_j \right\rangle \\
&=& \sum_{k,l} \left\langle v_i \middle| w_k \right\rangle \left\langle w_k \middle| A \middle| w_l \right\rangle \left\langle w_l \middle| v_j \right\rangle \\
&=& \sum_{k,l} \left\langle v_i \middle| U \middle| v_k \right\rangle \left\langle w_k \middle| A \middle| w_l \right\rangle \left\langle v_l \middle| U^{\dagger} \middle| v_j \right\rangle \\
&=& \sum_{k,l} U_{ik} A^{''}_{kl} U^{\dagger}_{lj}
\end{eqnarray}
$$
where $U \equiv \sum_m \left| w_m \right\rangle \left\langle v_m \right|$.
2.22
$$
\begin{eqnarray}
H \left| \lambda_i \right\rangle &=& \lambda_i \left| \lambda_i \right\rangle \;\;\;\; &(1)\\
H \left| \lambda_j \right\rangle &=& \lambda_j \left| \lambda_j \right\rangle \;\;\;\; &(2)\\
\end{eqnarray}
$$
$\left\langle \lambda_j \right| \times$ (1) and $\left\langle \lambda_i \right| \times$ (2):
$$
\begin{eqnarray}
\left\langle \lambda_j \middle| H \middle| \lambda_i \right\rangle &=& \lambda_i \left\langle \lambda_j \middle| \lambda_i \right\rangle \;\;\;\; &(3) \\
\left\langle \lambda_i \middle| H \middle| \lambda_j \right\rangle &=& \lambda_j \left\langle \lambda_i \middle| \lambda_j \right\rangle \;\;\;\; &(4) \\
\left\langle \lambda_j \middle| H^{\dagger} \middle| \lambda_i \right\rangle &=& \left\langle \lambda_j \middle| H \middle| \lambda_i \right\rangle = \lambda_j^{*} \left\langle \lambda_j \middle| \lambda_i \right\rangle = \lambda_j \left\langle \lambda_j \middle| \lambda_i \right\rangle \;\;\;\; &(4)^{'}
\end{eqnarray}
$$
$(3) - (4)^{'}$
$$
\begin{eqnarray}
0 = \left( \lambda_i - \lambda_j \right) \left\langle \lambda_j \middle| \lambda_i \right\rangle
\end{eqnarray}
$$
Therefore, $\lambda_i \neq \lambda_j \rightarrow \left\langle \lambda_i \middle| \lambda_j \right\rangle = 0$.
2.23
$$
\begin{eqnarray}
P &\equiv& \sum_i \left| i \right\rangle \left\langle i \right| \\
P^2 &=& \sum_{i,j} \left| i \right\rangle \left\langle i \middle| j \right\rangle \left\langle j \right| \\
&=& \sum_i \left| i \right\rangle \left\langle i \right| = P.
\end{eqnarray}
$$
$$
\begin{eqnarray}
P \left| \lambda \right\rangle &=& \lambda \left| \lambda \right\rangle \\
P^2 \left| \lambda \right\rangle &=& \lambda P \left| \lambda \right\rangle = \lambda^2 \left| \lambda \right\rangle
\end{eqnarray}
$$
Since $P = P^2$,
$$
\lambda = \lambda^2
$$
$$
\Leftrightarrow \lambda \left( \lambda - 1 \right) = 0 \;\;\; \therefore \lambda = 0,1.
$$
2.24
Let $A$ be a positive operator. $A$ can be written as,
$$
\begin{eqnarray}
A &=& \frac{A+A^{\dagger}}{2}+i\frac{A-A^{\dagger}}{2i} \\
&=& B + iC, \;\text{where}\; B=\frac{A+A^{\dagger}}{2},\;C=\frac{A-A^{\dagger}}{2i}\;\text{are both Hermitian}.
\end{eqnarray}
$$
Then,
$$
\begin{eqnarray}
\left\langle v \middle| A \middle| v\right\rangle &=& \left\langle v \middle| B+iC \middle| v \right\rangle \\
&=& \left\langle v \middle| B \middle| v \right\rangle + i \left\langle v \middle| C \middle| v \right\rangle
\end{eqnarray}
$$
Since $A$ is a positive operator, $\left\langle v \middle| A \middle| v\right\rangle$ is real for every $\left| v \right\rangle$; because $B$ and $C$ are Hermitian, $\left\langle v \middle| B \middle| v \right\rangle$ and $\left\langle v \middle| C \middle| v \right\rangle$ are real as well, so the imaginary part $\left\langle v \middle| C \middle| v \right\rangle$ must vanish for every $\left| v \right\rangle$. A Hermitian operator whose expectation values all vanish is the zero operator, so $C=0$ and $A=B$. Therefore, $A$ is Hermitian.
2.25
$$
\left\langle v \middle| \left( A^{\dagger}A \right) \middle| v \right\rangle = \left\langle v \middle| A^{\dagger}A \middle| v \right\rangle = \left| A\left| v \right\rangle \right|^2 \ge 0.
$$
2.26
$$
\begin{eqnarray}
\left| \psi \right\rangle^{\otimes 2} &=& \left( \frac{1}{\sqrt{2}} \right)^2 \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right) \otimes \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right) \\
&=& \frac{1}{2} \left( \left| 0 \right\rangle \otimes \left| 0 \right\rangle + \left| 0 \right\rangle \otimes \left| 1 \right\rangle + \left| 1 \right\rangle \otimes \left| 0 \right\rangle + \left| 1 \right\rangle \otimes \left| 1 \right\rangle \right) \\
&=& \frac{1}{2} \left( \left| 00 \right\rangle + \left| 01 \right\rangle + \left| 10 \right\rangle + \left| 11 \right\rangle \right) \\
\left| \psi \right\rangle^{\otimes 3} &=& \left( \frac{1}{\sqrt{2}} \right)^{3} \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right) \otimes \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right) \otimes \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right) \\
&=& \frac{1}{2\sqrt{2}} \left( \left|000\right\rangle + \left|001\right\rangle + \left|010\right\rangle + \left|011\right\rangle + \left|100\right\rangle + \left|101\right\rangle + \left|110\right\rangle + \left|111\right\rangle \right)
\end{eqnarray}
$$
Kronecker Product:
$$
\begin{eqnarray}
\left| \psi \right\rangle &=& \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 \\ 1
\end{pmatrix} \\
\left| \psi \right\rangle^{\otimes 2} &=& \frac{1}{2}
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
\otimes
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
= \frac{1}{2}
\begin{pmatrix}
1 \\ 1 \\ 1 \\ 1
\end{pmatrix} \\
\left| \psi \right\rangle^{\otimes 3} &=& \frac{1}{2\sqrt{2}}
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
\otimes
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
\otimes
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
= \frac{1}{2\sqrt{2}}
\begin{pmatrix}
1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1
\end{pmatrix}
\end{eqnarray}
$$
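A quick numpy check using np.kron (the variable names are just illustrative):
```python
import numpy as np

plus = np.array([1, 1]) / np.sqrt(2)   # |psi> = (|0> + |1>)/sqrt(2)

psi2 = np.kron(plus, plus)             # |psi>^{⊗2}
psi3 = np.kron(psi2, plus)             # |psi>^{⊗3}

print(psi2)   # [0.5 0.5 0.5 0.5]
print(psi3)   # all eight amplitudes equal to 1/(2*sqrt(2)) ≈ 0.3536
```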
2.27
(a)
$$
\begin{eqnarray}
X \otimes Z &=&
\begin{pmatrix}
0 & 1 \\ 1 & 0
\end{pmatrix}
\otimes
\begin{pmatrix}
1 & 0 \\ 0 & -1
\end{pmatrix}
=
\begin{pmatrix}
0 & 0 & 1 & 0 \\
0 & 0 & 0 & -1 \\
1 & 0 & 0 & 0 \\
0 & -1 & 0 & 0
\end{pmatrix}
\end{eqnarray}
$$
(b)
$$
I \otimes X =
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
\otimes
\begin{pmatrix}
0 & 1 \\ 1 & 0
\end{pmatrix}
=
\begin{pmatrix}
0 & 1 & 0 & 0 \\
1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0
\end{pmatrix}
$$
(c)
$$
X \otimes I =
\begin{pmatrix}
0 & 1 \\ 1 & 0
\end{pmatrix}
\otimes
\begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}
=
\begin{pmatrix}
0 & 0 & 1 & 0 \\
0 & 0 & 0 & 1 \\
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0
\end{pmatrix}
$$
Since $I\otimes X \ne X\otimes I$, the tensor product is not commutative.
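The same computation in numpy:
```python
import numpy as np

I = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])

print(np.kron(X, Z))                               # matches the 4x4 matrix for X ⊗ Z above
print(np.allclose(np.kron(I, X), np.kron(X, I)))   # False: I ⊗ X ≠ X ⊗ I
```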
2.28
$$
\begin{eqnarray}
\left(A\otimes B\right)^* &=&
\begin{pmatrix}
a_{11}^*B^* & \cdots & a_{1N}^*B^* \\
\vdots & \ddots & \vdots \\
a_{N1}^*B^* & \cdots & a_{NN}^*B^*
\end{pmatrix}
= A^* \otimes B^* \\
\left(A \otimes B\right)^T &=&
\begin{pmatrix}
a_{11}B^T & \cdots & a_{N1}B^T \\
\vdots & \ddots & \vdots \\
a_{1N}B^T & \cdots & a_{NN}B^T
\end{pmatrix}
= A^T \otimes B^T \\
\left( A \otimes B \right) ^{\dagger} &=& \left( \left( A \otimes B \right) ^* \right) ^T = \left( A^* \right)^T \otimes \left( B^* \right)^T = A^{\dagger} \otimes B^{\dagger}
\end{eqnarray}
$$
2.29
Let $U_1$ and $U_2$ be unitary operators.
$$
\left( U_1 \otimes U_2 \right) \left( U_1 \otimes U_2 \right)^{\dagger} = \left( U_1 \otimes U_2 \right) \left( U_1^{\dagger} \otimes U_2^{\dagger} \right) = U_1 U_1^{\dagger} \otimes U_2 U_2^{\dagger} = I \otimes I
$$
2.30
Let $H_1$ and $H_2$ be Hermitian operators.
$$
\left( H_1 \otimes H_2 \right)^{\dagger} = H_1^{\dagger} \otimes H_2^{\dagger} = H_1 \otimes H_2
$$
2.31
Let $A_1$ and $A_2$ be positive operators. Then,
$$
\begin{eqnarray}
\left( \left| v \right\rangle \otimes \left| w \right\rangle, A_1 \otimes A_2 \left| v \right\rangle \otimes \left| w \right\rangle \right) &=& \left( \left\langle v \right| \otimes \left\langle w \right| \right) \left( A_1 \otimes A_2 \right) \left( \left| v \right\rangle \otimes \left| w \right\rangle \right) = \left\langle v \middle| A_1 \middle| v \right\rangle \left\langle w \middle| A_2 \middle| w \right\rangle \ge 0.
\end{eqnarray}
$$
2.32
Let $P_1$ and $P_2$ be projectors. Then,
$$
\left( P_1 \otimes P_2 \right)^{\dagger} = P_1^{\dagger} \otimes P_2^{\dagger} = P_1 \otimes P_2, \;\;\;\; \left( P_1 \otimes P_2 \right)^2 = \left( P_1 \otimes P_2 \right) \left( P_1 \otimes P_2 \right) = P_1^2 \otimes P_2^2 = P_1 \otimes P_2
$$
2.33
$$
\begin{eqnarray}
H &=& \frac{1}{\sqrt{2}}\left[ \left( \left| 0 \right\rangle + \left| 1 \right\rangle \right) \left\langle 0 \right| + \left( \left| 0 \right\rangle - \left| 1 \right\rangle \right) \left\langle 1 \right| \right] \\
&=& \frac{1}{\sqrt{2}} \left[ \left| 0 \right\rangle \left\langle 0 \right| + \left| 1 \right\rangle \left\langle 0 \right| + \left| 0 \right\rangle \left\langle 1 \right| - \left| 1 \right\rangle \left\langle 1 \right| \right] \\
&=& \frac{1}{\sqrt{2}} \sum_{x,y} \left( -1 \right)^{x\cdot y} \left| x \right\rangle \left\langle y \right|
\end{eqnarray}
$$
Then,
$$
\begin{eqnarray}
H^{\otimes n} &=& \frac{1}{\sqrt{2^n}} \sum_{x_1,y_1} \left( -1 \right)^{x_1 \cdot y_1} \left| x_1 \right\rangle \left\langle y_1 \right| \otimes \sum_{x_2,y_2} \left( -1 \right)^{x_2 \cdot y_2} \left| x_2 \right\rangle \left\langle y_2 \right| \otimes \cdots \\
&=& \frac{1}{\sqrt{2^n}} \sum_{\boldsymbol{x},\boldsymbol{y}} \left( -1 \right)^{\boldsymbol{x} \cdot \boldsymbol{y}} \left| \boldsymbol{x} \right\rangle \left\langle \boldsymbol{y} \right|.
\end{eqnarray}
$$
$$
\begin{eqnarray}
H^{\otimes 2} &=& \frac{1}{2} \big[ \left| 00 \right\rangle \left\langle 00 \right| + \left| 01 \right\rangle \left\langle 00 \right| + \left| 00 \right\rangle \left\langle 01 \right| - \left| 01 \right\rangle \left\langle 01 \right| \\
&{}& + \left| 10 \right\rangle \left\langle 00 \right| + \left| 11 \right\rangle \left\langle 00 \right| + \left| 10 \right\rangle \left\langle 01 \right| - \left| 11 \right\rangle \left\langle 01 \right| \\
&{}& + \left| 00 \right\rangle \left\langle 10 \right| + \left| 01 \right\rangle \left\langle 10 \right| + \left| 00 \right\rangle \left\langle 11 \right| - \left| 01 \right\rangle \left\langle 11 \right| \\
&{}& -\left| 10 \right\rangle \left\langle 10 \right| - \left| 11 \right\rangle \left\langle 10 \right| - \left| 10 \right\rangle \left\langle 11 \right| + \left| 11 \right\rangle \left\langle 11 \right| \big] \\
&=& \frac{1}{2}
\begin{pmatrix}
1 & 1 & 1 & 1 \\
1 & -1 & 1 & -1 \\
1 & 1 & -1 & -1 \\
1 & -1 & -1 & 1
\end{pmatrix}
\end{eqnarray}
$$
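A quick numpy check that $H^{\otimes 2}$ matches $\frac{1}{2}\left( -1 \right)^{x\cdot y}$ entrywise:
```python
import numpy as np

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
H2 = np.kron(H, H)

# Entry (x, y) of H^{⊗2} should be (-1)^{x·y} / 2, with x·y the bitwise inner product.
expected = np.empty((4, 4))
for x in range(4):
    for y in range(4):
        dot = bin(x & y).count("1") % 2   # bitwise inner product of x and y, mod 2
        expected[x, y] = (-1) ** dot / 2

print(np.allclose(H2, expected))          # True
```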
2.34
Let
$$
A =
\begin{pmatrix}
4 & 3 \\ 3 & 4
\end{pmatrix}.
$$
Then,
$$
{\rm det}\left( A - \lambda I \right) =
\begin{vmatrix}
4-\lambda & 3 \\ 3 & 4-\lambda
\end{vmatrix}
= \left( 4-\lambda \right)^2 - 9 = 0
$$
$$
\therefore \lambda = 1,\;7
$$
For $\lambda = 1$,
$$
\begin{pmatrix}
3 & 3 \\ 3 & 3
\end{pmatrix}
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_1 \right\rangle =
\begin{pmatrix}
a_1 \\ b_1
\end{pmatrix}
= \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 \\ -1
\end{pmatrix}.
$$
For $\lambda = 7$,
$$
\begin{pmatrix}
-3 & 3 \\ 3 & -3
\end{pmatrix}
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
$$
This yields an eigenvector
$$
\left| \lambda_7 \right\rangle =
\begin{pmatrix}
a_2 \\ b_2
\end{pmatrix}
= \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 \\ 1
\end{pmatrix}.
$$
Then $A$ is written as
$$
A = \lambda_1 \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| + \lambda_7 \left| \lambda_7 \right\rangle \left\langle \lambda_7 \right| = \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| + 7 \left| \lambda_7 \right\rangle \left\langle \lambda_7 \right|.
$$
Thus,
$$
\begin{eqnarray}
\sqrt{A} &=& \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| + \sqrt{7} \left| \lambda_7 \right\rangle \left\langle \lambda_7 \right| \\ &=& \frac{1}{2}
\begin{pmatrix}
1 \\ -1
\end{pmatrix}
\begin{pmatrix}
1 & -1
\end{pmatrix}
+ \frac{\sqrt{7}}{2}
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
\begin{pmatrix}
1 & 1
\end{pmatrix} \\
&=& \frac{1}{2}
\begin{pmatrix}
1+ \sqrt{7} & -1 + \sqrt{7} \\
-1 + \sqrt{7} & 1 + \sqrt{7}
\end{pmatrix},\\
\ln A &=& \ln 1 \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| + \ln 7 \left| \lambda_7 \right\rangle \left\langle \lambda_7 \right| \\ &=& \frac{\ln 7}{2}
\begin{pmatrix}
1 \\ 1
\end{pmatrix}
\begin{pmatrix}
1 & 1
\end{pmatrix} \\
&=& \frac{\ln 7}{2}
\begin{pmatrix}
1 & 1 \\ 1 & 1
\end{pmatrix}.
\end{eqnarray}
$$
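A quick numpy check, computing $\sqrt{A}$ and $\ln A$ through the same spectral decomposition used above (the helper name apply_func is just illustrative):
```python
import numpy as np

A = np.array([[4.0, 3.0], [3.0, 4.0]])
evals, evecs = np.linalg.eigh(A)        # eigenvalues 1 and 7

def apply_func(f):
    # Apply the scalar function f to A through its spectral decomposition.
    return evecs @ np.diag(f(evals)) @ evecs.T

sqrtA = apply_func(np.sqrt)
logA = apply_func(np.log)

print(np.allclose(sqrtA @ sqrtA, A))    # True: (sqrt A)^2 = A
print(np.round(sqrtA, 6))               # matches (1/2)[[1+sqrt7, -1+sqrt7], ...]
print(np.round(logA, 6))                # matches (ln 7 / 2) [[1, 1], [1, 1]]
```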
2.35
Let $\vec{v} = \left(v_1, v_2, v_3\right)$. Then,
$$
\begin{eqnarray}
\vec{v}\cdot \vec{\sigma} &=& v_1
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
+ v_2
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
+ v_3
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix} \\
&=&
\begin{pmatrix}
v_3 & v_1-iv_2 \\
v_1 + iv_2 & -v_3
\end{pmatrix}.
\end{eqnarray}
$$
Find eigenvalues by solving characteristic equation.
$$
\begin{eqnarray}
\det \left( \vec{v}\cdot \vec{\sigma} - \lambda I \right) &=&
\begin{vmatrix}
v_3-\lambda & v_1-iv_2 \\
v_1+iv_2 & -v_3-\lambda
\end{vmatrix} \\
&=& -\left( v_3-\lambda \right)\left( v_3+\lambda \right) - \left( v_1-iv_2 \right)\left( v_1+iv_2 \right) \\
&=& \lambda^2 -\left( v_1^2 + v_2^2 + v_3^2 \right) \\
&=& \lambda^2 -1 \;\;\;\;\;\; \left( \because \vec{v} \text{ is a unit vector} \right) \\
&=& 0 \;\;\;\;\;\; \therefore \lambda=\pm 1
\end{eqnarray}
$$
So, $\vec{v}\cdot\vec{\sigma}$ has a spectral decomposition
$$
\vec{v}\cdot\vec{\sigma} = \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| - \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| .
$$
Therefore,
$$
\begin{eqnarray}
\exp \left( i\theta \vec{v} \cdot \vec{\sigma} \right) &=& \exp \left( i\theta \right) \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| + \exp \left( -i\theta \right) \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| \\
&=& \left( \cos\theta + i\sin\theta \right) \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| + \left( \cos\theta - i\sin\theta \right) \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| \\
&=& \cos\theta \left( \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| + \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| \right) + i\sin\theta \left( \left| \lambda_1 \right\rangle \left\langle \lambda_1 \right| - \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| \right) \\
&=& \cos \left( \theta \right) I + i\sin\left( \theta \right) \vec{v}\cdot\vec{\sigma}
\end{eqnarray}
$$
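A quick numpy check of this identity for an arbitrarily chosen unit vector and angle, computing the exponential through the spectral decomposition of the Hermitian matrix $\vec{v}\cdot\vec{\sigma}$:
```python
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

v = np.array([1.0, 2.0, 2.0])
v = v / np.linalg.norm(v)               # an arbitrary unit vector
vs = v[0] * X + v[1] * Y + v[2] * Z
theta = 0.7                             # an arbitrary angle

evals, evecs = np.linalg.eigh(vs)
expm = evecs @ np.diag(np.exp(1j * theta * evals)) @ evecs.conj().T

rhs = np.cos(theta) * np.eye(2) + 1j * np.sin(theta) * vs
print(np.allclose(expm, rhs))           # True
```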
2.36
$$
\begin{eqnarray}
\mathrm{tr} \left( X \right) &=& \mathrm{tr}
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
= 0 \\
\mathrm{tr} \left( Y \right) &=& \mathrm{tr}
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
= 0 \\
\mathrm{tr} \left( Z \right) &=& \mathrm{tr}
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
= 0
\end{eqnarray}
$$
2.37
Suppose $\dim \left( A \right) = \dim \left( B \right) = n$. Then,
$$
\begin{eqnarray}
\mathrm{tr} \left( AB \right) = \sum_i^n \left( \sum_j^n A_{ij}B_{ji} \right) = \sum_j^n \left( \sum_i^n B_{ji}A_{ij} \right) = \mathrm{tr} \left( BA \right)
\end{eqnarray}
$$
2.38
Suppose $\dim \left( A \right) = \dim \left( B \right) = n$. Then,
$$
\begin{eqnarray}
\mathrm{tr} \left( A + B \right) &=& \sum_i^n \left( A + B \right)_{ii} = \sum_i^n \left( A_{ii} + B_{ii} \right) = \mathrm{tr} \left( A \right) + \mathrm{tr} \left( B \right) \\
\mathrm{tr} \left( zA \right) &=& \sum_i^n zA_{ii} = z\sum_i^n A_{ii} = z\,\mathrm{tr} \left( A \right)
\end{eqnarray}
$$
2.39
(1)
We show that this function satisfies conditions (1), (2) and (3) in “2.1.4 Inner products” (p.65).
$$
\begin{align}
\left( A, \sum_i \lambda_i B_i \right) &= \mathrm{tr} \left( A^{\dagger}\sum_i \lambda_i B_i \right) \\
&= \mathrm{tr} \left( \sum_i \lambda_i A^{\dagger}B_i \right) \\
&= \sum_i \lambda_i \mathrm{tr} \left( A^{\dagger}B_i \right) \\
&= \sum_i \lambda_i \left( A, B_i \right).
\end{align}
$$
$$
\begin{align}
\left( A, B \right)^* &= \left( \mathrm{tr} \left( A^{\dagger}B \right) \right)^* \\
&= \mathrm{tr} \left( \left( A^{\dagger}B \right)^{\dagger} \right) \\
&= \mathrm{tr} \left( B^{\dagger}A \right) \\
&= \left( B, A \right).
\end{align}
$$
$$
\begin{align}
\left( A,A \right) &= \mathrm{tr} \left( A^{\dagger}A \right) \\
&= \sum_{i,j} \left| A_{ij} \right|^2 \ge 0.
\end{align}
$$
So, the function $(\cdot,\cdot)$ on $L_V\times L_V$ defined by $(A,B)\equiv \mathrm{tr}(A^{\dagger}B)$ is an inner product function.
(2)
A linear transformation $T:V\rightarrow V$ $\left( \dim V=d \right)$ can be represented as a $d\times d$ matrix. The $d\times d = d^2$ matrix units $E_{ij}$ (with a $1$ in entry $(i,j)$ and $0$ elsewhere) are linearly independent and span the space of such matrices, so the dimension of $L_V$ is $d^2$.
2.40
$$
\begin{align}
\left[ X,Y \right] &=
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
-
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
=
\begin{pmatrix}
2i & 0 \\
0 & -2i
\end{pmatrix}
= 2iZ \\
\left[ Y,Z \right] &=
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
-
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
=
\begin{pmatrix}
0 & 2i \\
2i & 0
\end{pmatrix}
= 2iX \\
\left[ Z,X \right] &=
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
-
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
=
\begin{pmatrix}
0 & 2 \\
-2 & 0
\end{pmatrix}
= 2iY
\end{align}
$$
2.41
$$
\begin{align}
\left\{ \sigma_1,\sigma_2 \right\} &=
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
+
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
= 0 \\
\left\{ \sigma_1,\sigma_3 \right\} &=
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
+
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
=0 \\
\left\{ \sigma_2,\sigma_1 \right\} &= \left\{ \sigma_1,\sigma_2 \right\} = 0 \\
\left\{ \sigma_2,\sigma_3 \right\} &=
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
+
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
= 0 \\
\left\{ \sigma_3,\sigma_1 \right\} &= \left\{ \sigma_1,\sigma_3 \right\} = 0 \\
\left\{ \sigma_3,\sigma_2 \right\} &= \left\{ \sigma_2,\sigma_3 \right\} = 0 \\
\sigma_1^2 &=
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\
0 & 1
\end{pmatrix}
= \mathbb{I} \\
\sigma_2^2 &=
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\
0 & 1
\end{pmatrix}
= \mathbb{I} \\
\sigma_3^2 &=
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\
0 & 1
\end{pmatrix}
= \mathbb{I}
\end{align}
$$
2.42
$$
\begin{equation}
\frac{\left[ A,B \right]+\left\{ A,B \right\}}{2} = \frac{AB-BA+AB+BA}{2} = AB
\end{equation}
$$
2.43
$$
\begin{align}
\sigma_j\sigma_k &= \frac{\left[ \sigma_j,\sigma_k \right] + \left\{ \sigma_j,\sigma_k \right\}}{2}\;\;\;\left( \because \mathrm{Exercise}\; 2.42 \right) \\
&= \frac{2i\sum_{l=1}^3\epsilon_{jkl}\sigma_l + 2\delta_{jk}\mathbb{I}}{2} \\
&= \delta_{jk}\mathbb{I} + i\sum_{l=1}^3\epsilon_{jkl}\sigma_l\;\;\;\left( \because \mathrm{Exercise}\;2.40,\;2.41 \right)
\end{align}
$$
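A quick numpy check of this relation over all index pairs $(j,k)$ (the small eps helper implementing the Levi-Civita symbol is just illustrative):
```python
import numpy as np

I = np.eye(2, dtype=complex)
sigma = [np.array([[0, 1], [1, 0]], dtype=complex),
         np.array([[0, -1j], [1j, 0]]),
         np.array([[1, 0], [0, -1]], dtype=complex)]

def eps(j, k, l):
    """Levi-Civita symbol for indices 0, 1, 2."""
    return (j - k) * (k - l) * (l - j) / 2

ok = True
for j in range(3):
    for k in range(3):
        rhs = (j == k) * I + 1j * sum(eps(j, k, l) * sigma[l] for l in range(3))
        ok &= np.allclose(sigma[j] @ sigma[k], rhs)
print(ok)   # True
```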
2.44
$$
\begin{align}
\left[ A,B \right] &= AB-BA = 0\;\;\; \therefore AB=BA \\
\left\{ A,B \right\} &= AB+BA = 2AB=0\;\;\; \left( \because AB=BA \right)
\end{align}
$$
Since $A$ is invertible, there exists $A^{-1}$ such that $AA^{-1}=A^{-1}A=\mathbb{I}$. Then,
$$
\begin{equation}
2B = A^{-1}\left( 2AB \right) = A^{-1}0 = 0\;\longrightarrow\;B=0
\end{equation}
$$
2.45
$$
\begin{equation}
\left[ A,B \right]^{\dagger} = \left( AB-BA \right)^{\dagger} = \left( B^{\dagger}A^{\dagger} - A^{\dagger}B^{\dagger} \right) = \left[ B^{\dagger},A^{\dagger} \right]
\end{equation}
$$
2.46
$$
\begin{equation}
\left[ A,B \right] = AB-BA = -\left( BA-AB \right) = -\left[ B,A \right]
\end{equation}
$$
2.47
$$
\begin{align}
\left\{ i\left[ A,B \right] \right\}^{\dagger} &= -i\left[ B^{\dagger},A^{\dagger} \right]\;\;\; \left( \because \mathrm{Exercise}\;2.45 \right) \\
&= -i\left[ B,A \right]\;\;\; \left( \because A,B\;\mathrm{are\;Hermitian} \right) \\
&= i\left[ A,B \right]\;\;\; \left( \because \;\mathrm{Exercise}\;2.46 \right)
\end{align}
$$
2.48
Polar decomposition (left): $A = UJ$.
(i) Positive matrix $P$
Since $P$ is a positive matrix, it has a spectral decomposition,
$$
\begin{equation}
P = \sum_{i} \lambda_i \left| i \right\rangle \left\langle i \right|.
\end{equation}
$$
Then,
$$
\begin{align}
J = \sqrt{P^{\dagger}P} &= \sqrt{\sum_{i,j}\lambda_i \lambda_j \left| i \right\rangle \left\langle i \middle| j \right\rangle \left\langle j \right|}\\
&= \sqrt{\sum_{i,j}\lambda_i\lambda_j \delta_{i,j} \left| i \right\rangle \left\langle j \right|}\\
&= \sqrt{\sum_i \lambda_i^2 \left| i \right\rangle \left\langle i \right|} = \sqrt{P^2} = P.
\end{align}
$$
Since $J = P$, we may take $U = I$: the polar decomposition of a positive matrix is simply $P = IP$.
(ii) Unitary matrix $U'$
$$
\begin{equation}
J = \sqrt{U'^{\dagger}U'} = I
\end{equation}
$$
Then the polar decomposition is $U' = U'I$, i.e. the unitary factor is $U'$ itself and $J = I$.
(iii) Hermitian matrix $H$
The spectral decomposition of $H$ is,
$$
\begin{equation}
H = \sum_i \lambda_i \left| i \right\rangle \left\langle i \right|.
\end{equation}
$$
Then,
$$
\begin{align}
J = \sqrt{H^{\dagger}H} &= \sqrt{H^2}\;\;\; \left( \because H^{\dagger} = H \right)\\
&= \sqrt{\sum_{i,j}\lambda_i \lambda_j \left| i \right\rangle \left\langle i \middle| j \right\rangle \left\langle j \right|}\\
&= \sqrt{\sum_{i,j}\lambda_i \lambda_j \delta_{i,j} \left| i \right\rangle \left\langle j \right|}\\
&= \sqrt{\sum_i \lambda_i^2 \left| i \right\rangle \left\langle i \right|} = \sum_i |\lambda_i| \left| i \right\rangle \left\langle i \right|.
\end{align}
$$
So $J = \sum_i |\lambda_i| \left| i \right\rangle \left\langle i \right|$ and $H = UJ$ with $U = \sum_i \mathrm{sign}\left( \lambda_i \right) \left| i \right\rangle \left\langle i \right|$ (taking the sign to be $+1$ when $\lambda_i = 0$).
2.49
The spectral decomposition of a normal matrix $A$ is,
$$
\begin{equation}
A = \sum_i \lambda_i \left| i \right\rangle \left\langle i \right|.
\end{equation}
$$
Then,
$$
\begin{align}
J = \sqrt{A^{\dagger}A} &= \sqrt{\sum_{i,j}\lambda_i^{*} \lambda_j \left| i \right\rangle \left\langle i \middle| j \right\rangle \left\langle j \right|}\\
&= \sqrt{\sum_{i,j}\lambda_i^{*} \lambda_j \delta_{i,j} \left| i \right\rangle \left\langle j \right|}\\
&= \sqrt{\sum_i \left|\lambda_i\right|^2 \left| i \right\rangle \left\langle i \right|} = \sum_i |\lambda_i | \left| i \right\rangle \left\langle i \right|.
\end{align}
$$
So $J = \sum_i |\lambda_i | \left| i \right\rangle \left\langle i \right|$ and $A = UJ$ with $U = \sum_i \frac{\lambda_i}{|\lambda_i|} \left| i \right\rangle \left\langle i \right|$, where the phase factor $\lambda_i/|\lambda_i|$ may be replaced by $1$ whenever $\lambda_i = 0$.
2.50
$$
\begin{equation}
A^{\dagger}A =
\begin{pmatrix}
1 & 1 \\
0 & 1
\end{pmatrix}
\begin{pmatrix}
1 & 0 \\
1 & 1
\end{pmatrix}
=
\begin{pmatrix}
2 & 1 \\
1 & 1
\end{pmatrix}
\end{equation}
$$
where
$$
\begin{equation}
A \equiv
\begin{pmatrix}
1 & 0 \\
1 & 1
\end{pmatrix}.
\end{equation}
$$
Then,
$$
\begin{align}
\det\left( A^{\dagger}A-\lambda I \right) =
\begin{vmatrix}
2-\lambda & 1 \\
1 & 1-\lambda
\end{vmatrix}
= \lambda^2 - 3\lambda + 1 = 0 \;\;\; \therefore \lambda = \frac{3\pm\sqrt{5}}{2}.
\end{align}
$$
For $\lambda_- = \frac{3-\sqrt{5}}{2}$,
$$
\begin{align}
\begin{pmatrix}
2-\frac{3-\sqrt{5}}{2} & 1 \\
1 & 1-\frac{3-\sqrt{5}}{2}
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}
\;\;\; \therefore \left| \lambda_- \right\rangle = \frac{1}{\sqrt{10+2\sqrt{5}}}
\begin{pmatrix}
2 \\ -1-\sqrt{5}
\end{pmatrix}.
\end{align}
$$
For $\lambda_+ = \frac{3+\sqrt{5}}{2}$,
$$
\begin{equation}
\begin{pmatrix}
2-\frac{3+\sqrt{5}}{2} & 1 \\
1 & 1-\frac{3+\sqrt{5}}{2}
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}
\;\;\; \therefore \left| \lambda_+ \right\rangle = \frac{1}{\sqrt{10-2\sqrt{5}}}
\begin{pmatrix}
2 \\ -1+\sqrt{5}
\end{pmatrix}.
\end{equation}
$$
Here,
$$
\begin{align}
\left\langle \lambda_- \middle| \lambda_- \right\rangle &= \frac{1}{10+2\sqrt{5}} \left( 4+\left( -1-\sqrt{5} \right)^2 \right) = 1 \\
\left\langle \lambda_+ \middle| \lambda_+ \right\rangle &= \frac{1}{10-2\sqrt{5}} \left( 4+\left( -1+\sqrt{5} \right)^2 \right) = 1 \\
\left\langle \lambda_- \middle| \lambda_+ \right\rangle &= \frac{1}{\sqrt{10+2\sqrt{5}}\sqrt{10-2\sqrt{5}}}\left( 4+\left( -1-\sqrt{5} \right)\left( -1+\sqrt{5} \right) \right) = 0.
\end{align}
$$
Thus,
$$
\begin{align}
J &= \sqrt{A^{\dagger}A} = \sqrt{\lambda_-} \left| \lambda_- \right\rangle \left\langle \lambda_- \right| + \sqrt{\lambda_+} \left| \lambda_+ \right\rangle \left\langle \lambda_+ \right| \\
J^{-1} &= \frac{1}{\sqrt{\lambda_-}} \left| \lambda_- \right\rangle \left\langle \lambda_- \right| + \frac{1}{\sqrt{\lambda_+}} \left| \lambda_+ \right\rangle \left\langle \lambda_+ \right| \\
&= \sqrt{\frac{2}{3-\sqrt{5}}} \frac{1}{10+2\sqrt{5}}
\begin{pmatrix}
4 & -2-2\sqrt{5} \\
-2-2\sqrt{5} & 6+2\sqrt{5}
\end{pmatrix}
+ \sqrt{\frac{2}{3+\sqrt{5}}}\frac{1}{10-2\sqrt{5}}
\begin{pmatrix}
4 & -2+2\sqrt{5} \\
-2+2\sqrt{5} & 6-2\sqrt{5}
\end{pmatrix} \\
&= \sqrt{\frac{2}{3-\sqrt{5}}} \frac{1}{5+\sqrt{5}}
\begin{pmatrix}
2 & -1-\sqrt{5} \\
-1-\sqrt{5} & 3+\sqrt{5}
\end{pmatrix}
+ \sqrt{\frac{2}{3+\sqrt{5}}}\frac{1}{5-\sqrt{5}}
\begin{pmatrix}
2 & -1+\sqrt{5} \\
-1+\sqrt{5} & 3-\sqrt{5}
\end{pmatrix} \\
&= \frac{1}{\sqrt{5}}
\begin{pmatrix}
2 & -1 \\
-1 & 3
\end{pmatrix} \\
U &= AJ^{-1} = \frac{1}{\sqrt{5}}
\begin{pmatrix}
1 & 0 \\
1 & 1
\end{pmatrix}
\begin{pmatrix}
2 & -1 \\
-1 & 3
\end{pmatrix}
= \frac{1}{\sqrt{5}}
\begin{pmatrix}
2 & -1 \\
1 & 2
\end{pmatrix}
\end{align}
$$
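A quick numpy check of this polar decomposition, computing $J=\sqrt{A^{\dagger}A}$ from the spectral decomposition of $A^{\dagger}A$:
```python
import numpy as np

A = np.array([[1.0, 0.0], [1.0, 1.0]])

# J = sqrt(A^dagger A) via the spectral decomposition of the positive matrix A^dagger A.
evals, evecs = np.linalg.eigh(A.T @ A)
J = evecs @ np.diag(np.sqrt(evals)) @ evecs.T
U = A @ np.linalg.inv(J)

print(np.round(J, 6))                    # (1/sqrt 5) [[3, 1], [1, 2]]
print(np.round(U, 6))                    # (1/sqrt 5) [[2, -1], [1, 2]]
print(np.allclose(U @ U.T, np.eye(2)))   # True: U is unitary (here real orthogonal)
print(np.allclose(U @ J, A))             # True: A = UJ
```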
2.51
$$
\begin{equation}
HH^{\dagger} = \frac{1}{2}
\begin{pmatrix}
1 & 1 \\
1 & -1
\end{pmatrix}
\begin{pmatrix}
1 & 1 \\
1 & -1
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\
0 & 1
\end{pmatrix}.
\end{equation}
$$
2.52
$$
\begin{equation}
H^2 = \frac{1}{2}
\begin{pmatrix}
1 & 1 \\
1 & -1
\end{pmatrix}
\begin{pmatrix}
1 & 1 \\
1 & -1
\end{pmatrix}
=
\begin{pmatrix}
1 & 0 \\
0 & 1
\end{pmatrix}.
\end{equation}
$$
2.53
$$
\begin{align}
\det \left( H - \lambda I \right) &=
\begin{vmatrix}
\frac{1}{\sqrt{2}}-\lambda & \frac{1}{\sqrt{2}} \\
\frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{2}}-\lambda
\end{vmatrix}\\
&= \lambda^2-1 = 0\;\;\; \therefore \lambda = \pm 1.
\end{align}
$$
For $\lambda_{-1} = -1$,
$$
\begin{align}
\frac{1}{\sqrt{2}}
\begin{pmatrix}
1 & 1 \\
1 & -1
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
= -
\begin{pmatrix}
a \\b
\end{pmatrix} \;\;\; \therefore \left| \lambda_{-1} \right\rangle = \frac{1}{\sqrt{4+2\sqrt{2}}}
\begin{pmatrix}
1 \\ -1-\sqrt{2}
\end{pmatrix}.
\end{align}
$$
For $\lambda_{+1} = 1$,
$$
\begin{align}
\frac{1}{\sqrt{2}}
\begin{pmatrix}
1 & 1 \\
1 & -1
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
a \\ b
\end{pmatrix} \;\;\; \therefore \left| \lambda_{+1} \right\rangle = \frac{1}{\sqrt{4-2\sqrt{2}}}
\begin{pmatrix}
1 \\ -1+\sqrt{2}
\end{pmatrix}.
\end{align}
$$
2.54
Solution 1:
$A$ and $B$ are simultaneously diagonalizable since they are Hermitian and commute, i.e. $A = \sum_i a_i \left| i \right\rangle \left\langle i \right|,\; B = \sum_j b_j \left| j \right\rangle \left\langle j \right|$ in a common orthonormal eigenbasis. Then,
$$
\begin{align}
\exp \left( A \right)\exp \left( B \right) &= \sum_i \exp\left( a_i \right)\left| i \right\rangle \left\langle i \right| \sum_j \exp\left( b_j \right) \left| j \right\rangle \left\langle j \right| \\
&= \sum_{i,j} \exp\left(a_i + b_j\right) \delta_{i,j} \left| i \right\rangle \left\langle j \right| \\
&= \sum_i \exp\left(a_i + b_i\right) \left| i \right\rangle \left\langle i \right| \\
&= \exp \left( A + B \right).
\end{align}
$$
Solution 2:
Using the Baker–Campbell–Hausdorff formula, which is frequently used in quantum mechanics, $\exp(A)\exp(B)$ can be written as follows:
$$
\begin{equation}
\exp(A)\exp(B) = \exp(C)
\end{equation}
$$
where $C$ is,
$$
\begin{equation}
C = A + B + \frac{1}{2}\left[ A,B \right] + \frac{1}{12}\left[ A, \left[ A,B \right] \right] - \frac{1}{12} \left[ B, \left[ A,B \right] \right] + \cdots.
\end{equation}
$$
Since $A$ commutes with $B$, $\exp(A)\exp(B) = \exp(A+B)$.
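A quick numpy check with a pair of commuting Hermitian matrices, using a truncated power series for the matrix exponential (a rough sketch; the matrices chosen are arbitrary):
```python
import numpy as np

def expm_series(M, terms=60):
    """Matrix exponential by truncated power series (fine for small matrices)."""
    out = np.eye(M.shape[0], dtype=complex)
    term = np.eye(M.shape[0], dtype=complex)
    for k in range(1, terms):
        term = term @ M / k
        out = out + term
    return out

# Two commuting Hermitian matrices (B is a polynomial in A, so [A, B] = 0).
A = np.array([[0, 1], [1, 0]], dtype=complex)
B = 2 * A + 3 * np.eye(2)

print(np.allclose(expm_series(A) @ expm_series(B), expm_series(A + B)))  # True
```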
2.55
Solution 1:
From eq.(2.90),
$$
\begin{align}
U\left(t_1,t_2 \right) &= \exp\left[ \frac{-iH\left( t_2-t_1 \right)}{\hbar} \right],\\
U^{\dagger}\left( t_1,t_2 \right) &= \exp\left[ \frac{iH\left( t_2-t_1 \right)}{\hbar} \right].
\end{align}
$$
Then,
$$
\begin{align}
U\left( t_1,t_2 \right)U^{\dagger}\left( t_1,t_2 \right) &= \exp\left[ \frac{-iH\left( t_2-t_1 \right)}{\hbar} \right]\exp\left[ \frac{iH\left( t_2-t_1 \right)}{\hbar} \right] \\
&= \sum_E\exp\left[\frac{-iE\left( t_2-t_1 \right)}{\hbar} \right] \left| E \right\rangle \left\langle E \right| \sum_{E'}\exp\left[ \frac{iE'\left( t_2-t_1 \right)}{\hbar} \right] \left| E' \right\rangle \left\langle E' \right| \;\;\; \left( \because \mathrm{eq.}\;(2.87) \right) \\
&= \sum_{E,E'} \exp\left[ \frac{-iE\left( t_2-t_1 \right)+iE'\left( t_2-t_1 \right)}{\hbar} \right] \delta_{E,E'} \left| E \right\rangle \left\langle E' \right| \\
&= \sum_E \exp\left( 0 \right) \left| E \right\rangle \left\langle E \right| \\
&= \sum_E \left| E \right\rangle \left\langle E \right| = 1.
\end{align}
$$
Solution 2:
$$
\begin{align}
U\left( t_1,t_2 \right)U^{\dagger}\left( t_1,t_2 \right) &= \exp\left[ \frac{-iH\left( t_2-t_1 \right)}{\hbar} \right]\exp\left[ \frac{iH\left( t_2-t_1 \right)}{\hbar} \right] \\
&= \sum_k \frac{ \left[ -iH\left( t_2-t_1 \right)/\hbar \right]^{k}} {k!} \sum_l \frac{ \left[ iH\left( t_2-t_1 \right)/\hbar \right]^{l}} {l!} \\
&= \sum_{N=0}^{\infty}\sum_{k=0}^{N} \frac{\left[ -iH\left( t_2-t_1 \right)/\hbar \right]^{N-k}}{\left( N-k \right)!} \frac{\left[ iH\left( t_2-t_1 \right)/\hbar \right]^k}{k!} \\
&= \sum_{N=0}^{\infty} \left[ \frac{-iH\left( t_2-t_1 \right)}{\hbar} \right]^N \sum_{k=0}^N \frac{\left[ -iH\left( t_2-t_1 \right)/\hbar \right]^{-k}}{\left( N-k \right)!} \frac{\left[ iH\left( t_2-t_1 \right)/\hbar \right]^k}{k!} \\
&= \sum_{N=0}^{\infty} \left[ \frac{-iH\left( t_2-t_1 \right)}{\hbar} \right]^N \sum_{k=0}^N \left( -1 \right)^{-k} \frac{\left[ iH\left( t_2-t_1 \right)/\hbar \right]^{-k}}{\left( N-k \right)!} \frac{\left[ iH\left( t_2-t_1 \right)/\hbar \right]^k}{k!} \\
&= \sum_{N=0}^{\infty} \left[ \frac{-iH\left( t_2-t_1 \right)}{\hbar} \right]^N \sum_{k=0}^N \frac{\left( -1 \right)^k}{\left( N-k \right)!\;k!}
\end{align}
$$
When $N>0$, remembering the Binomial theorem, which is,
$$
\begin{equation}
(x+y)^N = \sum_{k=0}^N
\begin{pmatrix}
N \\ k
\end{pmatrix}x^{N-k}y^k
\end{equation}
$$
the latter part will be
$$
\begin{equation}
\sum_{k=0}^N \frac{\left( -1 \right)^k}{\left( N-k \right)!\;k!} = \frac{\left( 1-1 \right)^N}{N!} = 0.
\end{equation}
$$
Thus,
$$
\begin{equation}
U\left( t_1,t_2 \right)U^{\dagger}\left( t_1,t_2 \right) = 1.
\end{equation}
$$
2.56
Since a unitary operator $U$ is normal, it has a spectral decomposition $U = \sum_j a_j \left| j \right\rangle \left\langle j \right|$. Then,
$$
\begin{align}
K &\equiv -i \log(U) \\
&= -i\sum_j\log\left( a_j \right) \left| j \right\rangle \left\langle j \right| \\
&= -i\sum_j\log\left( e^{i\theta_j} \right) \left| j \right\rangle \left\langle j \right| \;\;\; \left( \because \mathrm{Exercise\; 2.18} \right) \\
&= -i\sum_j i\theta_j \left| j \right\rangle \left\langle j \right| \\
&= \sum_j \theta_j \left| j \right\rangle \left\langle j \right| \\
\therefore K^{\dagger} &= K.
\end{align}
$$
By construction, $\exp\left( iK \right) = \sum_j e^{i\theta_j} \left| j \right\rangle \left\langle j \right| = U$, so $U = \exp\left( iK \right)$ with $K$ Hermitian.
2.57
When the measurement $\{L_l\}$ gives outcome $l$, the post-measurement state $\left| \psi_1 \right\rangle$ is,
$$
\begin{equation}
\left| \psi_1 \right\rangle = \frac{L_l \left| \psi \right\rangle}{\sqrt{\left\langle \psi \middle| L_l^{\dagger}L_l \middle| \psi \right\rangle}}.
\end{equation}
$$
Then, when the subsequent measurement $\{M_m\}$ gives outcome $m$, the post-measurement state $\left| \psi_2 \right\rangle$ is,
$$
\begin{align}
\left| \psi_2 \right\rangle &= \frac{M_m \left| \psi_1 \right\rangle}{\sqrt{\left\langle \psi_1 \middle| M_m^{\dagger}M_m \middle| \psi_1 \right\rangle}} \\
&= \frac{M_mL_l\left| \psi \right\rangle}{\sqrt{\left\langle \psi \middle| L_l^{\dagger}L_l \middle| \psi \right\rangle}} \cdot \frac{\sqrt{\left\langle \psi \middle| L_l^{\dagger}L_l \middle| \psi \right\rangle}}{\sqrt{\left\langle \psi \middle| L_l^{\dagger}M_m^{\dagger}M_mL_l \middle| \psi \right\rangle}} \\
&= \frac{M_mL_l \left| \psi \right\rangle}{\sqrt{\left\langle \psi \middle| L_l^{\dagger}M_m^{\dagger}M_mL_l \middle| \psi \right\rangle}}.
\end{align}
$$
On the other hand, when the single combined measurement $\{N_{lm}\}$ with $N_{lm} = M_m L_l$ gives outcome $(l,m)$, the post-measurement state $\left| \psi_3 \right\rangle$ is,
$$
\begin{equation}
\left| \psi_3 \right\rangle = \frac{M_mL_l\left| \psi \right\rangle}{\sqrt{\left\langle \psi \middle| L_l^{\dagger}M_m^{\dagger}M_mL_l \middle| \psi \right\rangle}} = \left| \psi_2 \right\rangle.
\end{equation}
$$
2.58
The average observed value of $M$ is,
$$
\begin{align}
\left\langle M \right\rangle &= \left\langle \psi \middle| M \middle| \psi \right\rangle \\
&= \sum_{m'} m' \left\langle \psi \middle| P_{m'} \middle| \psi \right\rangle \\
&= m \left\langle \psi \middle| \psi \right\rangle = m.
\end{align}
$$
The standard deviation is,
$$
\begin{align}
\left[ \Delta \left( M \right) \right]^2 &= \left\langle M^2 \right\rangle - \left\langle M \right\rangle^2 \;\;\; \left( \because \mathrm{eq. (2.115)} \right) \\
&= \left\langle \psi \middle| M^2 \middle| \psi \right\rangle - \left( \left\langle \psi \middle| M \middle| \psi \right\rangle \right)^2 \\
&= \sum_{m'} m'^2 \left\langle \psi \middle| P_{m'} \middle| \psi \right\rangle - \left( \sum_{m''} m'' \left\langle \psi \middle| P_{m''} \middle| \psi \right\rangle \right)^2 \\
&= m^2 - m^2 = 0.
\end{align}
$$
2.59
$$
\begin{align}
\left\langle X \right\rangle &= \left\langle 0 \middle| X \middle| 0 \right\rangle = \left\langle 0 \middle| 1 \right\rangle = 0 \\
\left\langle X^2 \right\rangle &= \left\langle 0 \middle| X^2 \middle| 0 \right\rangle = \left\langle 0 \middle| I \middle| 0 \right\rangle = 1.
\end{align}
$$
Therefore, the standard deviation $\Delta \left( X \right)$ is,
$$
\begin{equation}
\Delta\left( X \right) = \sqrt{\left\langle X^2 \right\rangle - \left( \left\langle X \right\rangle \right)^2} = 1.
\end{equation}
$$
2.60
Similar to Exercise 2.35,
$$
\begin{align}
\vec{v}\cdot\vec{\sigma} &= v_1
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
+ v_2
\begin{pmatrix}
0 & -i \\
i & 0
\end{pmatrix}
+ v_3
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix} \\
&=
\begin{pmatrix}
v_3 & v_1-iv_2 \\
v_1 + iv_2 & -v_3
\end{pmatrix}.
\end{align}
$$
Now find eigenvalues by solving the characteristic equation.
$$
\begin{align}
\det \left( \vec{v}\cdot\vec{\sigma} - \lambda I \right) &=
\begin{vmatrix}
v_3 - \lambda & v_1-iv_2 \\
v_1+iv_2 & -v_3-\lambda
\end{vmatrix} \\
&= \lambda^2 - (v_1^2+v_2^2+v_3^2) \\
&= \lambda^2 - 1 \;\;\; \left( \because \vec{v} \mathrm{\; is\; a\; unit\; vector} \right) \\
&= 0 \;\;\; \therefore \lambda = \pm 1
\end{align}
$$
For $\lambda=-1$,
$$
\begin{align}
\begin{pmatrix}
v_3+1 & v_1-iv_2 \\
v_1+iv_2 & -v_3+1
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
\end{align}
$$
This yields an eigenvector
$$
\begin{equation}
\left| \lambda_{-1} \right\rangle = \frac{1}{\sqrt{2\left( 1+v_3 \right)}}
\begin{pmatrix}
v_1-iv_2 \\ -v_3-1
\end{pmatrix}.
\end{equation}
$$
Thus, $P_-$ is
$$
\begin{align}
P_- = \left| \lambda_{-1} \right\rangle \left\langle \lambda_{-1} \right| &= \frac{1}{2\left( 1+v_3 \right)}
\begin{pmatrix}
v_1-iv_2 \\ -v_3-1
\end{pmatrix}
\begin{pmatrix}
v_1+iv_2 & -v_3-1
\end{pmatrix} \\
&= \frac{1}{2\left( 1+v_3 \right)}
\begin{pmatrix}
v_1^2+v_2^2 & -v_1\left( v_3+1 \right) +iv_2\left( v_3+1 \right) \\
-v_1\left( v_3+1 \right)-iv_2\left( v_3+1 \right) & \left( v_3+1 \right)^2
\end{pmatrix} \\
&= \frac{1}{2}
\begin{pmatrix}
\frac{v_1^2+v_2^2}{1+v_3} & -v_1+iv_2 \\
-v_1-iv_2 & 1+v_3
\end{pmatrix} \\
&= \frac{1}{2} \left(I-\vec{v}\cdot\vec{\sigma} \right).
\end{align}
$$
For $\lambda=+1$,
$$
\begin{align}
\begin{pmatrix}
v_3-1 & v_1-iv_2 \\
v_1+iv_2 & -v_3-1
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix}.
\end{align}
$$
This yields an eigenvector
$$
\begin{equation}
\left| \lambda_{+1} \right\rangle = \frac{1}{\sqrt{2\left( 1-v_3 \right)}}
\begin{pmatrix}
v_1-iv_2 \\ 1-v_3
\end{pmatrix}.
\end{equation}
$$
Thus, $P_+$ is
$$
\begin{align}
P_+ = \left| \lambda_{+1} \right\rangle \left\langle \lambda_{+1} \right| &= \frac{1}{2\left( 1-v_3 \right)}
\begin{pmatrix}
v_1-iv_2 \\ 1-v_3
\end{pmatrix}
\begin{pmatrix}
v_1+iv_2 & 1-v_3
\end{pmatrix} \\
&= \frac{1}{2\left( 1-v_3 \right)}
\begin{pmatrix}
v_1^2+v_2^2 & v_1\left( 1-v_3 \right) -iv_2\left( 1-v_3 \right) \\
v_1 \left( 1-v_3 \right)+iv_2\left( 1-v_3 \right) & \left( 1-v_3 \right)^2
\end{pmatrix} \\
&= \frac{1}{2}
\begin{pmatrix}
\frac{v_1^2+v_2^2}{1-v_3} & v_1-iv_2 \\
v_1+iv_2 & 1-v_3
\end{pmatrix} \\
&= \frac{1}{2}\left( I+\vec{v}\cdot\vec{\sigma} \right)
\end{align}
$$
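A quick numpy check of $P_{\pm}=\left( I\pm\vec{v}\cdot\vec{\sigma} \right)/2$ for an arbitrarily chosen real unit vector:
```python
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

v = np.array([0.6, 0.0, 0.8])                   # an arbitrary real unit vector
vs = v[0] * X + v[1] * Y + v[2] * Z

P_plus = (np.eye(2) + vs) / 2
P_minus = (np.eye(2) - vs) / 2

print(np.allclose(P_plus @ P_plus, P_plus))     # True: P_+ is a projector
print(np.allclose(P_plus @ P_minus, 0))         # True: the projectors are orthogonal
print(np.allclose(vs @ P_plus, P_plus))         # True: P_+ projects onto the +1 eigenspace
print(np.allclose(vs @ P_minus, -P_minus))      # True: P_- projects onto the -1 eigenspace
```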
2.61
The probability of obtaining the result $+1$ is,
$$
\begin{align}
p(+) &= \left\langle 0 \middle| P_+^{\dagger}P_+ \middle| 0 \right\rangle \\
&= \left\langle 0 \middle| P_+ \middle| 0 \right\rangle \\
&= \frac{1}{2}\left[ \left\langle 0 \middle| I \middle| 0 \right\rangle + \left\langle 0 \middle| \vec{v}\cdot\vec{\sigma} \middle| 0 \right\rangle \right] \\
&= \frac{1}{2}\left[ \left\langle 0 \middle| 0 \right\rangle + \left\langle 0 \middle| v_1 \middle| 1 \right\rangle + \left\langle 0 \middle| iv_2 \middle| 1 \right\rangle + \left\langle 0 \middle| v_3 \middle| 0 \right\rangle \right] \\
&= \frac{1+v_3}{2}.
\end{align}
$$
The state of the system after the measurement is,
$$
\begin{align}
\frac{P_+\left| 0 \right\rangle}{\sqrt{p(+)}} &= \frac{\left( I+\vec{v}\cdot\vec{\sigma}\right) \left| 0 \right\rangle}{\sqrt{2\left( 1+v_3 \right)}} \\
&= \frac{1}{\sqrt{2\left( 1+v_3 \right)}}
\begin{pmatrix}
1+v_3 \\ v_1+iv_2
\end{pmatrix} \\
&= \frac{1}{\sqrt{2\left( 1+v_3 \right)}}\frac{1+v_3}{v_1-iv_2}
\begin{pmatrix}
v_1-iv_2 \\ \frac{v_1^2+v_2^2}{1+v_3}
\end{pmatrix} \\
&= \frac{v_1+iv_2}{\sqrt{v_1^2+v_2^2}}\cdot\frac{1}{\sqrt{2\left( 1-v_3 \right)}}
\begin{pmatrix}
v_1-iv_2 \\ 1-v_3
\end{pmatrix} = \left| \lambda_{+1} \right\rangle \;\;\;\left( \text{up to a global phase} \right).
\end{align}
$$
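A quick numpy check of the probability and the post-measurement state for a unit vector of the same kind (the value of $\vec{v}$ is arbitrary):
```python
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

v = np.array([0.6, 0.0, 0.8])               # arbitrary real unit vector, v3 = 0.8
P_plus = (np.eye(2) + v[0] * X + v[1] * Y + v[2] * Z) / 2

ket0 = np.array([1, 0], dtype=complex)
p_plus = np.real(ket0.conj() @ P_plus @ ket0)
print(np.isclose(p_plus, (1 + v[2]) / 2))   # True: p(+1) = (1 + v3)/2

post = P_plus @ ket0 / np.sqrt(p_plus)      # post-measurement state
print(np.allclose(P_plus @ post, post))     # True: it lies in the +1 eigenspace
```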
2.62
Let $M_m$ be measurement operators. $M_m$ correspond to the POVM elements, that is,
$$
\begin{equation}
E_m = M_m^{\dagger}M_m = M_m.
\end{equation}
$$
So, $M_m$ are positive operators. Since positive operators are Hermitian (Exercise 2.24),
$$
\begin{equation}
M_m^{\dagger}M_m = M_m^2 = M_m.
\end{equation}
$$
Therefore, the $M_m$ are projectors, so the measurement is a projective measurement.
2.63
The left polar decomposition of $M_m$ gives
$$
\begin{equation}
M_m = U_m J_m = U_m \sqrt{M_m^{\dagger}M_m} = U_m \sqrt{E_m}.
\end{equation}
$$
2.64
Since the states $\left| \psi_1 \right\rangle, \ldots, \left| \psi_m \right\rangle$ are linearly independent, for each $j$ we can construct a state $\left| \psi'_j \right\rangle$ that is orthogonal to every $\left| \psi_k \right\rangle$ with $k\neq j$ by projecting $\left| \psi_j \right\rangle$ onto the orthogonal complement of $\mathrm{span}\left\{ \left| \psi_k \right\rangle : k\neq j \right\}$:
$$
\begin{equation}
\left| \psi'_j \right\rangle = \frac{\left( I - Q_j \right)\left| \psi_j \right\rangle}{\left| \left( I - Q_j \right)\left| \psi_j \right\rangle \right|},
\end{equation}
$$
where $Q_j$ is the projector onto $\mathrm{span}\left\{ \left| \psi_k \right\rangle : k\neq j \right\}$ (linear independence guarantees $\left( I - Q_j \right)\left| \psi_j \right\rangle \neq 0$). Then, for $j = 1,\ldots,m$, define
$$
\begin{equation}
E_j = A\left| \psi'_j \right\rangle \left\langle \psi'_j \right|,
\end{equation}
$$
where the constant $A>0$ is chosen small enough that
$$
\begin{equation}
E_{m+1} = I-\sum_{j=1}^m E_j
\end{equation}
$$
is positive. If outcome $E_j$ $\left( j\le m \right)$ occurs, the given state cannot have been any $\left| \psi_k \right\rangle$ with $k\neq j$, since $\left\langle \psi_k \middle| E_j \middle| \psi_k \right\rangle = 0$; so the state must have been $\left| \psi_j \right\rangle$.
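A minimal numpy sketch of this construction for two illustrative non-orthogonal states (the state vectors and the helper name orth_to are arbitrary choices):
```python
import numpy as np

# Illustrative pair of linearly independent, non-orthogonal states.
psi1 = np.array([1, 0], dtype=complex)
psi2 = np.array([1, 1], dtype=complex) / np.sqrt(2)

def orth_to(target, other):
    """Project `target` onto the orthogonal complement of span{other} and normalize."""
    other = other / np.linalg.norm(other)
    w = target - np.vdot(other, target) * other
    return w / np.linalg.norm(w)

phi1 = orth_to(psi1, psi2)                  # orthogonal to psi2
phi2 = orth_to(psi2, psi1)                  # orthogonal to psi1

S = np.outer(phi1, phi1.conj()) + np.outer(phi2, phi2.conj())
A = 1.0 / np.max(np.linalg.eigvalsh(S))     # largest A keeping E3 = I - A*S positive
E1 = A * np.outer(phi1, phi1.conj())
E2 = A * np.outer(phi2, phi2.conj())
E3 = np.eye(2) - E1 - E2

print(np.min(np.linalg.eigvalsh(E3)) >= -1e-12)   # True: E3 is positive
print(abs(psi2.conj() @ E1 @ psi2))               # ~0: outcome 1 never occurs for psi2
print(abs(psi1.conj() @ E2 @ psi1))               # ~0: outcome 2 never occurs for psi1
```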
2.65
Define the orthonormal basis
$$
\begin{align}
\left| + \right\rangle &\equiv \frac{\left| 0 \right\rangle + \left| 1 \right\rangle}{\sqrt{2}} \\
\left| - \right\rangle &\equiv \frac{\left| 0 \right\rangle - \left| 1 \right\rangle}{\sqrt{2}}.
\end{align}
$$
In this basis the two given states are $\left( \left| 0 \right\rangle + \left| 1 \right\rangle \right)/\sqrt{2} = \left| + \right\rangle$ and $\left( \left| 0 \right\rangle - \left| 1 \right\rangle \right)/\sqrt{2} = \left| - \right\rangle$, which are not the same up to a relative phase shift.
2.66
Let $\left| \psi \right\rangle \equiv \left( \left| 00 \right\rangle + \left| 11 \right\rangle \right)/\sqrt{2}$. Then,
$$
\begin{align}
E\left( X_1Z_2 \right) &= \left\langle \psi \middle| X_1Z_2 \middle| \psi \right\rangle \\
&= \frac{1}{2} \left( \left\langle 00 \middle| X_1Z_2 \middle| 00 \right\rangle + \left\langle 00 \middle| X_1Z_2 \middle| 11 \right\rangle + \left\langle 11 \middle| X_1Z_2 \middle| 00 \right\rangle + \left\langle 11 \middle| X_1Z_2 \middle| 11 \right\rangle \right) \\
&= \frac{1}{2}\left( \left\langle 00 \middle| 10 \right\rangle - \left\langle 00 \middle| 01 \right\rangle + \left\langle 11 \middle| 10 \right\rangle - \left\langle 11 \middle| 01 \right\rangle \right) \\
&= 0
\end{align}
$$
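The same expectation value in numpy:
```python
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

# |psi> = (|00> + |11>)/sqrt(2) in the computational basis ordering 00, 01, 10, 11.
psi = np.array([1, 0, 0, 1], dtype=complex) / np.sqrt(2)

X1Z2 = np.kron(X, Z)
print(np.real(psi.conj() @ X1Z2 @ psi))   # 0.0
```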
2.67
It seems intuitively easy (I wouldn’t say it’s trivial). However, it’s too much to prove…
2.68
Suppose one of the Bell states, $\left| \psi \right\rangle \equiv \left( \left| 00 \right\rangle + \left| 11 \right\rangle \right)/\sqrt{2}$, can be written as a product of single particle states
$$
\begin{align}
\left| a \right\rangle &= \alpha_a \left| 0 \right\rangle + \beta_a \left| 1 \right\rangle \\
\left| b \right\rangle &= \alpha_b \left| 0 \right\rangle + \beta_b \left| 1 \right\rangle.
\end{align}
$$
Then,
$$
\begin{equation}
\left| a \right\rangle \left| b \right\rangle = \alpha_a\alpha_b \left| 00 \right\rangle + \alpha_a\beta_b \left| 01 \right\rangle + \beta_a\alpha_b \left| 10 \right\rangle + \beta_a\beta_b \left| 11 \right\rangle.
\end{equation}
$$
Comparing this with the Bell state $\left| \psi \right\rangle = \left( \left| 00 \right\rangle + \left| 11 \right\rangle \right)/\sqrt{2}$,
$$
\begin{align}
\alpha_a\alpha_b &= \beta_a\beta_b = \frac{1}{\sqrt{2}} \\
\alpha_a\beta_b &= \beta_a\alpha_b = 0.
\end{align}
$$
The first line requires all four coefficients to be nonzero, which contradicts the second line. So, $\left| \psi \right\rangle \neq \left| a \right\rangle \left| b \right\rangle$ for all single qubit states $\left| a \right\rangle$ and $\left| b \right\rangle$.
2.69
Define the Bell basis as
$$
\begin{align}
\left| \Phi^+ \right\rangle &= \frac{\left( \left| 00 \right\rangle + \left| 11 \right\rangle \right)}{\sqrt{2}} \\
\left| \Phi^- \right\rangle &= \frac{\left( \left| 00 \right\rangle - \left| 11 \right\rangle \right)}{\sqrt{2}} \\
\left| \Psi^+ \right\rangle &= \frac{\left( \left| 01 \right\rangle + \left| 10 \right\rangle \right)}{\sqrt{2}} \\
\left| \Psi^- \right\rangle &= \frac{\left( \left| 01 \right\rangle - \left| 10 \right\rangle \right)}{\sqrt{2}}.
\end{align}
$$
Then,
$$
\begin{align}
\left\langle \Phi^+ \middle| \Phi^+ \right\rangle &= \frac{1}{2}\left( \left\langle 00 \middle| 00 \right\rangle + \left\langle 00 \middle| 11 \right\rangle + \left\langle 11 \middle| 00 \right\rangle + \left\langle 11 \middle| 11 \right\rangle \right) = 1 \\
\left\langle \Phi^+ \middle| \Phi^- \right\rangle &= \frac{1}{2}\left( \left\langle 00 \middle| 00 \right\rangle - \left\langle 00 \middle| 11 \right\rangle + \left\langle 11 \middle| 00 \right\rangle - \left\langle 11 \middle| 11 \right\rangle \right) = 0 \\
\left\langle \Phi^+ \middle| \Psi^+ \right\rangle &= \frac{1}{2}\left( \left\langle 00 \middle| 01 \right\rangle + \left\langle 00 \middle| 10 \right\rangle + \left\langle 11 \middle| 01 \right\rangle + \left\langle 11 \middle| 10 \right\rangle \right) = 0 \\
\left\langle \Phi^+ \middle| \Psi^- \right\rangle &= \frac{1}{2}\left( \left\langle 00 \middle| 01 \right\rangle - \left\langle 00 \middle| 10 \right\rangle + \left\langle 11 \middle| 01 \right\rangle - \left\langle 11 \middle| 10 \right\rangle \right) = 0 \\
\left\langle \Phi^- \middle| \Phi^- \right\rangle &= \frac{1}{2}\left( \left\langle 00 \middle| 00 \right\rangle - \left\langle 00 \middle| 11 \right\rangle - \left\langle 11 \middle| 00 \right\rangle + \left\langle 11 \middle| 11 \right\rangle \right) = 1 \\
\left\langle \Phi^- \middle| \Psi^+ \right\rangle &= \frac{1}{2}\left( \left\langle 00 \middle| 01 \right\rangle + \left\langle 00 \middle| 10 \right\rangle - \left\langle 11 \middle| 01 \right\rangle - \left\langle 11 \middle| 10 \right\rangle \right) = 0 \\
\left\langle \Phi^- \middle| \Psi^- \right\rangle &= \frac{1}{2}\left( \left\langle 00 \middle| 01 \right\rangle - \left\langle 00 \middle| 10 \right\rangle - \left\langle 11 \middle| 01 \right\rangle + \left\langle 11 \middle| 10 \right\rangle \right) = 0 \\
\left\langle \Psi^+ \middle| \Psi^+ \right\rangle &= \frac{1}{2}\left( \left\langle 01 \middle| 01 \right\rangle + \left\langle 01 \middle| 10 \right\rangle + \left\langle 10 \middle| 01 \right\rangle + \left\langle 10 \middle| 10 \right\rangle \right) = 1 \\
\left\langle \Psi^+ \middle| \Psi^- \right\rangle &= \frac{1}{2}\left( \left\langle 01 \middle| 01 \right\rangle - \left\langle 01 \middle| 10 \right\rangle + \left\langle 10 \middle| 01 \right\rangle - \left\langle 10 \middle| 10 \right\rangle \right) = 0 \\
\left\langle \Psi^- \middle| \Psi^- \right\rangle &= \frac{1}{2}\left( \left\langle 01 \middle| 01 \right\rangle - \left\langle 01 \middle| 10 \right\rangle - \left\langle 10 \middle| 01 \right\rangle + \left\langle 10 \middle| 10 \right\rangle \right) = 1.
\end{align}
$$
So, the Bell basis forms an orthonormal basis.
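For completeness, a short numpy check of the same Gram matrix of inner products (my own addition):

```python
import numpy as np

s = 1 / np.sqrt(2)
bell = np.column_stack([
    s * np.array([1, 0, 0, 1]),    # |Phi+>
    s * np.array([1, 0, 0, -1]),   # |Phi->
    s * np.array([0, 1, 1, 0]),    # |Psi+>
    s * np.array([0, 1, -1, 0]),   # |Psi->
])

# Gram matrix of inner products; the identity matrix <=> orthonormal basis of the 4-dim space.
print(np.round(bell.conj().T @ bell, 10))
```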
2.70
Any single-qubit operator can be expressed as a linear combination of the identity and the Pauli matrices, i.e.,
$$
\begin{align}
E = c_I I + c_x X +c_y Y + c_z Z.
\end{align}
$$
Using the relations
$$
\begin{align}
\left( Z\otimes I \right) \left| \Phi^+ \right\rangle &= \frac{1}{\sqrt{2}}\left( \left| 00 \right\rangle - \left| 11 \right\rangle \right) = \left| \Phi^- \right\rangle \\
\left( X\otimes I \right)\left| \Phi^+ \right\rangle &= \frac{1}{\sqrt{2}}\left( \left| 01 \right\rangle + \left| 10 \right\rangle \right) = \left| \Psi^+ \right\rangle \\
\left( iY\otimes I \right)\left| \Phi^+ \right\rangle &= \frac{1}{\sqrt{2}}\left( \left| 01 \right\rangle - \left| 10 \right\rangle \right) = \left| \Psi^- \right\rangle
\end{align}
$$
and the fact that the Bell states are mutually orthogonal (Exercise 2.69),
$$
\begin{align}
\left\langle \Phi^+ \middle| \left( E\otimes I \right) \middle| \Phi^+ \right\rangle &= \left\langle \Phi^+ \middle| c_I I\otimes I \middle| \Phi^+ \right\rangle + \left\langle \Phi^+ \middle| c_x X\otimes I \middle| \Phi^+ \right\rangle + \left\langle \Phi^+ \middle| c_y Y\otimes I \middle| \Phi^+ \right\rangle + \left\langle \Phi^+ \middle| c_z Z\otimes I \middle| \Phi^+ \right\rangle \\
&= c_I \left\langle \Phi^+ \middle| \Phi^+ \right\rangle + c_x \left\langle \Phi^+ \middle| \Psi^+ \right\rangle -ic_y \left\langle \Phi^+ \middle| \Psi^- \right\rangle + c_z \left\langle \Phi^+ \middle| \Phi^- \right\rangle \\
&= c_I.
\end{align}
$$
Then,
$$
\begin{align}
\left\langle \Phi^- \middle| E\otimes I \middle| \Phi^- \right\rangle &= \left\langle \Phi^+ \middle| \left( Z\otimes I \right)^{\dagger} \left( E\otimes I \right) \left( Z\otimes I \right) \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left( Z\otimes I \right) \left[ \left( c_I I + c_x X + c_y Y + c_z Z \right) \otimes I \right] \left( Z\otimes I \right) \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left( Z\otimes I \right) \left[ \left( c_I Z - ic_x Y + ic_y X + c_z I \right) \otimes I \right] \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left[ \left( c_I I - c_x X - c_y Y + c_z Z \right) \otimes I \right] \middle| \Phi^+ \right\rangle \\
&= c_I \\
\left\langle \Psi^+ \middle| \left( E\otimes I \right) \middle| \Psi^+ \right\rangle &= \left\langle \Phi^+ \middle| \left( X\otimes I \right)^{\dagger}\left( E\otimes I \right)\left( X\otimes I \right) \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left( X\otimes I \right)\left[ \left( c_I I + c_x X + c_y Y + c_z Z \right) \otimes I \right] \left( X\otimes I \right) \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left( X\otimes I \right)\left[ \left( c_I X + c_x I -i c_y Z + ic_z Y \right)\otimes I \right] \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left[ \left( c_I I + c_x X - c_y Y - c_z Z \right)\otimes I \right] \middle| \Phi^+ \right\rangle \\
&= c_I \\
\left\langle \Psi^- \middle| \left( E\otimes I \right) \middle| \Psi^- \right\rangle &= \left\langle \Phi^+ \middle| \left( iY\otimes I \right)^{\dagger}\left( E\otimes I \right)\left( iY\otimes I \right) \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left( -iY\otimes I \right)\left[ \left( c_I I + c_x X + c_y Y + c_z Z \right)\otimes I \right]\left( iY\otimes I \right) \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left( Y\otimes I \right)\left[ \left( c_I Y + ic_x Z + c_y I -ic_z X \right)\otimes I \right] \middle| \Phi^+ \right\rangle \\
&= \left\langle \Phi^+ \middle| \left[ \left( c_I I - c_x X + c_y Y - c_z Z \right)\otimes I \right] \middle| \Phi^+ \right\rangle \\
&= c_I.
\end{align}
$$
If Eve intercepts the qubit that Alice sends to Bob and performs a measurement $\{ M_m \}$ on it, the probability that she obtains outcome $m$ is
$$
\begin{equation}
\left\langle \psi \middle| \left( M_m^{\dagger}M_m\otimes I \right) \middle| \psi \right\rangle.
\end{equation}
$$
Here $M_m^{\dagger}M_m$ is a positive operator, so by the calculation above this probability takes the same value for all four Bell states, i.e., it does not depend on which state Alice prepared. Therefore, Eve cannot infer anything about the information Alice is sending.
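The following numpy sketch (my own check) draws a random positive operator $E$ and confirms that $\left\langle \psi \middle| E\otimes I \middle| \psi \right\rangle$ takes the same value, namely $\mathrm{tr}(E)/2 = c_I$, for every Bell state:

```python
import numpy as np

rng = np.random.default_rng(0)

# A random positive operator on Alice's qubit: E = B^dagger B is positive for any B.
B = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))
E = B.conj().T @ B

s = 1 / np.sqrt(2)
bell_states = {
    "Phi+": s * np.array([1, 0, 0, 1]),
    "Phi-": s * np.array([1, 0, 0, -1]),
    "Psi+": s * np.array([0, 1, 1, 0]),
    "Psi-": s * np.array([0, 1, -1, 0]),
}

EI = np.kron(E, np.eye(2))
print("tr(E)/2 =", np.round(np.trace(E).real / 2, 6))
for name, psi in bell_states.items():
    # The same value appears for every Bell state.
    print(name, np.round((psi.conj() @ EI @ psi).real, 6))
```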
2.71
Writing $\rho$ in its spectral decomposition $\rho = \sum_j p_j \left| j \right\rangle \left\langle j \right|$ with orthonormal eigenvectors $\left| j \right\rangle$,
$$
\begin{align}
\mathrm{tr} \left( \rho^2 \right) &= \mathrm{tr}\left( \sum_{j,k}p_jp_k \left| j \right\rangle \left\langle j \middle| k \right\rangle \left\langle k \right| \right) \\
&= \mathrm{tr}\left( \sum_{j,k}p_jp_k \delta_{j,k} \left| j \right\rangle \left\langle k \right| \right) \\
&= \mathrm{tr}\left( \sum_j p_j^2 \left| j \right\rangle \left\langle j \right| \right) \\
&= \sum_j p_j^2.
\end{align}
$$
Since $\rho$ is a positive matrix and $\sum_j p_j=1$, we have $0 \le p_j \le 1$, so $p_j^2 \le p_j$ for every $j$ and therefore $\sum_j p_j^2 \le 1$. Equality holds if and only if a single $p_j$ equals 1, that is, if and only if $\rho$ is a pure state $\rho = \left| \psi \right\rangle \left\langle \psi \right|$.
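A quick numerical illustration of the criterion (my own sketch; the random-ensemble construction below is just one convenient way to generate examples):

```python
import numpy as np

rng = np.random.default_rng(1)

def random_density_matrix(d, rank):
    """Random rank-`rank` density matrix: a mixture of `rank` random pure states."""
    kets = rng.normal(size=(rank, d)) + 1j * rng.normal(size=(rank, d))
    kets /= np.linalg.norm(kets, axis=1, keepdims=True)
    probs = rng.dirichlet(np.ones(rank))
    return sum(p * np.outer(k, k.conj()) for p, k in zip(probs, kets))

mixed = random_density_matrix(2, rank=2)
pure = random_density_matrix(2, rank=1)
print(np.trace(mixed @ mixed).real)   # < 1 for a (generic) mixed state
print(np.trace(pure @ pure).real)     # = 1 for a pure state
```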
2.72
(1)
Since the identity and the Pauli matrices form a basis for the real vector space of $2\times 2$ Hermitian matrices, an arbitrary density matrix for a qubit can be written as a real linear combination of $I,\sigma_1,\sigma_2,\sigma_3$, i.e.
$$
\begin{align}
\rho &= v_0 \sigma_0 + v_1 \sigma_1 + v_2 \sigma_2 + v_3 \sigma_3 \\
&= \begin{pmatrix}
v_0 + v_3 & v_1-iv_2 \\
v_1+iv_2 & v_0-v_3
\end{pmatrix}.
\end{align}
$$
Since $\mathrm{tr} \left( \rho \right) = 2v_0 = 1$, we have $v_0 = \frac{1}{2}$.
Now find the eigenvalues by solving the characteristic equation:
$$
\begin{align}
\det \left( \rho -\lambda I \right) &= \begin{vmatrix}
v_0 + v_3 - \lambda & v_1 -iv_2 \\
v_1+iv_2 & v_0 -v_3 -\lambda
\end{vmatrix} \\
&= v_0^2 - v_3^2 -2v_0\lambda + \lambda^2 - v_1^2 - v_2^2 \\
&= \lambda^2 -2v_0\lambda + v_0^2 - (v_1^2 + v_2^2 + v_3^2) = 0 \\
\therefore \lambda &= v_0 \pm \sqrt{v_1^2 + v_2^2 + v_3^2} = \frac{1}{2} \left( 1\pm \sqrt{\left( 2v_1 \right)^2+\left( 2v_2 \right)^2+\left( 2v_3 \right)^2} \right)
\end{align}
$$
Since $\rho$ is a positive semidefinite matrix,
$$
\begin{equation}
\sqrt{\left( 2v_1 \right)^2+\left( 2v_2 \right)^2+\left( 2v_3 \right)^2} \le 1
\end{equation}
$$
Therefore,
$$
\begin{equation}
\rho = \frac{I+\vec{r}\cdot\vec{\sigma}}{2}
\end{equation}
$$
where $\vec{r} \equiv 2\left( v_1, v_2, v_3 \right)$ and $|\vec{r}|\le 1$.
(2)
$\rho=\frac{I}{2}$ corresponds to $\vec{r}=0$, the origin of the Bloch sphere (the maximally mixed state).
(3)
As we did in (1), the eigenvalues of $\rho=\frac{I+\vec{r}\cdot\vec{\sigma}}{2}$ are $\lambda_{\pm}=\frac{1}{2}\left( 1\pm \left| \vec{r} \right| \right)$. Since $\mathrm{tr} \left( \rho^2 \right)=\lambda_+^2+\lambda_-^2=\frac{1+|\vec{r}|^2}{2}$, $\rho$ is pure if and only if $|\vec{r}|=1$.
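A small numpy illustration of (1) and (3) (my own sketch): map a few Bloch vectors to $\rho=\left( I+\vec{r}\cdot\vec{\sigma} \right)/2$ and inspect trace and eigenvalues.

```python
import numpy as np

I2 = np.eye(2)
sigma = [np.array([[0, 1], [1, 0]]),
         np.array([[0, -1j], [1j, 0]]),
         np.array([[1, 0], [0, -1]])]

def rho_from_bloch(r):
    """Density matrix (I + r.sigma)/2 for a Bloch vector r with |r| <= 1."""
    return 0.5 * (I2 + sum(ri * si for ri, si in zip(r, sigma)))

for r in ([0, 0, 0], [0.3, -0.2, 0.5], [0, 0, 1]):
    rho = rho_from_bloch(r)
    # Eigenvalues are (1 +/- |r|)/2: positive iff |r| <= 1, and pure iff |r| = 1.
    print(r, "trace:", np.trace(rho).real,
          "eigenvalues:", np.round(np.linalg.eigvalsh(rho), 4))
```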
(4)
Since $\rho$ is pure, $\rho = \left| \psi \right\rangle \left\langle \psi \right|$ where $\left| \psi \right\rangle = \alpha\left| 0 \right\rangle + \beta\left| 1 \right\rangle$. Here $\alpha$ and $\beta$ are constrained by the equation $|\alpha|^2+|\beta|^2=1$. So,
$$
\begin{equation}
\left| \psi \right\rangle = \mathrm{e}^{i\gamma}\left( \cos\frac{\theta}{2}\left| 0 \right\rangle + \mathrm{e}^{i\varphi}\sin\frac{\theta}{2}\left| 1 \right\rangle \right).
\end{equation}
$$
Computing $\rho = \left| \psi \right\rangle \left\langle \psi \right|$ for this state gives the Bloch vector $\vec{r} = \left( \sin\theta\cos\varphi,\, \sin\theta\sin\varphi,\, \cos\theta \right)$, a point on the surface of the Bloch sphere.
2.73
$$
\begin{align}
\left|\psi_i\right\rangle &= \rho\rho^{-1}\left|\psi_i\right\rangle \\
&= \sum_j p_j \left|\psi_j\right\rangle\left\langle\psi_j\middle|\rho^{-1}\middle|\psi_i\right\rangle .
\end{align}
$$
Here $\rho^{-1}$ is the inverse of $\rho$ on its support, which contains $\left|\psi_i\right\rangle$. The states $\left|\psi_j\right\rangle$ in a minimal ensemble are linearly independent (there are exactly $\mathrm{rank}(\rho)$ of them and they span the support of $\rho$), so comparing coefficients in the expansion above gives $p_j\left\langle\psi_j\middle|\rho^{-1}\middle|\psi_i\right\rangle = \delta_{ij}$. Therefore,
$$
\begin{equation}
p_i=\frac{1}{\left\langle\psi_i\middle|\rho^{-1}\middle|\psi_i\right\rangle}.
\end{equation}
$$
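As a numerical sanity check (my own addition), the eigen-ensemble of a density matrix is one example of a minimal ensemble, and its probabilities indeed satisfy $p_i = 1/\left\langle\psi_i\middle|\rho^{-1}\middle|\psi_i\right\rangle$ (the example matrix below is an arbitrary choice):

```python
import numpy as np

# An example full-rank density matrix (arbitrary choice for illustration).
rho = np.array([[0.7, 0.2], [0.2, 0.3]])

# Its eigen-ensemble {p_i, |psi_i>} is a minimal ensemble.
p, V = np.linalg.eigh(rho)
rho_inv = np.linalg.pinv(rho)     # inverse on the support of rho

for p_i, psi_i in zip(p, V.T):
    # The two printed numbers agree for each eigenvector.
    print(p_i, 1 / (psi_i.conj() @ rho_inv @ psi_i))
```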
2.74
$$
\begin{align}
\rho^{AB} &= \left| a \right\rangle \left| b \right\rangle\left\langle a \right|\left\langle b \right| = \left| a\right\rangle\left\langle a \right| \otimes \left| b \right\rangle\left\langle b \right|. \\
\rho^A &= \left| a\right\rangle\left\langle a \right| \mathrm{tr}\left( \left| b \right\rangle\left\langle b \right| \right) = \left| a \right\rangle\left\langle a \right|.\\
\mathrm{tr}\left( \left( \rho^A \right)^2 \right) &= \mathrm{tr}\left( \left| a\right\rangle\left\langle a \middle| a \right\rangle\left\langle a \right| \right) = \mathrm{tr} \left( \left| a\right\rangle\left\langle a \right| \right) = 1.
\end{align}
$$
2.75
$$
\begin{align}
\rho^{\Phi^+} &= \left|\Phi^+\right\rangle\left\langle\Phi^+\right| = \frac{\left| 00\right\rangle\left\langle 00\right| + \left| 00\right\rangle\left\langle 11\right| + \left| 11\right\rangle\left\langle 00\right| + \left| 11\right\rangle\left\langle 11\right|}{2} \\
\rho^{\Phi^-} &= \frac{\left| 00\right\rangle\left\langle 00\right| - \left| 00\right\rangle\left\langle 11\right| - \left| 11\right\rangle\left\langle 00\right| + \left| 11\right\rangle\left\langle 11\right|}{2} \\
\rho^{\Psi^+} &= \frac{\left| 01\right\rangle\left\langle 01\right| + \left| 01\right\rangle\left\langle 10\right| + \left| 10\right\rangle\left\langle 01\right| + \left| 10\right\rangle\left\langle 10\right|}{2} \\
\rho^{\Psi^-} &= \frac{\left| 01\right\rangle\left\langle 01\right| - \left| 01\right\rangle\left\langle 10\right| - \left| 10\right\rangle\left\langle 01\right| + \left| 10\right\rangle\left\langle 10\right|}{2} \\
\rho^{\Phi^+,1} &= \frac{\left| 0\right\rangle\left\langle 0\right| + \left| 1\right\rangle\left\langle 1\right|}{2} = \frac{I}{2} \\
\rho^{\Phi^+,2} &= \frac{\left| 0\right\rangle\left\langle 0\right| + \left| 1\right\rangle\left\langle 1\right|}{2} = \frac{I}{2} \\
\rho^{\Phi^-,1} &= \frac{\left| 0\right\rangle\left\langle 0\right| + \left| 1\right\rangle\left\langle 1\right|}{2} = \frac{I}{2} \\
\rho^{\Phi^-,2} &= \frac{\left| 0\right\rangle\left\langle 0\right| + \left| 1\right\rangle\left\langle 1\right|}{2} = \frac{I}{2} \\
\rho^{\Psi^+,1} &= \frac{\left| 0\right\rangle\left\langle 0\right| + \left| 1\right\rangle\left\langle 1\right|}{2} = \frac{I}{2} \\
\rho^{\Psi^+,2} &= \frac{\left| 1\right\rangle\left\langle 1\right| + \left| 0\right\rangle\left\langle 0\right|}{2} = \frac{I}{2} \\
\rho^{\Psi^-,1} &= \frac{\left| 0\right\rangle\left\langle 0\right| + \left| 1\right\rangle\left\langle 1\right|}{2} = \frac{I}{2} \\
\rho^{\Psi^-,2} &= \frac{\left| 0\right\rangle\left\langle 0\right| + \left| 1\right\rangle\left\langle 1\right|}{2} = \frac{I}{2}
\end{align}
$$
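These reduced density operators can also be obtained numerically by a partial trace; a short numpy sketch (my own check, using the reshaping trick for two-qubit pure states):

```python
import numpy as np

def reduced_density_matrices(psi):
    """Reduced density operators of a two-qubit pure state via a partial trace."""
    C = psi.reshape(2, 2)        # psi = sum_ij C[i, j] |i>|j>
    rho1 = C @ C.conj().T        # trace out qubit 2
    rho2 = C.T @ C.conj()        # trace out qubit 1
    return rho1, rho2

s = 1 / np.sqrt(2)
bell_states = {
    "Phi+": s * np.array([1, 0, 0, 1]),
    "Phi-": s * np.array([1, 0, 0, -1]),
    "Psi+": s * np.array([0, 1, 1, 0]),
    "Psi-": s * np.array([0, 1, -1, 0]),
}
for name, psi in bell_states.items():
    rho1, rho2 = reduced_density_matrices(psi)
    assert np.allclose(rho1, np.eye(2) / 2) and np.allclose(rho2, np.eye(2) / 2)
print("Both reduced density operators of every Bell state are I/2.")
```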
2.76
See Wikipedia.
2.77
$$
\begin{equation}
\frac{\left| 001\right\rangle + \left| 010\right\rangle + \left| 100\right\rangle}{\sqrt{3}}
\end{equation}
$$
This state (the W state) cannot be written in the tripartite Schmidt form $\sum_i \lambda_i \left| i_A \right\rangle \left| i_B \right\rangle \left| i_C \right\rangle$; see the references below for details.
References:
Higher order Schmidt decompositions (arXiv)
Existence of the Schmidt decomposition for tripartite systems (arXiv)
2.78
(i) product state $\Leftrightarrow$ Schmidt rank is 1
Suppose $\left|\psi\right\rangle$ is a product state. Then
$$
\begin{equation}
\left|\psi\right\rangle = \left|\psi_A\right\rangle\otimes\left|\psi_B\right\rangle .
\end{equation}
$$
Since this is a Schmidt decomposition with a single term, the Schmidt rank of $\left|\psi\right\rangle$ is 1. Conversely, if the Schmidt rank of $\left|\psi\right\rangle$ is 1, the Schmidt decomposition consists of a single term, so $\left|\psi\right\rangle = \left|\psi_A\right\rangle\otimes\left|\psi_B\right\rangle$ is a product state.
(ii) product state $\Leftrightarrow$ $\rho^A$ and $\rho^B$ are pure states.
Using the Schmidt decomposition, $\left|\psi\right\rangle$ can be written as
$$
\begin{equation}
\left|\psi\right\rangle = \sum_i \lambda_i\left| i_A\right\rangle\left| i_B\right\rangle .
\end{equation}
$$
Then,
$$
\begin{align}
\rho^{AB} &= \left|\psi\right\rangle\left\langle\psi\right| = \sum_{i,j}\lambda_i\lambda_j\left| i_Ai_B\right\rangle\left\langle j_Aj_B\right|\\
\rho^{A} &= \mathrm{tr}_B\left( \rho^{AB} \right) = \sum_i\lambda_i^2\left| i_A\right\rangle\left\langle i_A\right|\\
\rho^{B} &= \mathrm{tr}_A\left( \rho^{AB} \right) = \sum_i\lambda_i^2\left| i_B\right\rangle\left\langle i_B\right| .
\end{align}
$$
Now, suppose the Schmidt rank of $\left|\psi\right\rangle$ is 1 ($\Leftrightarrow$ product state). Then
$$
\begin{equation}
\rho^A = \left| i_A\right\rangle\left\langle i_A\right| ,\;\;\; \rho^B = \left| i_B\right\rangle\left\langle i_B\right| .
\end{equation}
$$
Therefore, $\rho^A$ and $\rho^B$ are pure states.
Conversely, suppose $\rho^A$ and $\rho^B$ are pure states. Since $\rho^A = \sum_i\lambda_i^2\left| i_A\right\rangle\left\langle i_A\right|$ with orthonormal $\left| i_A\right\rangle$, it can be pure only if exactly one $\lambda_i$ is nonzero (and equal to 1). Then
$$
\begin{equation}
\rho^A = \left| i_A\right\rangle\left\langle i_A\right| .
\end{equation}
$$
Therefore, the Schmidt rank of $\left|\psi\right\rangle$ is 1, i.e., $\left|\psi\right\rangle$ is a product state.
2.79
$$
\begin{align}
\frac{\left| 00\right\rangle + \left| 11\right\rangle}{\sqrt{2}} &= \sum_{i=0}^{1} \frac{1}{\sqrt{2}}\left| i \right\rangle\left| i\right\rangle \\
\frac{\left| 00\right\rangle + \left| 01\right\rangle + \left| 10\right\rangle + \left| 11\right\rangle}{2} &= \frac{\left| 0\right\rangle + \left| 1\right\rangle}{\sqrt{2}}\otimes\frac{\left| 0\right\rangle + \left| 1\right\rangle}{\sqrt{2}} \\
&= \left| +\right\rangle\left| +\right\rangle
\end{align}
$$
To find the Schmidt decomposition of $\left|\psi\right\rangle\equiv\frac{\left| 00\right\rangle + \left| 01\right\rangle + \left| 10\right\rangle}{\sqrt{3}}$, we construct the density matrix $\rho$ and find the reduced density matrices $\rho^1$ and $\rho^2$.
$$
\begin{align}
\rho &= \frac{1}{\sqrt{3}}\left( \left| 00\right\rangle + \left| 01\right\rangle + \left| 10\right\rangle \right)\left\langle\psi\right| = \frac{1}{3}
\begin{pmatrix}
1 & 1 & 1 & 0 \\
1 & 1 & 1 & 0 \\
1 & 1 & 1 & 0 \\
0 & 0 & 0 & 0 \\
\end{pmatrix} \\
\rho^1 &= \rho^2 = \frac{1}{3}
\begin{pmatrix}
2 & 1 \\
1 & 1
\end{pmatrix} \\
\end{align}
$$
Now we find the eigenvalues and eigenvectors of $\rho^1$.
$$
\begin{align}
\mathrm{det}\left(\rho^1-\lambda I\right) &=
\begin{vmatrix}
\frac{2}{3}-\lambda & \frac{1}{3} \\
\frac{1}{3} & \frac{1}{3}-\lambda
\end{vmatrix} \\
&= \lambda^2-\lambda +\frac{1}{9} = 0 \;\;\; \therefore \lambda_{\pm} = \frac{3\pm \sqrt{5}}{6}
\end{align}
$$
For $\lambda_+$,
$$
\begin{equation}
\begin{pmatrix}
\frac{2}{3}-\lambda_+ & \frac{1}{3} \\
\frac{1}{3} & \frac{1}{3}-\lambda_+
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix} \;\;\; \therefore \left|\lambda_+\right\rangle = \sqrt{\frac{2}{5+\sqrt{5}}}
\begin{pmatrix}
\frac{1+\sqrt{5}}{2} \\
1
\end{pmatrix}
\end{equation}
$$
For $\lambda_-$,
$$
\begin{equation}
\begin{pmatrix}
\frac{2}{3}-\lambda_- & \frac{1}{3} \\
\frac{1}{3} & \frac{1}{3}-\lambda_-
\end{pmatrix}
\begin{pmatrix}
a \\ b
\end{pmatrix}
=
\begin{pmatrix}
0 \\ 0
\end{pmatrix} \;\;\; \therefore \left|\lambda_-\right\rangle = \sqrt{\frac{2}{5-\sqrt{5}}}
\begin{pmatrix}
\frac{1-\sqrt{5}}{2} \\
1
\end{pmatrix}
\end{equation}
$$
So,
$$
\begin{equation}
\frac{\left| 00\right\rangle +\left| 01\right\rangle + \left| 10\right\rangle}{\sqrt{3}} = \sqrt{\lambda_+} \left|\lambda_+\right\rangle\left|\lambda_+\right\rangle - \sqrt{\lambda_-} \left|\lambda_-\right\rangle\left|\lambda_-\right\rangle,
\end{equation}
$$
where
$$
\begin{equation}
\lambda_{\pm} = \frac{3\pm\sqrt{5}}{6},\;\;\; \left|\lambda_{\pm}\right\rangle = \sqrt{\frac{2}{5\pm\sqrt{5}}}
\begin{pmatrix}
\frac{1\pm\sqrt{5}}{2} \\
1
\end{pmatrix}.
\end{equation}
$$
(The relative minus sign can be absorbed into the second system's basis vector, e.g. by using $-\left|\lambda_-\right\rangle$ on the second qubit, which gives the standard Schmidt form with non-negative coefficients.)
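As a cross-check of this result (my own numerical verification, not part of the original solution), the Schmidt decomposition can be read off from the singular value decomposition of the coefficient matrix: the squared singular values reproduce $\lambda_{\pm}=(3\pm\sqrt{5})/6$, and the relative minus sign is absorbed into the Schmidt vectors returned by the SVD.

```python
import numpy as np

# Coefficient matrix of |psi> = (|00> + |01> + |10>)/sqrt(3): psi = sum_ij C[i, j] |i>|j>
C = np.array([[1.0, 1.0], [1.0, 0.0]]) / np.sqrt(3)

# Schmidt decomposition = singular value decomposition of C.
U, s, Vh = np.linalg.svd(C)
print("Schmidt coefficients squared:", np.round(s**2, 6))
print("(3 +/- sqrt(5))/6 =",
      np.round((3 + np.sqrt(5)) / 6, 6), np.round((3 - np.sqrt(5)) / 6, 6))

# Reconstruct |psi> = sum_k s_k |u_k>|v_k>; the signs live in the Schmidt vectors.
psi_rebuilt = sum(s[k] * np.kron(U[:, k], Vh[k, :]) for k in range(2))
print(np.allclose(psi_rebuilt, C.reshape(-1)))   # True
```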