\documentclass[10pt]{beamer}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage{amssymb, amsthm}
\usepackage{setspace}
\usepackage{amsmath}
\usepackage{hyperref}
\usepackage{geometry}
\usepackage{enumerate}
\usepackage{physics}
\usepackage{listings}
%\usepackage{struktex}
\usepackage{qcircuit}
\usepackage{adjustbox}
\usepackage{tikz}

\usetheme{metropolis}
\setbeamercolor{background canvas}{bg=white!20}

\title{An Efficient Quantum Computing Simulator using a Graphical Description for Many-Qbit Systems}
\subtitle{Simulation in the Stabilizer Formalism}
\author{Daniel Knüttel}
\date{21.02.2020}
\institute{Universität Regensburg}
\titlegraphic{\small\center Universität Regensburg\\
  Faculty of the Institute of Theoretical Physics
  \vspace{-11mm}\flushright\includegraphics[height=1.0cm]{logo.png}}

\makeatletter
\setbeamertemplate{footline}
{
  %\leavevmode%
  \hbox{%
  \begin{beamercolorbox}[wd=.9\paperwidth,ht=2.25ex,dp=1ex,left]{Faculty of the Institute of Theoretical Physics}%
  \end{beamercolorbox}%
  \begin{beamercolorbox}[wd=.1\paperwidth,ht=2.25ex,dp=1ex,right]{Faculty of the Institute of Theoretical Physics}%
    \insertframenumber{} / \inserttotalframenumber\hspace*{2ex}
  \end{beamercolorbox}}%
}
\makeatother

\begin{document}
\maketitle

\section{Introduction}

{
\begin{frame}{Motivation: Exponentially Hard (Physical) Problems}
  \begin{itemize}
    \item{Some mathematical problems are exponentially hard to solve, for instance prime factorization.}
    \item{Several physical systems are interesting to study but hard to simulate, such as QCD at finite chemical potential or real-time scattering amplitudes in QCD.}
    \item{The exponential behaviour in time (and space) complexity pushes classical supercomputers to their limits.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{The Universal Quantum Computer}
  \begin{itemize}
    % 2^n complex vector, arbitrary unitary, measurements
    \item{A universal quantum computer operates on a state in a $2^n$ dimensional complex vector space; arbitrary unitaries can be applied to this state, and it can be measured quantum mechanically.}
    \item{Algorithms exploiting this exponentially large Hilbert space can solve some classically exponentially hard problems in polynomial time.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Motivation: Using Quantum Computing for Physics}
  \begin{itemize}
    % Literature overview.
    \item{
      Possible applications of quantum computing include:
      \begin{itemize}
        \item{Preparing ground states of non-perturbative systems by adiabatic time evolution (\textit{https://www.scottaaronson.com/qclec/26.pdf}).\\
          This also works in QCD (\textit{arXiv:2001.11145v1}).}
        %\item{Use algorithms such as Phase Estimation to analyze the spectrum
        %  of a system.}
        \item{Studying the time evolution by applying the transfer matrix (\textit{see later}).}
        \item{Computing trace formulae (\textit{arXiv:2001.11145v1}).}
      \end{itemize}
    }
    % Future use & current use.
    % (Tensor networks)
    \item{
      In the near future only small and noisy machines will be available, and some Hilbert spaces are even infinite dimensional (bosonic states). Two main strategies exist to deal with this problem:
      \begin{itemize}
        \item{Selecting a physically interesting subspace.
          % (tensor networks)
        }
        \item{Dividing the Hilbert space into system and bath, treating the bath statistically (\textit{Andreas Hackl}).}
      \end{itemize}
    }
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Quantum Errors and Quantum Error Correction}
  \begin{itemize}
    \item Quantum systems at non-zero temperature often show dephasing effects and a finite population lifetime (relaxation).
    %\pause
    \item Fault tolerant QC needs a way to correct for those errors.
    %\pause
    \item Several strategies exist; \textbf{stabilizer codes} form one important class of quantum error correction codes.
    \item{Parts of the theoretical description of quantum errors can be used for physical problems (see above).}
  \end{itemize}
\end{frame}
}

\section{Binary Quantum Computing}

{
\begin{frame}{Qbits and Gates}
  \begin{itemize}
    \item{A qbit is a two-level quantum mechanical system. Its Hilbert space $\mathcal{H}$ is two-dimensional and has the basis vectors $\ket{0}, \ket{1}$.}
    \item{$n$ qbits have the Hilbert space $\mathcal{H}^{\otimes n}$.}
    \item{Gates on a quantum computer are unitary operators acting on $\mathcal{H}^{\otimes n}$.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Notable Gates on One Qbit}
  \begin{itemize}
    \item{The Pauli matrices $X, Y, Z$ are commonly used gates. $X$ is also called the $NOT$ gate, as it maps $\ket{0}$ to $\ket{1}$ and vice versa.}
    \item{The Hadamard gate $H = \frac{1}{\sqrt{2}}\left(\begin{array}{cc} 1 & 1 \\ 1 & -1\end{array}\right)$ transforms from the Pauli $Z$ to the $X$ basis.}
    \item{The $R_\phi = \left(\begin{array}{cc} 1 & 0 \\ 0 & \exp(i\phi)\end{array}\right)$ gate rotates a state vector around the $Z$ axis.}
    \item{The $R_{\frac{\pi}{2}}$ gate transforms from the $X$ to the $Y$ basis.}
  \end{itemize}
\end{frame}
}
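{
\begin{frame}[fragile]{Sketch: Single-Qbit Gates in NumPy}
  A minimal \lstinline{numpy} sketch (illustration only, independent of
  \lstinline{pyqcs}) checking the two basis changes from the previous slide:
  \begin{lstlisting}[language=Python, basicstyle=\scriptsize\ttfamily]
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)

def R(phi):
    # Rotation around the Z axis.
    return np.array([[1, 0], [0, np.exp(1j * phi)]], dtype=complex)

# Conjugation maps one Pauli basis into another.
assert np.allclose(H @ Z @ H.conj().T, X)                    # Z -> X
assert np.allclose(R(np.pi/2) @ X @ R(np.pi/2).conj().T, Y)  # X -> Y
  \end{lstlisting}
\end{frame}
}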
{
\begin{frame}{Notable Gates on $n$ Qbits}
  \begin{itemize}
    \item{For a unitary $U$ acting on $\mathcal{H}$
      \begin{equation}
        U_i := \left(\bigotimes\limits_{l < i} I\right) \otimes U \otimes \left(\bigotimes\limits_{l > i} I\right)
      \end{equation}
      is the $U$ gate acting on qbit $i$.}
    \item{For two qbits $i\neq j$ the controlled $X$ gate is
      \begin{equation}
        CX_{i,j} = \ket{0}\bra{0}_j \otimes I_i + \ket{1}\bra{1}_j \otimes X_i.
      \end{equation}}
  \end{itemize}
\end{frame}
}
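{
\begin{frame}[fragile]{Sketch: Embedding Gates into $n$ Qbits}
  A sketch (not the \lstinline{pyqcs} implementation) of $U_i$ and $CX_{i,j}$
  built from Kronecker products; it assumes that qbit $0$ is the least
  significant bit of the basis index:
  \begin{lstlisting}[language=Python, basicstyle=\scriptsize\ttfamily]
import numpy as np
from functools import reduce

I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)

def embed(U, i, n):
    # Factors ordered from qbit n-1 (most significant) down to qbit 0.
    return reduce(np.kron, [U if l == i else I2 for l in reversed(range(n))])

def CX(i, j, n):
    # |0><0|_j (x) I_i  +  |1><1|_j (x) X_i   (j: control, i: target)
    P0, P1 = np.diag([1., 0.]), np.diag([0., 1.])
    return embed(P0, j, n) + embed(P1, j, n) @ embed(X, i, n)

psi = np.zeros(4, dtype=complex); psi[0b01] = 1          # |0b01>
assert np.allclose(CX(1, 0, 2) @ psi, np.eye(4)[0b11])   # -> |0b11>
  \end{lstlisting}
\end{frame}
}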
{
\begin{frame}{Universal Gates}
  \begin{itemize}
    \item{A quantum computer should be able to simulate any unitary on $\mathcal{H}^{\otimes n}$.}
    \item{Similarly to classical computers, a universal set of operations is required.}
    \item{One can show that any unitary acting on $\mathcal{H}^{\otimes n}$ can be generated using the $CX$ gate together with universal gates acting on $\mathcal{H}$.}
    \item{The gates $\{H, R_\phi\}$ are universal on $\mathcal{H}$.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Measurements and Computational Hardness}
  \begin{itemize}
    \item{When measuring a qbit $i$ the observable $Z_i$ is measured.}
    \item{The Hilbert space $\mathcal{H}^{\otimes n}$ has the integer basis
      \begin{equation}
        \ket{j} = \ket{\mbox{0b}j_{n-1}...j_1j_0} = \bigotimes\limits_{l=0}^{n-1} \ket{j_l}.
      \end{equation}
    }
    \item{A general state $\ket{\psi}$ has $2^n$ coefficients in this basis.}
    \item{In general an operation on the state $\ket{\psi}$ will have to update $2^n$ coefficients. Mapping a general state $\ket{\psi}$ to $\ket{\psi'}$ therefore cannot be performed in $\mbox{poly}(n)$ time.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Case Study: Spin Chain in a Magnetic Field}
  \begin{itemize}
    \item{
      For a set of $n$ spins in a magnetic field one can rescale the Hamiltonian of the system to
      \begin{equation}
        H = -\sum\limits_{i=1}^{n-1} Z_i Z_{i-1} + g\sum\limits_{i=0}^{n-1} X_i
      \end{equation}
    }
    %\pause
    \item{
      The time evolution of such a system is given by the transfer matrix
      \begin{equation}
        T := \exp(-itH) \in U(2^n)
      \end{equation}
    }
    %\pause
    \item{
      By associating every qbit with one spin (both are two-level systems) one should be able to simulate the behaviour of the spin chain using a quantum computer.
    }
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Case Study: Spin Chain in a Magnetic Field}
  \begin{itemize}
    \item{
      Trotterizing the matrix exponential:
      \begin{equation}
        \begin{aligned}
          \exp(t(A + B)) &= \left(\exp(\frac{t}{2N}A)\exp(\frac{t}{N}B)\exp(\frac{t}{2N}A)\right)^N \\
          & + \mathcal{O}\left(\frac{t^3}{N^2}\right)\\
        \end{aligned}
      \end{equation}
    }
    \item{
      For $n=3$ spins one gets the circuit
      {\centering\adjustbox{max width=\textwidth}{
      \Qcircuit @C=1em @R=.7em {
        & \qw & \gate{X} & \gate{R_{-\frac{t}{2N}}} & \gate{X} & \gate{R_{\frac{t}{2N}}} & \gate{X} & \gate{X} & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \gate{H} & \gate{R_{-2\frac{gt}{2N}}} & \gate{H} & \gate{R_{\frac{gt}{2N}}} & \gate{X} & \gate{R_{\frac{gt}{2N}}} & \gate{X} & \gate{X} & \gate{R_{-\frac{t}{2N}}} & \gate{X} & \gate{R_{\frac{t}{2N}}} & \gate{X} & \gate{X} & \qw & \qw & \qw & \qw & \qw & \qw & \qw &\qw \\
        & \qw & \ctrl{-1} & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw & \gate{X} & \gate{R_{-\frac{t}{2N}}} & \gate{X} & \gate{R_{\frac{t}{2N}}} & \gate{X} & \gate{X} & \gate{H} & \gate{R_{-2\frac{gt}{2N}}} & \gate{H} & \gate{R_{\frac{gt}{2N}}} & \gate{X} & \gate{R_{\frac{gt}{2N}}} & \gate{X} & \ctrl{-1} & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw & \gate{X} & \gate{R_{-\frac{t}{2N}}} & \gate{X} & \gate{R_{\frac{t}{2N}}} & \gate{X} & \gate{X} &\qw \\
        & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw & \qw & \qw & \ctrl{-1} & \gate{H} & \gate{R_{-2\frac{gt}{2N}}} & \gate{H} & \gate{R_{\frac{gt}{2N}}} & \gate{X} & \gate{R_{\frac{gt}{2N}}} & \gate{X} & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw & \qw & \qw & \ctrl{-1} &\qw \\
      }
      }}
    }
    \item{Applying this circuit $N$ times gives an approximation for the time evolution of a state.}
  \end{itemize}
\end{frame}
}
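{
\begin{frame}[fragile]{Sketch: Checking the Trotterization Numerically}
  A small \lstinline{scipy} sketch (dense matrices, not the circuit above)
  comparing the symmetric Trotter product with the exact transfer matrix for
  $n=3$ spins; the parameters are chosen arbitrarily:
  \begin{lstlisting}[language=Python, basicstyle=\scriptsize\ttfamily]
import numpy as np
from functools import reduce
from scipy.linalg import expm

n, g, t, N = 3, 3.0, 1.0, 100
I2, X, Z = np.eye(2), np.array([[0., 1.], [1., 0.]]), np.diag([1., -1.])

def embed(U, i):
    return reduce(np.kron, [U if l == i else I2 for l in reversed(range(n))])

A = -sum(embed(Z, i) @ embed(Z, i - 1) for i in range(1, n))  # coupling term
B = g * sum(embed(X, i) for i in range(n))                    # field term

exact = expm(-1j * t * (A + B))                               # T = exp(-itH)
step = expm(-1j*t/(2*N)*A) @ expm(-1j*t/N*B) @ expm(-1j*t/(2*N)*A)
trotter = np.linalg.matrix_power(step, N)
print(np.max(np.abs(trotter - exact)))   # small; shrinks roughly like 1/N^2
  \end{lstlisting}
\end{frame}
}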
{
\begin{frame}{Case Study: Spin Chain in a Magnetic Field}
  \begin{figure}[h]
    \begin{center}
      \includegraphics[width=\linewidth]{spin_chain/time_evo_6spin_g3.png}
    \end{center}
  \end{figure}
\end{frame}
}

\section{Stabilizers}

{
\begin{frame}{The Gottesman-Knill Theorem}
  \begin{itemize}
    \item{When restricting the $R_\phi$ gate to $\phi = \frac{\pi}{2}$, many states can still be simulated.}
    \item{\textbf{Theorem} (\textit{Gottesman-Knill}): All states that can be reached from the $\ket{0}^{\otimes n}$ state using $H, R_\frac{\pi}{2}, CZ$ and measurements can be simulated and sampled efficiently, i.e. in $\mbox{poly}(n, m)$ time, where $m$ is the number of gates and measurements.}
    \item{Note that a general state has $2^n$ complex coefficients. Computing operations on such a state is therefore exponentially hard in $n$.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{The Multilocal Pauli Group and the Clifford Group}
  \begin{itemize}
    \item{Why is simulating $H, R_\frac{\pi}{2}, CZ$ and measurements so much easier?}
    %\pause
    \item{
      The Pauli group is $P := \{\pm 1, \pm i\} \cdot \{X, Y, Z, I\}$. $P_n := P^{\otimes n}$ is called the multilocal Pauli group.
    }
    %\pause
    \item{
      $C_n$ is the normalizer of $P_n$, i.e. it maps $P_n$ to itself under conjugation.\\
      One can show that $C_n$ is generated by $H, R_\frac{\pi}{2}, CZ_{i,j}$.
    }
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Stabilizers and Stabilizer Spaces}
  \begin{itemize}
    \item{
      Choose a finite Abelian subgroup $S$ of $P_n$ with $-I \notin S$.
    }
    \item{
      One says $S = \langle S^{(i)} \rangle_{i=1,...,n}$ is generated by the $S^{(i)}$ if every element of $S$ can be expressed as a product of the $S^{(i)}$ and the $S^{(i)}$ are a minimal set of matrices with this property.
    }
    \item{
      One can show that for $S = \langle S^{(i)} \rangle_{i=1,...,n}$ the stabilizer space $V_S := \{\ket{\psi} | S^{(i)}\ket{\psi} = \ket{\psi} \forall i\}$ has dimension $1$. The state $\ket{\psi}$ is therefore unique up to a trivial (global) phase.
    }
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Some Notable Stabilizer States}
  \begin{itemize}
    \item{The state $\ket{\mbox{0b}00}$ is stabilized by $\langle Z_0, Z_1\rangle$.}
    \item{Applying the Hadamard gate to the first qbit changes the state to $\frac{1}{\sqrt{2}}\left(\ket{\mbox{0b}00} + \ket{\mbox{0b}01}\right)$. This state is stabilized by $\langle H_0 Z_0 H_0^\dagger, Z_1 \rangle = \langle X_0, Z_1 \rangle$.
    }
    \item{Applying a $CX_{1, 0}$ gate yields $\frac{1}{\sqrt{2}}\left(\ket{\mbox{0b}00} + \ket{\mbox{0b}11}\right)$, the famous EPR/Bell state, which is stabilized by $\langle CX_{1, 0} X_0 CX_{1, 0}^\dagger, CX_{1, 0} Z_1 CX_{1, 0}^\dagger \rangle = \langle X_0 X_1, Z_0 Z_1 \rangle$.
    }
    \item{When measuring qbit $0$ the resulting state is either $\ket{\mbox{0b}00}$ or $\ket{\mbox{0b}11}$, and the stabilizers are either $\langle Z_0, Z_1\rangle$ or $\langle -Z_0, -Z_1\rangle$.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Dynamics and Measurements}
  \begin{itemize}
    \item{In general a Clifford gate $U \in C_n$ will map a stabilizer state to another stabilizer state. The new state is stabilized by $\langle U S^{(i)} U^\dagger \rangle_i$.}
    \item{A Pauli observable $g_a \in \{X_a, Y_a, Z_a\}$ will either commute with all stabilizers (in this case $g_a$ or $-g_a$ is a stabilizer, the measurement is deterministic and the stabilizers are unchanged) or anticommute with at least one stabilizer.}
    \item{If $g_a$ anticommutes with a subset $A := \{S^{(i)} | \{S^{(i)}, g_a\} = 0 \}$, the probability to measure $+1$ or $-1$ is $\frac{1}{2}$ each and the stabilizers in $A$ are changed.}
    \item{When measuring $-g_a$ instead of $+g_a$ the results are changed from $+1$ to $-1$ and vice versa.}
  \end{itemize}
\end{frame}
}
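{
\begin{frame}[fragile]{Sketch: Updating Stabilizers by Conjugation}
  A \lstinline{numpy} check (sketch only) of the update rule
  $S^{(i)} \mapsto U S^{(i)} U^\dagger$, reproducing the Bell-state
  stabilizers from the previous slides (qbit $0$ is the least significant bit):
  \begin{lstlisting}[language=Python, basicstyle=\scriptsize\ttfamily]
import numpy as np

I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.diag([1., -1.]).astype(complex)
H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)

Z0, Z1 = np.kron(I2, Z), np.kron(Z, I2)
X0, X1 = np.kron(I2, X), np.kron(X, I2)
H0 = np.kron(I2, H)
CX10 = np.kron(I2, np.diag([1., 0.])) + np.kron(X, np.diag([0., 1.]))

def conj(U, g):
    return U @ g @ U.conj().T

S = [Z0, Z1]                       # stabilizers of |0b00>
S = [conj(H0, g) for g in S]       # -> <X_0, Z_1>
S = [conj(CX10, g) for g in S]     # -> <X_0 X_1, Z_0 Z_1>
assert np.allclose(S[0], X0 @ X1) and np.allclose(S[1], Z0 @ Z1)
  \end{lstlisting}
\end{frame}
}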
{
\begin{frame}{Graphical States}
  \begin{itemize}
    \item{The graphical representation of stabilizer states is an optimized way to write the stabilizers.}
    \item{
      $(V, E, O)$ is called the graphical representation of a stabilizer state if $V = \{0, ..., n-1\}$, $E \subset \{\{i,j\} | i,j \in V, i \neq j \}$ and $O = \{o_0, ..., o_{n-1}\}$ where $o_i \in C_1$. $G = (V, E)$ is a graph; the $o_i$ are called vertex operators.
    }
    \item{The state associated with $(V, E, O)$ is given by
      \begin{equation}
        \ket{G} = \left(\bigotimes\limits_{i=0}^{n-1} o_i \right) \prod\limits_{\{i,j\} \in E} CZ_{i,j} \ket{+}^{\otimes n}
      \end{equation}
      where $\ket{+} = H\ket{0} = \frac{1}{\sqrt{2}}(\ket{0} + \ket{1})$.
    }
    \item{One can show that all stabilizer states can be brought into this form.}
    \item{The stabilizers of the underlying graph state (all $o_i = I$) are $K_G^{(i)} = X_i \prod\limits_{\{i,j\} \in E} Z_j$.}
    %\item{The stabilizers associated with $(V, E, O)$ are
    %  \begin{equation}
    %    \left\langle\left(\bigotimes\limits_{j=0}^{n-1} o_j \right) K_G^{(i)} \left(\bigotimes\limits_{j=0}^{n-1} o_j \right)^\dagger\right\rangle_i
    %  \end{equation}
    %  where
    %  \begin{equation}
    %    K_G^{(i)} = X_i \prod\limits_{\{i,j\} \in E} Z_j.
    %  \end{equation}
    %}
  \end{itemize}
\end{frame}
}

%{
%\begin{frame}{Graphical States}
%  \begin{itemize}
%  \end{itemize}
%\end{frame}
%}

{
\begin{frame}{Dynamics of Graphical States}
  \begin{itemize}
    \item{Applying a local Clifford gate $U_i$ is trivial: just the vertex operator is updated to $U o_i$.}
    \item{If $o_a, o_b \in \{I, Z, R_\frac{\pi}{2}, R_\frac{\pi}{2}^\dagger\}$, applying a $CZ_{a,b}$ just toggles the edge $\{a,b\}$ in $E$: $E' = E \,\Delta\, \{\{a,b\}\}$, where $\Delta$ is the symmetric set difference.}
    \item{If the vertices $a, b$ are isolated, the resulting state after applying $CZ_{a,b}$ can be precomputed.}
    \item{When none of the above is possible, one can clear at least one vertex operator (i.e. transform it to $I$).}
    \item{If both vertex operators can be cleared, $\{a,b\}$ is toggled in $E$. If just one vertex operator has been cleared, the other vertex is isolated and one can precompute all resulting states.}
  \end{itemize}
\end{frame}
}
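{
\begin{frame}[fragile]{Sketch: A Simplified Graphical State}
  A simplified Python sketch of the bookkeeping described above (adjacency
  sets plus vertex operators stored as matrices). This is an illustration,
  not the \lstinline{pyqcs} data structure, and the non-trivial $CZ$ cases
  are omitted:
  \begin{lstlisting}[language=Python, basicstyle=\scriptsize\ttfamily]
import numpy as np

I2 = np.eye(2, dtype=complex)
H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)

class GraphState:
    def __init__(self, n):
        self.E = [set() for _ in range(n)]     # neighbourhoods
        self.O = [H.copy() for _ in range(n)]  # |0>^n: o_i = H, no edges

    def apply_local(self, U, i):
        self.O[i] = U @ self.O[i]              # only the VOP changes

    def toggle_edge(self, a, b):
        self.E[a] ^= {b}                       # symmetric set difference
        self.E[b] ^= {a}

    def apply_CZ(self, a, b):
        # Diagonal VOPs (I, Z, R_{pi/2}, R_{pi/2}^dagger) commute with CZ.
        if all(np.allclose(o, np.diag(np.diag(o)))
               for o in (self.O[a], self.O[b])):
            self.toggle_edge(a, b)
        else:
            raise NotImplementedError("clear VOPs / use the lookup table")
  \end{lstlisting}
\end{frame}
}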
{
\begin{frame}{Clearing Vertex Operators}
  \begin{itemize}
    \item{One can show that for a non-isolated vertex $j$ the unitary $M_j = \sqrt{-iX_j} \prod\limits_{\{l,j\} \in E} \sqrt{iZ_l}$, when applied to a graphical state with $O = \{I, ..., I\}$, toggles the neighbourhood of $j$.}
    \item{The operation $L_j$, which simultaneously toggles the neighbourhood of $j$ and right-multiplies $M_j^\dagger$ to the vertex operators, keeps the state $\ket{G}$ invariant.}
    \item{The group $C_1$ is generated by $\sqrt{-iX}, \sqrt{iZ}$. Any $U \in C_1$ is the product of at most $5$ of those matrices.
    }
    \item{
      Let $a$ be the vertex to be cleared. Then there is a neighbour $j \neq b$ of $a$, where $b$ is the other qbit of the $CZ_{a,b}$ being applied. Clearing the vertex operator $o_a$ is done by moving from right to left through the product and applying
      \begin{itemize}
        \item{$L_a$ if the current matrix is $\sqrt{-iX}$ or}
        \item{$L_j$ if the current matrix is $\sqrt{iZ}$.}
      \end{itemize}
    }
  \end{itemize}
\end{frame}
}
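{
\begin{frame}[fragile]{Sketch: Decomposing $C_1$ into $\sqrt{-iX}$ and $\sqrt{iZ}$}
  A brute-force sketch (illustration only) enumerating products of
  $\sqrt{-iX}$ and $\sqrt{iZ}$; up to a global phase it recovers the $24$
  elements of $C_1$ as words of at most $5$ factors, which is the kind of
  product the clearing procedure walks through:
  \begin{lstlisting}[language=Python, basicstyle=\scriptsize\ttfamily]
import numpy as np
from itertools import product

X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.diag([1., -1.]).astype(complex)
sqrt_miX = (np.eye(2) - 1j * X) / np.sqrt(2)   # squares to -iX
sqrt_iZ = (np.eye(2) + 1j * Z) / np.sqrt(2)    # squares to  iZ

def key(U):
    # Canonical form up to a global phase.
    pivot = next(u for u in U.flat if abs(u) > 0.5)
    return np.round(U / pivot, 6).tobytes()

found = {}
for length in range(6):                        # words of length 0..5
    for word in product("XZ", repeat=length):
        U = np.eye(2, dtype=complex)
        for c in word:
            U = U @ (sqrt_miX if c == "X" else sqrt_iZ)
        found.setdefault(key(U), word)         # keep the shortest word

print(len(found))                              # -> 24 (up to phase)
print(max(len(w) for w in found.values()))     # -> at most 5 factors
  \end{lstlisting}
\end{frame}
}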
{
\begin{frame}{Clearing VOPs: Example}
  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_01.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $19 = \left[\begin{matrix}\frac{\sqrt{2}}{2} & \frac{\sqrt{2} i}{2}\\- \frac{\sqrt{2} i}{2} & - \frac{\sqrt{2}}{2}\end{matrix}\right]$
  \end{minipage}

  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_02.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $21 = \left[\begin{matrix}0 & -1\\1 & 0\end{matrix}\right]$
  \end{minipage}
\end{frame}
}

{
\begin{frame}{Clearing VOPs: Example}
  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_02.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $21 = \left[\begin{matrix}0 & -1\\1 & 0\end{matrix}\right]$
  \end{minipage}

  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_03.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $11 = \left[\begin{matrix}\frac{\sqrt{2}}{2} & - \frac{\sqrt{2} i}{2}\\\frac{\sqrt{2} i}{2} & - \frac{\sqrt{2}}{2}\end{matrix}\right]$
  \end{minipage}
\end{frame}
}

{
\begin{frame}{Clearing VOPs: Example}
  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_03.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $11 = \left[\begin{matrix}\frac{\sqrt{2}}{2} & - \frac{\sqrt{2} i}{2}\\\frac{\sqrt{2} i}{2} & - \frac{\sqrt{2}}{2}\end{matrix}\right]$
  \end{minipage}

  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_04.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $5 = \left[\begin{matrix}1 & 0\\0 & -1\end{matrix}\right]$
  \end{minipage}
\end{frame}
}

{
\begin{frame}{Clearing VOPs: Example}
  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_04.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $5 = \left[\begin{matrix}1 & 0\\0 & -1\end{matrix}\right]$
  \end{minipage}

  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_05.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $7 = \left[\begin{matrix}\frac{\sqrt{2}}{2} & - \frac{\sqrt{2}}{2}\\\frac{\sqrt{2}}{2} & \frac{\sqrt{2}}{2}\end{matrix}\right]$
  \end{minipage}
\end{frame}
}

{
\begin{frame}{Clearing VOPs: Example}
  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_05.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $7 = \left[\begin{matrix}\frac{\sqrt{2}}{2} & - \frac{\sqrt{2}}{2}\\\frac{\sqrt{2}}{2} & \frac{\sqrt{2}}{2}\end{matrix}\right]$
  \end{minipage}

  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_06.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $2 = \left[\begin{matrix}1 & 0\\0 & 1\end{matrix}\right]$
  \end{minipage}
\end{frame}
}

{
\begin{frame}{Clearing VOPs: Example}
  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_01.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $19 = \left[\begin{matrix}\frac{\sqrt{2}}{2} & \frac{\sqrt{2} i}{2}\\- \frac{\sqrt{2} i}{2} & - \frac{\sqrt{2}}{2}\end{matrix}\right]$
  \end{minipage}

  \noindent\begin{minipage}{0.5\textwidth}
    \includegraphics[width=\textwidth]{graphs/clear_vop_06.png}
  \end{minipage}
  \hfill
  \begin{minipage}{0.4\textwidth}
    Vertex operator \\
    $2 = \left[\begin{matrix}1 & 0\\0 & 1\end{matrix}\right]$
  \end{minipage}
\end{frame}
}

\section{Implementation and Performance}

{
\begin{frame}{Implementation}
  \begin{itemize}
    \item{Both a dense vector simulator and a simulator using the graphical representation have been implemented in the \lstinline{python3} package \lstinline{pyqcs}.}
    \item{To increase simulation efficiency, the core of both simulators has been implemented in \lstinline{C}.}
    \item{The dense vector states are stored in \lstinline{numpy} arrays.}
    \item{The graph is stored in a length-$n$ array of linked lists. The vertex operators are stored in a \lstinline{uint8_t} array.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Performance: Dense Vector vs. Graphical Representation}
  \includegraphics[width=\textwidth]{../performance/scaling_qbits_linear.png}
\end{frame}
}

{
\begin{frame}{Performance: Dense Vector vs. Graphical Representation}
  \includegraphics[width=\textwidth]{../performance/scaling_qbits_log.png}
\end{frame}
}

{
\begin{frame}{Performance: Circuit Length on Graphical Representation}
  \includegraphics[width=\textwidth]{../performance/regimes/scaling_circuits_linear.png}
\end{frame}
}

{
\begin{frame}{Performance: Circuit Length on Graphical Representation}
  \begin{itemize}
    \item{There seem to be three regimes: a low-linear, an intermediate and a high-linear regime.}
    \item{In the low-linear regime only few VOPs have to be cleared. The length of this regime increases with the number of qbits.}
    \item{In the high-linear regime the neighbourhoods are big; the probability that VOPs must be cleared is high, and clearing them involves many vertices.}
    \item{The intermediate regime is dominated by growing neighbourhoods $\Rightarrow$ no linear behaviour.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Graph in the Low-Linear Regime}
  \includegraphics[width=\textwidth]{../thesis/graphics/graph_low_linear_regime.png}
\end{frame}
}

{
\begin{frame}{Window in a Graph in the Intermediate Regime}
  \includegraphics[width=\textwidth]{../thesis/graphics/graph_intermediate_regime_cut.png}
\end{frame}
}

{
\begin{frame}{Window in a Graph in the High-Linear Regime}
  \includegraphics[width=\textwidth]{../thesis/graphics/graph_high_linear_regime_cut.png}
\end{frame}
}

{
\begin{frame}{Performance: Circuit Length on Graphical Representation}
  \begin{itemize}
    \item{Pauli measurements reduce the entanglement entropy.}
    \item{Pauli measurements reduce the number of edges in the graph.}
    \item{When adding measurements to the random circuits, the regimes do not show up.}
  \end{itemize}
\end{frame}
}

{
\begin{frame}{Performance: Circuit Length on Graphical Representation}
  \includegraphics[width=\textwidth]{../performance/regimes/scaling_circuits_measurements_linear.png}
\end{frame}
}

\section{Conclusion and Outlook}

\end{document}