\usepackage[ngerman,english]{babel}
%% Acronyms
\PassOptionsToPackage{acronym}{glossaries}
\usepackage{glossaries}
\PassOptionsToPackage{ngerman,english}{babel}\RequirePackage{babel}
% TeX root = ../main.tex\begin{tikzpicture}[program]\node[state,initial] (l0) {$l_0$};\node[state] (l1) [below=of l0] {$l_1$};\node[state] (l2) [below left =of l1] {$l_2$};\node[state] (l3) [below=of l2] {$l_3$};\node[state] (l4) [below right=of l3] {$l_4$};\node[state] (l5) [right=of l4] {$l_5$};\node[state] (l6) [above right=of l5] {$l_6$};\node[state] (l7) [above=of l6] {$l_7$};\node[state] (l8) [above left=of l7] {$l_8$};\node[state] (l9) [below right=of l2] {$l_9$};\node[state] (l10) [right=of l9] {$l_{10}$};% \path[->] (l0) edge node[swap] {$\textit{true}$} (l1)% (l1) edge node[swap] {$x > 0$} (l2)% edge node[swap] {$x \leq 0$} (l4)% (l2) edge node[swap] {$y > 0$} (l3)% edge node[swap] {$\begin{aligned}% y &\leq 0 \\% x &:= x-1 \\% z &:= z+2 \\% \end{aligned}$} (l1)% (l3) edge node[swap] { \begin{aligned}%% x &:= x+y \\% y &:= y-1% \end{aligned}} (l3);\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[scale=0.5]\begin{axis}[axis lines = center,axis on top,xmin=-3.3,xmax=3.3,ymin=0,ymax=5.3,y=1cm,x=1cm,ytick={-0,...,5},xtick={-3,...,3},xlabel = x,ylabel = y,font=\tiny]\addplot [ name path = A, thick ] {x + 4};\addplot [ name path = B, thick ] {x + 1};\addplot [ name path = C, thick ] {-x + 4};\addplot [ name path = D, thick ] {-x + 1};\addplot[name path=top,draw=none] {5.3};\addplot[name path=bottom,draw=none] {0};\addplot[pattern=north west lines,pattern color=blue!30] fillbetween[of=A and top];\addplot[pattern=north west lines,pattern color=blue!30] fillbetween[of=C and top];\addplot[pattern=north west lines,pattern color=blue!30] fillbetween[of=B and bottom];\addplot[pattern=north west lines,pattern color=blue!30] fillbetween[of=D and bottom];\addplot[only marks,mark=o] coordinates {(0, 4)(-1, 3) (0, 3) (1, 3)(-1, 2) (0, 2) (1, 2)(0, 1)};\addplot[fill=red,fill opacity=0.20] fill between [of=B and bottom,softclip={domain=0:1.5}];\addplot[fill=red,fill opacity=0.20] fill between [of=D and bottom,softclip={domain=-1.5:0}];\end{axis}\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[scale=0.5]\begin{axis}[axis lines = center,axis on top,xmin=-3.3,xmax=3.3,ymin=-3.3,ymax=3.3,y=1cm,x=1cm,ytick={-3,...,3},xtick={-3,...,3},xlabel = x,ylabel = y,font=\tiny]\addplot [% domain=-3:3,name path = A,thick,% pattern=north east lines,] {0.5*x + 1};\addplot[name path=C,draw=none] {3.3};\addplot [% domain={-1.65:3},name path = B,thick,% pattern=north east lines,] {0.5*x*x-2};\addplot[name path=D,draw=none] {-3.3};\addplot[pattern=north west lines,pattern color=blue!30] fill between[of=A and C];\addplot[pattern=north east lines,pattern color=red!30] fill between[of=B and D];\addplot[only marks,mark=o] coordinates {(2, 2)(0, 1) (1, 1) (2, 1)(-1, 0) (0, 0) (1, 0)(-1, -1) (0, -1) (1, -1)};\end{axis}\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[scale=0.5]\begin{axis}[axis lines = center,axis on top,xmin=-3.3,xmax=3.3,ymin=-3.3,ymax=3.3,y=1cm,x=1cm,ytick={-3,...,3},xtick={-3,...,3},xlabel = x,ylabel = y,font=\tiny]\addplot [% domain={-1.65:3},name path = B,thick,% pattern=north east lines,] {0.5*x*x-2};\addplot[name path=D,draw=none] {-3.3};\addplot[pattern=north east lines,pattern color=red!30] fill between[of=B and D];\addplot[only marks,mark=o] coordinates {(-3, 3) (-2, 3) (-1, 3) (0, 3) (1, 3) (2, 3) (3, 3)(-2, 2) (-1, 2) (0, 2) (1, 2) (2, 2)(-2, 1) (-1, 1) (0, 1) (1, 1) (2, 1)(-2, 1) (-1, 1) (0, 1) (1, 1) (2, 1)(-1, 0) (0, 0) (1, 0)(-1, -1) (0, -1) (1, -1)};\end{axis}\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[scale=0.5]\begin{axis}[axis lines = center,axis on top,xmin=-3.3,xmax=3.3,ymin=-3.3,ymax=3.3,y=1cm,x=1cm,ytick={-3,...,3},xtick={-3,...,3},xlabel = x,ylabel = y,font=\tiny]\addplot [% domain=-3:3,name path = A,thick,% pattern=north east lines,] {0.5*x + 1};\addplot[name path=C,draw=none] {3.3};\addplot[pattern=north west lines,pattern color=blue!30] fill between[of=A and C];\addplot[only marks,mark=o] coordinates {(2, 2) (3, 2)(0, 1) (1, 1) (2, 1) (3, 1)(-2, -0) (-1, -0) (0, -0) (1, -0) (2, -0) (3, -0)(-3,-1) (-2, -1) (-1, -1) (0, -1) (1, -1) (2, -1) (3, -1)(-3,-2) (-2, -2) (-1, -2) (0, -2) (1, -2) (2, -2) (3, -2)(-3,-3) (-2, -3) (-1, -3) (0, -3) (1, -3) (2, -3) (3, -3)};\addplot[only marks,mark=o,red!80] coordinates { (0, -2) };\end{axis}\end{tikzpicture}
\node[state,initial] (l0) {$l_0$};\node[state] (l1) [right=3cm of l0] {$l_1$};\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};\node[state] (l1) [right=4cm of l0] {$\ell_1$};\node[state] (l2) [right=4cm of l1] {$\ell_2$};
\node[state,initial] (l0) {$l_0$};\node[state] (l1) [right=3cm of l0] {$l_1$};\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};\node[state] (l1) [right=4cm of l0] {$\ell_1$};\node[state] (l2) [right=4cm of l1] {$\ell_2$};
edge[in=35,out=80,looseness=15,draw=rot-75] node[above right] {$t_3:\begin{aligned}p&=0.5 \\x &> 0 \\ x&:=x+2\end{aligned}$} (l1)edge[loop below] node {$t_4:\begin{aligned} x &> 0 \\ x&:=x-1\end{aligned}$} ()
edge[in=35,out=80,looseness=15,draw=rot-75] node[above right] {$t_3:\begin{aligned} p&=0.5 \\ \tau &= x > 0 \\ \eta(x)&= x+2\end{aligned}$} (l1)
edge[loop below] node {$t_4:\begin{aligned} \tau&= x > 0\\ \eta(x)&= x-1\end{aligned}$} ()
\node[state,initial] (l0) {$l_0$};\node[state] (l1) [right=3cm of l0] {$l_1$};\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};\node[state] (l1) [right=4cm of l0] {$\ell_1$};\node[state] (l2) [right=4cm of l1] {$\ell_2$};
\node[state,initial] (l0) {$l_0$};\node[state] (l1) [right=3cm of l0] {$l_1$};\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};\node[state] (l1) [right=4cm of l0] {$\ell_1$};\node[state] (l2) [right=4cm of l1] {$\ell_2$};
\path[->] (l0) edge node {$t_1:\textit{true}$} (l1)(l1) edge node {$t_4:x \leq 0$} (l2)edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=x+1\end{aligned}$} ()edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=x-1\end{aligned}$} ()
\path[->] (l0) edge node {$t_1:\tau = \texttt{true}$} (l1)(l1) edge node {$t_4:\tau = x \leq 0$} (l2)edge[loop above] node {$t_2:\begin{aligned} \tau&= x > 1 \\\eta(x)&= x+1\end{aligned}$} ()edge[loop below] node {$t_3:\begin{aligned} \tau&= x > 0 \\\eta(x)&= x-1\end{aligned}$} ()
% TeX root = ../main.tex{\RestyleAlgo{plain}\SetAlgoNoLine\LinesNotNumbered\begin{algorithm}[H]$x = u$\;\lIf{$1 \leq x \leq 3$}{ $x = x+1 \oplus_\frac{1}{2} x = x$}\lWhile{$1 \leq x \leq 3$}{ $x = x+1 \oplus_\frac{1}{2} x = x$ }\lWhile{$y \geq 0$}{ $y = y-1 \oplus_\frac{1}{2} y = y$ }\end{algorithm}}
% TeX root = ../main.tex\begin{tikzpicture}[program,initial above,font=\scriptsize]\node[state,initial] (l0) {$l_0$};\node[state] (l1) [below=2cm of l0] {$l_1$};\node[state] (l1a) at ($(l1) - (3,1.5)$) {$l_{1a}$};\node[state] (l2) [below=3cm of l1] {$l_2$};\path[->](l0) edge node { $t_0: \eta(x) = u $ } (l1)(l1) edge [out=140,in=120,looseness=2,draw=rot-75]node[below=0.3cm,pos=0.35] {$t_{1a}:\begin{aligned} \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1) edge [out=130,in=130,looseness=2,draw=rot-75] node[above,pos=0.4] {$t_{1b}:\begin{aligned} \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1a) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}t_{1b}:& \\ \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1a) edge [bend right=15] node[swap] {$t_{2b}: \begin{aligned}\tau &= y > 0 \\&\Land w = 1\end{aligned}$} (l2)(l1) edge[bend left =20] node {$\begin{aligned}t_{2a}:& \\ \tau =& y > 0 \\\Land& w = 1\end{aligned}$} (l2)(l2) edge[bend left=20] node {$\begin{aligned}&t_3:\\ &\eta(y) = y - 1\end{aligned}$} (l1)% (l1) edge node {$t_4:x \leq 0$} (l2)% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=% x+1\end{aligned}$} ()% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=% x-1\end{aligned}$} ();\end{tikzpicture}
}
% TeX root = ../main.tex\begin{tikzpicture}[program,initial above,font=\scriptsize]\node[state,initial] (l0) {$l_0$};\node[state] (l1) [below=2cm of l0] {$l_1$};\node[state] (l2) [below=3cm of l1] {$l_2$};\path[->](l0) edge node { $t_0: \eta(x) = u $ } (l1)(l1) edge [out=110,in=150,looseness=10,draw=rot-75]node[swap]{$t_{1a}:\begin{aligned} p&=0.5 \\ \tau&= 1 \leq x \leq 3\Land w = 0\end{aligned}$} (l1)(l1) edge [out=160,in=200,looseness=10,draw=rot-75]node[swap]{$ t_{1b}: \begin{aligned}p&=0.5 \\ \tau&= 1 \leq x \leq 3\Land w = 0 \\ \eta(x)&= x + 1\end{aligned}$} (l1)(l1) edge[bend left =30] node {$t_{2}:\begin{aligned}\tau =& y > 0\Land w = 1\end{aligned}$} (l2)(l2) edge[bend left=30] node {$t_{3}:\begin{aligned}p&=0.5 \\ \eta(y) &= y - \textsc{Bern}(0.5)\end{aligned}$} (l1)% (l2) edge[bend left=30,draw=rwth-75] node[swap] {$t_{3a}: p=0.5$} (l1)% (l1) edge node {$t_4:x \leq 0$} (l2)% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=% x+1\end{aligned}$} ()% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=% x-1\end{aligned}$} ();\end{tikzpicture}
}
% TeX root = ../main.tex\begin{tikzpicture}[program,initial above,font=\scriptsize]\node[state,initial] (l0) {$l_0$};\node[state] (l1) [below=2cm of l0] {$l_1$};\node[state] (l1a) at ($(l1) - (3,1.5)$) {$l_{1a}$};\node[state] (l2) [below=3cm of l1] {$l_2$};\path[->](l0) edge node { $t_0: \eta(x) = u $ } (l1)(l1) edge [bend right=15] node[swap] {$t_{1a}:\begin{aligned} \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1a) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}t_{1b}:& \\ \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1a) edge [bend right=15] node[swap] {$t_{2b}: \begin{aligned}\tau &= y > 0 \\&\Land w = 1\end{aligned}$} (l2)(l1) edge[bend left =20] node {$\begin{aligned}t_{2a}:& \\ \tau =& y > 0 \\\Land& w = 1\end{aligned}$} (l2)(l2) edge[bend left=20] node {$\begin{aligned}&t_3:\\ &\eta(y) = y - 1\end{aligned}$} (l1)% (l1) edge node {$t_4:x \leq 0$} (l2)% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=% x+1\end{aligned}$} ()% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=% x-1\end{aligned}$} ();\end{tikzpicture}
}
% TeX root = ../main.tex\begin{tikzpicture}[program,initial above,font=\scriptsize]\node[state,initial] (l0) {$l_0$};\node[state] (l1) [below=2cm of l0] {$l_1$};\node[state] (l2) [below=3cm of l1] {$l_2$};\path[->](l0) edge node { $t_0: \eta(x) = u $ } (l1)(l1) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}t_1:& \\\tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1)(l1) edge[bend left =30] node {$\begin{aligned}t_{2}:& \\ \tau =& y > 0 \\\Land& w = 1\end{aligned}$} (l2)(l2) edge[bend left=30] node {$t_3: \eta(y) = y - 1$} (l1)% (l1) edge node {$t_4:x \leq 0$} (l2)% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=% x+1\end{aligned}$} ()% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=% x-1\end{aligned}$} ();\end{tikzpicture}
\begin{apdxlemma}{\ref{lem:entailment}}\label{apdx:lem:entailment}
    $\varphi_1$ entails $\varphi_2$ if and only if $\varphi_1 \Land \neg\varphi_2$ is unsatisfiable.
    \begin{proof}
        \enquote{$\Rightarrow$} Let $\varphi_1$ entail $\varphi_2$, and assume an assignment $\beta: \V \rightarrow \Z$ exists such that $\beta \models \varphi_1 \Land \neg \varphi_2$.
        \begin{align*}
            \Rightarrow & \beta \models \varphi_1 \\
            \Rightarrow & \beta \in \llbracket \varphi_1 \rrbracket \\
            \Rightarrow & \beta \in \llbracket \varphi_2 \rrbracket \\
            \Rightarrow & \beta \models \varphi_2 \\
            \Rightarrow & \beta \not\models \neg\varphi_2 \\
            \Rightarrow & \beta \not\models \varphi_1 \Land \neg\varphi_2 \lightning
        \end{align*}
        By contradiction, $\varphi_1 \Land \neg\varphi_2$ is unsatisfiable.

        \enquote{$\Leftarrow$} Let $\varphi_1 \Land \neg\varphi_2$ be unsatisfiable and assume $\llbracket \varphi_1 \rrbracket \not\subseteq \llbracket \varphi_2 \rrbracket$. There exists an assignment
        \begin{align*}
            & \beta \in \llbracket \varphi_1\rrbracket \\
            \Rightarrow & \beta \models \varphi_1 \\
        \end{align*}
        and
        \begin{align*}
            & \beta \not\in\llbracket\varphi_2\rrbracket \\
            \Rightarrow & \beta \not\models \varphi_2 \\
            \Rightarrow & \beta \models \neg\varphi_2
        \end{align*}
        Combined, $\beta \models \varphi_1 \Land \neg \varphi_2 \lightning$ By contradiction, $\varphi_1$ entails $\varphi_2$.
    \end{proof}
\end{apdxlemma}

\begin{apdxlemma}{\ref{lem:nonprop_update}}\label{apdx:lem:nonprop_update}
    Given an assignment $s\in\Sigma$, a formula $\varphi$ and an update $u: A \rightarrow \Z[\V]$. Without loss of generality $A' = \set{x'}{x \in A}$ is a set of fresh variables with $A' \cap \V = \emptyset$. We define $u(\varphi) := ((\varphi \Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/x]_{x\in A}$. For the partial assignment $s' = u(s)|_A$: if $s \models \varphi$, then $s' \models u(\varphi)$.
    \begin{proof}
        \begin{align}
            s' &\models ((\varphi \Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/x]_{x\in A} \nonumber\\
            \Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/x]_{x\in A}[x/s'(x)]_{x\in A} \nonumber\\
            \Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/s'(x)]_{x\in A}\label{eq:chaining_subst}\\
            \Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' = u(x)))[x'/s'(x)]_{x\in A} \label{eq:dropping_projection}\\
            \Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' = u(x)))[x'/\bigl(\rho_u(s)\bigr)(x)]_{x\in A}\nonumber\\
            \Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' = u(x)))[x'/\bigl(u(x)\bigr)(s)]_{x\in A}\nonumber\\
            \Leftrightarrow & \models (\varphi[x'/\bigl(u(x)\bigr)(s)]_{x\in A}) \Land (\LAnd_{x\in A} x'[x'/\bigl(u(x)\bigr)(s)]_{x\in A} = u(x)[x'/\bigl(u(x)\bigr)(s)]_{x\in A}) \nonumber\\
            \Leftrightarrow & \models \varphi \Land \LAnd_{x\in A} x'[x'/\bigl(u(x)\bigr)(s)] = u(x) \label{eq:dropping_substitutions}\\
            \Leftrightarrow & \models \varphi \Land \LAnd_{x\in A} \bigl(u(x)\bigr)(s) = u(x)\nonumber\\
            \Leftrightarrow & \models \varphi[x/s(x)]\Land \LAnd_{x\in A} u(x)[x/s(x)] = u(x)\label{eq:definition_of_update}\\
            \Leftarrow & \models \varphi[x/s(x)]\Land \LAnd_{x\in A} u(x)[x/s(x)][x/s(x)] = u(x)[x/s(x)]\label{eq:adding_model}\\
            \Leftrightarrow & \models (\varphi\Land \LAnd_{x\in A} u(x) = u(x))[x/s(x)]\label{eq:dedup_substitution}\\
            \Leftrightarrow & \models \varphi[x/s(x)]\nonumber\\
            \Leftrightarrow s & \models \varphi\label{eq:assumption}
        \end{align}
Equation \ref{eq:chaining_subst} holds because the substitutions can be chained. Equation \ref{eq:dropping_projection} holds because the projection removed no variables contained in the substitution; they are just implicitly existentially quantified and not affected otherwise. In Equation \ref{eq:dropping_substitutions} the substitutions can be removed, because $\varphi$ and $u(x)$ do not contain any affected variables, as they were freshly generated. In Equation \ref{eq:definition_of_update} the update is just replaced by its definition. Equation \ref{eq:adding_model} holds because if $s$ is a model for the formula, the formula is satisfiable; the converse is not necessarily true. Applying the substitution $[x/s(x)]$ a second time in Equation \ref{eq:dedup_substitution} does nothing, because $s(x) \in \Z$ and specifically does not contain any variables $x\in A$. Afterwards, the substitution is moved to the outside. Equation \ref{eq:assumption} holds by assumption.
\end{proof}
\end{apdxlemma}
projections of constraints and handling polyhedra we resorted to \gls{apron} and the \gls{ppl}\cite{bagnara2008socp}, which can be used as an abstract domain in \gls{apron}.
projections of constraints and handling polyhedra we resorted to \gls{apron}\cite{apron} and the \gls{ppl}\cite{bagnara2008socp}, which can be used as an abstract domain in \gls{apron}.
to performance reasons, but made available through a new subcommand \texttt{cfr}. It might be used in the future by other tools that want to profit from partial evaluation of probabilistic programs.
to performance reasons, but made available through a new subcommand. It might be used in the future by other tools that want to profit from partial evaluation of probabilistic programs.
\subsection{Loops}\subsection{Sub-\gls{scc} evaluation}% \begin{comment}% Base Algorithm% fun find_loops_in(P)% loop_heads = {}% // Dijkstra% on loop_back from l' -> l_h:% loop_heads := loop_heads + l_h% return loop_heads% end
% fun heuristic(L: Loop)% // same as original paper% end% fun partial_evaluation(Program P = (PV, L, GT, l_0))% let lh := loop_heads(P)% let properties := heuristic(P)% let initial_state := (l_0, [])% let P' := ({l_0}, l_0, {locations: P.l_0, {}) // todo% for gt in GT:% let gt' = {}% for t in gt:% (s', l_t') := evaluate(P, s, t)% if l_t \in lh:% else :% gt' := gt' +% end% end% Claims:% Every path in P has an equivalent path in the evaluation graph EG% Every path in EG has an equivalent path in P'% SCC Algorithm:% similar to above but stops at SCC boundaries.% \end{comment}% \section{Modules}% \section{Command-Line Interface}% The techniques described above were implemented natively in Koat2. They are% available as part of the normal analysis via the flags \texttt{--cfr-native}.% The old option for control-flow refinement \texttt{--cfr} was renamed to% \texttt{--cfr-ir}, and can probably be removed in the future.% During normal analysis the control-flow refinement is done on every \gls{scc}% and the bounds of the resulting scc is compared to the previous bounds.% A full program can be refined using the native control-flow refinement with a% separate command \texttt{cfr}. For the new control-flow refinement the option is% added to switch between the various method to select location for abstraction% and the abstraction method itself (see \Sref{sec:abstraction}). An summary of% the compatible options is given in \tref{tab:options}.
\subsection{Selecting the Abstract Domain}
In the implementation, the projection is computed using an abstract domain. Thanks to the \gls{apron}\cite{apron} library, the new implementation is generic over the specific abstract domain used. We decided to use the abstract domain of polyhedra (see \Sref{ssec:polyhedra}), since they are exact on linear constraints. In practice only small components are partially evaluated, making the performance disadvantage acceptable. \gls{apron} uses the \gls{ppl}\cite{bagnara2008socp} under the hood for polyhedra. Other abstract domains supported by \gls{apron} might be used in the future. In addition to the projection operation, the library supports updates as well, which are used when possible. The \gls{ppl} ignores all non-linear constraints, over-approximating the set of constraints in the process. In turn, the projection is over-approximated as well. One could decide to over-approximate non-linear constraints manually in order to achieve tighter projections. For example, quadratic polynomials have an easily computable minimum or maximum, which could be turned into a single linear constraint.
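As a minimal illustration of such a manual over-approximation (a sketch only, not something the current implementation does), consider the non-linear constraint $y = (x-1)^2 + 2$. The quadratic polynomial $(x-1)^2 + 2$ attains its minimum $2$ at $x = 1$, hence every satisfying assignment also satisfies the linear constraint $y \geq 2$. Instead of dropping the non-linear constraint entirely, as the \gls{ppl} does, one could keep $y \geq 2$ and obtain a tighter projection.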
% As discussed in \Sref{sec:updateinvariants} the polyhedra computed during% evaluation are tighter than the invariants computed during preprocessing. In% order to update the invariants after the partial evaluation a new option% \texttt{--update-invariants} is added and activated by default.
\subsection{Loops}
The abstraction relies on the fact that every loop contains at least one location marked for abstraction. The loop detection is implemented using the algorithm described by \citeauthor{johnson1975}\cite{johnson1975}. By default the first location of the loop is selected, which by design of \citeauthor{johnson1975}'s algorithm is the smallest location with regard to an arbitrary order. In our implementation the locations are ordered topologically from the start of the program.
% \begin{table}% \center% \begin{tabular}[ccc]% \hline% \begin{comment}% analysis --cfr=native,update-invariants, --o% --cfr=irankfinder,% --cfr=off
Optionally, the \gls{fvs} can be computed on the detected loops to minimize thenumber of locations marked for abstraction.
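The following Python sketch illustrates the default selection described above; it is purely illustrative (the hypothetical function \texttt{mark\_loop\_heads} is not part of \gls{koat}) and assumes the program graph is given as a \texttt{networkx} digraph whose locations are numbered in topological order from the start location. The optional \gls{fvs} minimization is not sketched here.
\begin{verbatim}
# Illustrative sketch: mark one location per loop, choosing the smallest
# location of each simple cycle w.r.t. the given (topological) numbering.
import networkx as nx

def mark_loop_heads(graph: nx.DiGraph) -> set:
    marked = set()
    # networkx.simple_cycles implements Johnson's algorithm (1975).
    for cycle in nx.simple_cycles(graph):
        marked.add(min(cycle))
    return marked

if __name__ == "__main__":
    g = nx.DiGraph([(0, 1), (1, 1), (1, 2), (2, 1), (2, 3)])
    print(mark_loop_heads(g))  # {1}: both loops contain location 1
\end{verbatim}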
% prob-analysis --cfr=native
\subsection{Property-based abstraction}
We implemented the property-based abstraction (recall \Sref{sec:property_based_abstraction}) proposed by \citeauthor{domenech2019arxiv}. The heuristic uses backwards-propagation as one source of properties. Our implementation applies the backwards-propagation to every loop that contains a location marked for abstraction, possibly aggregating properties from multiple loops.
% cfr --native% --irankfinder% \end{comment}
The properties are projected onto the program variables in order to reduce their number, and because values of temporary variables do not propagate throughout the program. The entailment checks of properties are done using Z3 and Lemma \ref{lem:entailment}. Since a set of constraints is checked against many properties, an incremental solver is used in order to avoid recomputing the model for the set of constraints every time. Once a set of constraints is abstracted, it is replaced by the set of entailed properties.
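The following Python sketch shows this incremental use of Z3 through its Python bindings; it is an illustration only (the function name \texttt{entailed\_properties} is hypothetical) and not the code used in \gls{koat}.
\begin{verbatim}
# phi entails psi iff phi /\ not(psi) is unsatisfiable (cf. the entailment
# lemma). The constraint set phi is asserted once; every candidate property
# psi is checked under push/pop, so the solver reuses its state for phi.
from z3 import Ints, And, Not, Solver, unsat

def entailed_properties(phi, properties):
    solver = Solver()
    solver.add(phi)                  # assert the constraint set once
    entailed = []
    for psi in properties:
        solver.push()
        solver.add(Not(psi))         # phi /\ not(psi)
        if solver.check() == unsat:  # unsat => phi entails psi
            entailed.append(psi)
        solver.pop()
    return entailed

if __name__ == "__main__":
    x, y = Ints("x y")
    phi = And(x > 2, y == x + 1)
    print(entailed_properties(phi, [x > 0, y > 3, y < 0]))  # [x > 0, y > 3]
\end{verbatim}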
% \hline% \end{tabular}% \caption{}% \end{table}
Recall Algorithm \ref{alg:evaluate_abstr}. When adding a new transition to the partial evaluation graph, we check whether the version is already present in the graph. Instead of checking for semantic equivalence of versions, we use structural equality, which might not detect equivalent versions with non-abstracted constraints, but works for the abstracted versions with properties, since the properties are reused during abstraction.
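The following small Python sketch (illustrative only, not \gls{koat}'s data structures) shows why structural equality suffices for abstracted versions: representing a version as a location paired with the \texttt{frozenset} of its properties makes two versions built from the same reused properties compare and hash equally, independent of the order of the properties.
\begin{verbatim}
def version(location, properties):
    # A version is a location together with its set of properties.
    return (location, frozenset(properties))

seen = set()

def is_new_version(location, properties):
    v = version(location, properties)
    if v in seen:
        return False  # structurally equal version already in the graph
    seen.add(v)
    return True

if __name__ == "__main__":
    print(is_new_version("l1", ["x > 0", "y <= x"]))  # True
    print(is_new_version("l1", ["y <= x", "x > 0"]))  # False, same version
\end{verbatim}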
\gls{koat}'s analysis goal is to find tight upper bounds for \gls{pip}. The analysis techniques used, such as \acrfull{mprf}, have their limits and cannot always find tight bounds for every program, which motivates control-flow refinement in order to find a program equivalent to the original one that opens the possibility for the analysis to find tighter runtime-complexity bounds.
\gls{koat}'s analysis goal is to find tight upper bounds for \gls{pip} and possibly non-probabilistic and non-deterministic integer programs, which are a specialization of the former. The analysis techniques used, such as \gls{mprf},\todo{more} have their limits and cannot always find tight bounds for every program, which motivates control-flow refinement in order to find a program equivalent to the original one that opens the possibility for the analysis to find tighter bounds.
Recall \ref{def:runtime_complexity}; the newly constructed refined program shall have the same worst-case expected runtime complexity as the original program, which implies that runtime-complexity bounds found for the refined program are also runtime-complexity bounds for the original one. Naturally the control-flow refinement will add and replace transitions and locations, hence the runtime-complexity bounds and size-bounds of the individual transitions won't be preserved. However, we will show that each transition in the refined program is similar to a transition in the original program, and hence every run in the new program has a run of equal runtime-complexity and probability in the original program, and vice-versa.
Recall \ref{def:overallcomplexity}; the newly constructed refined program shall have the same overall runtime complexity as the original program, which implies that complexity bounds found for the refined program are also complexity bounds for the original one. Naturally the control-flow refinement will add and replace transitions and locations, hence the runtime and size complexity of each individual transition won't be preserved. However, we will show that each transition in the refined program is a copy of a transition in the original program, and that the complexity of an original transition is equal to the sum of the complexities of its copies.
evaluation presented by \citeauthor{domenech2019arxiv}\cite{domenech2019arxiv}for \acrlong{pip}. The partial evaluation will be constructed in a similarway but explicitly taking probability and general transitions into account.
evaluation technique presented by\citeauthor{domenech2019arxiv}\cite{domenech2019arxiv} for \acrlong{pip}. Thepartial evaluation will be constructed in a similar way but explicitly takingprobability and general transitions into account.
possibly infinite evaluation tree is constructed that will finally be collapsedinto a finite evaluation graph by an abstraction layer.
possibly infinite evaluation graph is constructed that will finally be collapsed into a finite evaluation graph by the use of an abstraction layer. Afterwards we will show that the partial evaluation is sound for \gls{pip} and prove that the expected runtime-complexity is preserved and thus also the worst-case runtime-complexity of non-probabilistic programs.

\begin{example}
    \begin{figure}
        \centering
        \input{figures/ch1_prob}
        \caption{Representation of $\Prog_3$, a variation of $\Prog_2$ from \fref{fig:prob_c}, as a transition graph, with non-deterministic branching.\label{fig:ex_prob}}
    \end{figure}
    The examples in this chapter will follow through using a variation of the probabilistic program $\Prog_2$ from the introduction (\fref{fig:prob_c}), which we call $\Prog_3$. $\Prog_3$ is visually represented as a transition graph in \fref{fig:ex_prob}. The program uses a set of variables $\V = \{x,y,u,w\}$ and has the program variables $\PV_{\Prog_3} = \{x,y\}$. $u,w$ are temporary variables whose values are reassigned by the scheduler on every transition and do not propagate throughout the program.
Afterwards we will show that the partial evaluation is sound for probabilistic integer programs and prove that the overall expected runtime complexity (\cref{def:past}) is preserved, as is the overall (worst-case) runtime complexity (\cref{def:runtimebounds}).
In transition $t_0$ the value of $x$ is set by non-deterministic sampling via the temporary variable $u$. At location $\ell_1$ the scheduler decides for one of the general transitions $g_1 = \{t_{1a}, t_{1b}\}$ or $g_2 = \{t_2\}$, and assigns the value for $w$ accordingly. The program can alternate between the loops non-deterministically after every iteration. Since both loops are independent of one another, the non-determinism doesn't change anything about the expected runtime-complexity of the program. On the general transition $g_1 = \{t_{1a}, t_{1b}\}$ at location $\ell_1$ the program branches probabilistically, with a probability of $p=0.5$. On the transition $t_3$, the value by which $y$ is decremented is sampled probabilistically from a Bernoulli distribution, where both possible outcomes have the same probability $p=0.5$.
\end{example}
During a normal execution a program visits one location after another and changes state with every transition. The configuration at a given point of the execution can be described by a tuple of location and assignment. Simulating every possible run of a program from every possible starting state is impossible: First, the number of starting states is infinite; second, the program is not guaranteed to terminate and hence the simulation of a specific run might not terminate either. For non-deterministic and probabilistic programs, the problem gets even more complex, since the transition taken does not only depend on the current state but also on the answer of the scheduler and some random event. Instead of simulating every single possible run, it would be nice to treat \enquote{similar} states as one. Instead of simulating a single assignment, the set of all possible assignments after a step is described by a polyhedron. The resulting graph is called a \textit{partial evaluation graph}. It was formalized for \gls{chc} by \citeauthor{gallagher2019eptcs}. In this thesis the definitions are adapted to better fit the algorithm and the formalisms of \Glspl{pip}.
During a normal execution a program visits locations one after another and changes its state with every transition. The configuration at a given point of the execution can be described by a tuple of a location and an assignment. Simulating every possible run of a program from every possible starting state is impossible: First, the number of starting states is infinite; second, the program is not guaranteed to terminate and hence the simulation of a specific run might not terminate either. For non-deterministic and probabilistic programs, the problem gets even more complex, since the transition taken does not only depend on the current state but also on the answer of the scheduler and some random event. Instead of simulating every single possible run, it would be nice to treat \emph{similar} states as one. Instead of simulating a single assignment, the set of all possible assignments after a step is described by a set of constraints. The resulting graph is called a \emph{partial evaluation graph}. It was formalized for \glspl{chc} by \citeauthor{gallagher2019eptcs}\cite{gallagher2019eptcs}. In this thesis the definitions are adapted to better fit the algorithm and the formalism of \Glspl{pip}.
In order to guarantee termination, as will be elaborated further later on, afinite set of abstracted values (not to be confused with abstract domains, suchas polyhedra etc.) is required. For now the set of abstracted values shall be afinite set $\A \subset \C$.
% In order to guarantee termination of the refinement, as will be elaborated further later on, a% finite set of abstracted values (not to be confused with abstract domains, such% as polyhedra etc.) is required. For now the set of abstracted values shall be a% finite set $\A \subset \C$.
Given a \gls{pip} $\Prog$ with locations $\Loc$, transitions $\T$ and general transitions $\GT$, the versions are the vertices of the partial evaluation graph. A version is composed of a location from $\Loc$ and either an abstracted value or a non-abstracted constraint.
% Given a \gls{pip} $\Prog$ with locations $\Loc$, transitions $\T$ and general% transitions $\GT$, the versions are the vertices of the partial evaluation% graph. A version is composed of location from $\Loc$ and either an abstracted% value or a non-abstracted constraint.
The set of \textit{constraint versions} is the set of tuples over thelocations $L$ from $P$ and constraints $\C$. The set of \textit{abstractversions} is the set of tuples over the locations $L$ from $P$ and somefinite abstraction space $A\subseteq\C$. Since the set of locations and theabstraction space are both finite, so are the abstract versions. The set ofversions is the union of constraint and abstract versions.
The set of \textit{versions} is the set of tuples over the locations $\Loc_\Prog$ and constraints $\C$:
\begin{align*}
    \vers_\Prog = \set{\langle \ell, \varphi\rangle}{\ell \in \Loc_\Prog, \varphi \in \C}
\end{align*}
For clarity's sake, versions will be denoted with angles $\langle \cdot \rangle$. Since elements of the abstraction space are constraints as well, the set notation with curly braces is used for explicitly abstracted constraints, like so: $\langle l, \{\psi_1, \dots\}\rangle$, and the formula notation without braces is used for non-abstracted values, like so: $\langle l, \varphi\rangle$. An abstracted version can be lifted to a non-abstracted version trivially.
% For clarity sake, versions will be denoted with angles $\langle \cdot% \rangle$. Since elements of the abstraction space are constraints as well.% The set notation with curly braces is used for explicitly abstracted% constraints like so $\langle l, \{\psi_1, \dots\}\rangle$ and the formula% notation without brackets is used for non-abstracted values like so $\langle% l, \varphi\rangle$. An abstracted version can be lifted to a non-abstracted% version trivially.
Every version describes a subset of configurations of the program $P$. We write$\llbracket \langle l, \varphi \rangle \rrbracket$ for the set ofconfigurations $\set{(l,\_,s)}{s \models \varphi}$.
% Every version describes a subset of configurations of the program $P$. We write% $\llbracket \langle l, \varphi \rangle \rrbracket$ for the set of% configurations $\set{(l,\_,s)}{s \models \varphi}$.
The edges of the partial evaluation graph are transitions $\T_{PE}$ defined analogously to Definition \ref{def:pip}, except that they connect versions and not locations.
The versions are the vertices (locations) of the partial evaluation graph. The edges of the partial evaluation graph are transitions $\T_{PE}$ defined analogously to Definition \ref{def:pip}, except that they connect versions and not locations.
A \Gls{pip} $P$ can be lifted to a trivial partial evaluation graph by replacing every location $l \in L$ by the trivial version $\langle l,\textit{true}\rangle$. Obviously the lift of a program is not useful for the analysis, as it doesn't capture any interesting properties of the program,
A \Gls{pip} $P$ can be lifted to a trivial partial evaluation graph byreplacing every location $\ell \in \Loc_\Prog$ by the trivial version $\langle\ell, \texttt{true}\rangle$. Obviously the lift of a program is not useful forthe analysis, as it doesn't capture any interesting properties of the program,
\langle l', \varphi'\rangle$ is computed by unfolding a transition $t \in \T$from the original \gls{pip} that starts in the same location $l \in \Loc$ as theversion $v = \langle l, \varphi\rangle$. The resulting constraint $\phi'$contains all states $s' \in \Sigma$ with $s' \models \varphi'$ reachable via thetransition from a state in the source version, i.e. $\prSs((l, s, \_)\rightarrow (l',s',t)) > 0$.
\langle \ell', \varphi'\rangle \in \vers_\Prog$ is computed by unfolding a transition $t \in \T_\Prog$ from the original \gls{pip} that starts in the same location $\ell \in \Loc$ as the source version $v = \langle \ell, \varphi\rangle \in \vers_\Prog$. The resulting constraint $\varphi'$ contains all states $s' \in \Sigma$ with $s' \models \varphi'$ which are reachable via the transition $t$ from a state $s \models \varphi$ in the source version, i.e., $\prSs((\ell, s, \_) \rightarrow (\ell',s',t)) > 0$.
An unfolding operation $\unfold : \C \times (\PV \rightarrow \Z[\V\union\D]) \rightarrow \C$ of a formula $\varphi \in \C$ with an update $\eta: \PV\rightarrow\Z[\V \union \D]$ is defined as
An unfolding operation $\unfold : \Phi_\PV \times (\PV \rightarrow \Z[\V\union\D]) \rightarrow \Phi_\PV$ of a formula $\varphi \in \Phi_\PV$ with an update $\eta: \PV\rightarrow\Z[\V \union \D]$ is defined as
\unfold(\varphi, \eta) = \begin{cases}
    \tilde{\eta}(\varphi \Land \lfloor\eta\rceil)|_{\PV} & \text{if } \tilde{\eta}(\varphi \Land \lfloor\eta\rceil) \text{ is SAT}\\
    \textit{false} & \text{otherwise}
\end{cases}
\unfold(\varphi, \eta) = \tilde{\eta}(\varphi \Land \lfloor\eta\rceil)
For any $\varphi\in2^\C$, assignments $s, s' \in \Sigma$ and probabilisticupdate $\eta : \PV \rightarrow \Z[\V\union\D]$, if $\models\varphi$ then
For any $\varphi\in\Phi_\PV$, probabilistic update $\eta : \PV \rightarrow\Z[\V\union\D]$, if there exists an $s\in \Sigma$ with $s\models\varphi$then
\item \label{itm:unfold1} $\models\unfold(\varphi, \eta)$.\item \label{itm:unfold2} If additionally $s \models \varphi$ and $s' \in\support_s(\eta)$,then $s' \models \unfold(\varphi, \eta)$.
\item \label{itm:unfold1} $\unfold(\varphi, \eta)$ is satisfiable.\item \label{itm:unfold2} If $s' \in \support_s(\eta)$ for some $s' \in\Sigma$, then $s' \models \unfold(\varphi, \eta)$.
$\models\unfold(\varphi, \eta)$ follows directly from the existence of$s \models \varphi$ due to satisfiability and $s' \in \support_s(\eta)$since the support cannot be empty.
The satisfiability of $\unfold(\varphi, \eta)$ follows directly from theexistence of $s \models \varphi$ due to satisfiability and $s' \in\support_s(\eta)$ since the support cannot be empty.
\begin{example}
    Consider transition $t_3$ of $\Prog_3$ from \fref{fig:ex_prob}. $t_3$ has a probabilistic update $\eta(y) = y - \textsc{Bern}(0.5)$ and implicitly $\eta(x) = x$. Imagine a constraint $y > 0$ that is unfolded with the probabilistic update $\eta$. The update is approximated by $(\tilde{\eta},\lfloor\eta\rceil)$ with
    \begin{align*}
        \tilde{\eta}(x) &= x' \\
        \tilde{\eta}(y) &= y' \\
        \lfloor\eta\rceil &= (x' = x \Land y' = y - d \Land 0 \leq d \leq 1)
    \end{align*}
The unfolding computes the following new constraint.
\begin{align*}
    \tilde{\eta}(\varphi \Land \lfloor\eta\rceil) &= \tilde{\eta}(y > 0 \Land x' = x \Land y' = y - d \Land 0 \leq d \leq 1) \\
    &= (y > 0 \Land x' = x \Land y' = y - d \Land 0 \leq d \leq 1 \Land x'' = x' \Land y'' = y')|_{\PV''}[x''/x]_{x\in\PV}\\
    &= (y'' \geq 0)[x''/x]_{x\in\PV}\\
    &= (y \geq 0)
\end{align*}
Any satisfying assignment for $\varphi$ must assign $y$ a value larger than zero, and after the update the value must be larger than or equal to zero. Since $x$ is neither constrained before the update nor updated in any way, it remains unconstrained after the unfolding.
\end{example}
Using the unfolding, one can derive an unfolding operation $\unfold^*: \vers\times \T \rightarrow 2^{\T_{PE}}$ that constructs a new transition between theversions copying the guard $\tau$, update $\eta$, and probability $p$ from theoriginal transition. Since unsatisfiable versions cannot be reached they areignored and no transition is created.
Using the unfolding, one can derive an unfolding operation $\unfold: \vers_\Prog \times \T \rightarrow 2^{\T_{PE}}$ that constructs a new transition between the versions, copying the guard $\tau$, update $\eta$, and probability $p$ from the original transition.
\begin{align*}
    \unfold(\langle \ell,\varphi\rangle, t) = \{(\langle \ell, \varphi\rangle, p, \tau\Land\varphi, \eta, \langle \ell',\varphi'\rangle)\} \\
    \text{for } t = (\ell, p, \tau, \eta, \ell') \text{ and } \varphi' = \unfold(\varphi \land \tau, \eta)
\end{align*}
\unfold^*(\langle l,\varphi\rangle, t) =
\begin{cases}
    \{(\langle l, \varphi\rangle, p, \tau, \eta, \langle l',\varphi'\rangle)\} & \text{if } \begin{aligned}[t]
        &t = (l, p, \tau, \eta, l'), p \in [0,1], \tau \in 2^\C,\\
        &\eta: \PV \rightarrow \Z[\V\union\D]\text{, and} \\
        &\varphi' = \unfold(\varphi \land \tau, \eta) \text{ is SAT}
    \end{aligned} \\
    \{\} & \text{otherwise}
\end{cases}
\unfold
\begin{lemma} [Existence, Similarity, Reachability of unfolded transitions]\label{lem:existance_and_similarity}
    Let $l \in \Loc_\Prog$, $t' \in \T_\Prog$, $s \in \Sigma$, and $\varphi$ be an \gls{fo}-formula over variables $\PV$. Moreover, let $t = (l, p, \tau, \eta, l')$ with a location $l' \in \Loc_\Prog$, a probability $p \in [0,1]$ and an update $\eta: \PV \rightarrow \Z[\V\union\D]$.
\begin{lemma} [Existence, Similarity, Reachability of unfolded transitions]\label{lem:existence_and_similarity}Let $\ell \in \Loc_\Prog$, $t' \in \T_\Prog$, $s \in \Sigma$, and $\varphi$be an \gls{fo}-formula over variables $\PV$. Moreover, let $t = (\ell, p,\tau, \eta, \ell')$ with locations $\ell' \in \Loc_\Prog$, a probability $p\in [0,1]$ and an update $\eta: \PV \rightarrow \Z[\V\union\D]$.
For the sub-SCC level evaluation Algorithm \ref{alg:abstr_eval} is adapted tostop the evaluation when leaving the component and instead of starting only fromthe initial location of the program, the evaluation starts from everyentrytransition to the program. The exact algorithm is displayed in Algorithm\ref{alg:eval_component}.\begin{algorithm}\caption{Evaluate the component $S \subseteq \T_\Prog$ of a Program $\Prog$with abstraction $\alpha$.\label{alg:eval_component}}\KwData{ A \gls{pip} $\Prog$, component $S$, abstraction function $\alpha$,and abstraction oracle $S_\alpha$}\KwResult{A Graph $G$, and a set of pairwise disjunct general transitions$\GTG \subset \GT_\text{PE}$}$\ET \leftarrow \Tout(S)$\;\SetKwFunction{FEvaluate}{evaluate}\SetKwProg{Fn}{Function}{:}{end}$\GTG \leftarrow \emptyset$\;\Fn{\FEvaluate{$G$, $v$}}{$\langle l, \varphi\rangle \leftarrow v$\;\For{$g \in \GTout(l)$} {$g' \leftarrow \unfold^*(v, g)$\label{alg:abstr_line_unfold_g}\;\If {$g^* \neq \emptyset$ \label{alg:abstr_line_filteremptyg}}{$g^* \leftarrow \emptyset$\;\For{$t = (v, p, \tau, \eta, \langle l', \varphi\rangle) \in g$}{\uIf {$g \in \ET$} {$t \leftarrow (v, p, \tau, \eta, \langle l',\texttt{true}\rangle)$\label{alg:abstr_line_exit}\;}\ElseIf {$S_\alpha$} {$t \leftarrow (v, p, \tau, \eta, \langle l',\alpha(\varphi)\rangle)$\label{alg:abstr_line_abstraction}\;}$g^* \leftarrow g^* + t$\label{alg:abstr_line_addtog}\;\uIf {$v' \not\in V(G)$}{$G \leftarrow G + t$\;$G \leftarrow \FEvaluate{G, v'}$\;}\Else {$G \leftarrow G + t$\label{alg:abstr_line_addt}\;}}$\GTG \leftarrow \GTG + g^*$\;}}\Return {$G$}}$G \leftarrow \text{ graph of }\Prog \text{ lifted to a partial evaluationgraph}$\;$G \leftarrow G - S$\;\For{$t = (\ell, p, \tau, \eta, \ell')\in \Tin(S)$} {$G \leftarrow \FEvaluate{$G, \langle\ell',\texttt{true}\rangle $}$ \;}\Return $G, \GTG$\end{algorithm}The partial evaluation starts at every entry transition $\T_in(S)$ to thecomponent. When unfolding an exit transition $t = (\ell, \_, \_, \_, \ell') \in\Tout(S)$ from a version $\langle \ell, \varphi\rangle$, to $\unfold^*(\langle\ell, \varphi\rangle, t) = \{(\langle\ell,\varphi\rangle, \_, \_, \_,\langle\ell', \varphi'\rangle)\}$ the target version$\langle\ell',\varphi'\rangle$ is replaced by the trivial overapproximation$\langle\ell',\texttt{true}\rangle$. This target is always already contained inthe program, by construction and the algorithm always backtracks.Lets cosider an admissible finite prefix $f \in \fpath_\Prog$ with\begin{align*}f &= c_0c_1\dots{}c_k\dots{}c_m\dots{}c_n \\&=(\ell_0,t_\in,s_0)(\ell_1,t_1,s_1)\dots\underbrace{(\ell_k,t_k,s_k)\dots(\ell_m,t_m,s_m)}_{t_k,\dots,t_m\in S}\dots(\ell_n,t_n,s_n)\end{align*}with an infix $c_m\dots{}c_k$ going through the component.
By construction, up to the component the partial evaluation $\Prog'$ contains asimilar prefix$(\langle\ell_0,\texttt{true}\rangle,t_\in,s_0)(\langle\ell_1,\texttt{true}\rangle,t_1,s_1)\dots(\langle\ell_{k-1},\texttt{true}\rangle,t_{k-1},s_{k-1})\in \fpath_{\Prog'}$. The component is entered via the transition$t_k\in\Tin(S)$ which is was used as a starting point for the evaluation withthe trivial version $\langle\ell_{k-1}, \texttt{true}\rangle$ in Algorithm\ref{alg:eval_component}. By the same argument as in Theorem\ref{thm:correctness}, a similar prefix$(\langle\ell_0,\texttt{true}\rangle,t_\in,s_0)(\langle\ell_1,\texttt{true}\rangle,t_1,s_1)\dots(\langle\ell_{k-1},\texttt{true}\rangle,t_{k-1},s_{k-1})(\langle\ell_k,\varphi_k\rangle,t_k,s_k)\dots(\langle\ell_{m-1},\varphi_{m-1}\rangle,t_{m-1},s_{m-1})\in \fpath_{\Prog'}$ through the component exists in $\Prog'$. The exittransition $t_m$ must have been unfolded before backtracking and afterwards thesimilar prefix trivially follows the lifted versions again, yielding a similarfinite prefix$(\langle\ell_0,\texttt{true}\rangle,t_\in,s_0)(\langle\ell_1,\texttt{true}\rangle,t_1,s_1)\dots(\langle\ell_{k-1},\texttt{true}\rangle,t_{k-1},s_{k-1})(\langle\ell_k,\varphi_k\rangle,t_k,s_k)\dots(\langle\ell_{m-1},\varphi_{m-1}\rangle,t_{m-1},s_{m-1})(\langle\ell_m,\texttt{true}\rangle,t_m,s_m)\dots(\langle\ell_n,\texttt{true}\rangle,t_n,s_n)\in \fpath_{\Prog'}$.
\begin{definition}[Solutions\cite{Domenech19}]\label{def:solutions}Let $\varphi \in \Phi$ be an \gls{fo}-formula. $\llbracket \varphi\rrbracket$ is defined as the set of all satisfying assignments of theformula $\varphi$.\end{definition}
Using the $\models$ and $\equiv$ relations from \gls{fo} logic, the notion of satisfiability is defined \cite{barwise1977}. Constraints are a subset of formulas in \gls{fo} logic, thus their semantics shall be defined accordingly. In this thesis we will only consider integer arithmetic. Let $\Zs := (\Z, +, \cdot, 0, 1, \leq, <, =)$ be the structure of standard integer arithmetic. For an assignment $\beta : \V \rightarrow \Z$ and a formula $\varphi \in \Phi$ we write $\beta \models \varphi$ instead of $\Zs, \beta \models \varphi$.
The set of quantifier-free formulas of first-order logic over a variable set $A\subset\V$ is denoted by $\Phi_A$, or simply $\Phi$ when $A=\V$. For foundations of \gls{fo} logic we refer to the literature \cite{barwise1977}. Constraints are a subset of formulas in \gls{fo} logic, thus their semantics shall be defined accordingly using the $\models$ and $\equiv$ relations from \gls{fo} logic \cite{barwise1977}. In this thesis we will only consider integer arithmetic. Let $\Zs := (\Z, +, \cdot, 0, 1, \leq, <, =)$ be the structure of standard integer arithmetic. For an assignment $\beta : \V \rightarrow \Z$ and a formula $\varphi \in \Phi$ we write $\beta \models \varphi$ instead of $\Zs, \beta \models \varphi$.
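As a small illustration of this notation, consider the following example.
\begin{example}
    Let $\varphi = (x > 1 \Land x + y \leq 5)$ and let $\beta : \V \rightarrow \Z$ be an assignment with $\beta(x) = 2$ and $\beta(y) = 3$. Then $\beta \models \varphi$, since $2 > 1$ and $2 + 3 \leq 5$ hold in $\Zs$. An assignment with $\beta(x) = 0$ is not a model of $\varphi$.
\end{example}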
\begin{proof}\enquote{$\Rightarrow$} Let $\varphi_1$ entails $\varphi_2$, and assumean assignment $\beta: \V \rightarrow \Z$ exists such that $\beta \models\varphi_1 \Land \neg \varphi_2$.\begin{align*}\Rightarrow &\beta \models \varphi_1 \\\Rightarrow & \beta \in \llbracket \varphi_1 \rrbracket \\\Rightarrow & \beta \in \llbracket \varphi_2 \rrbracket \\\Rightarrow & \beta \models \varphi_2 \\\Rightarrow & \beta \not\models \neg\varphi_2 \\\Rightarrow & \beta \not\models \varphi_1 \Land \neg\varphi_2 \lightning\end{align*}By contradiction $\varphi_1 \Land \neg\varphi_2$ is unsatisfiable.
\enquote{$\Leftarrow$} Let $\varphi_1 \Land \neg\varphi_2$ isunsatisfiable and assume $\llbracket \varphi_1 \rrbracket \not\subseteq\llbracket \varphi_1 \rrbracket$. There exists an assignment\begin{align*}& \beta \in \llbracket \varphi_1\rrbracket \\\Rightarrow&\beta \models \varphi_1 \\\end{align*}and\begin{align*}&\beta \not\in\llbracket\varphi_2\rrbracket \\\Rightarrow& \beta \not\models \varphi_2 \\\Rightarrow& \beta \models \neg\varphi_2\end{align*}Combined $\beta \models \varphi_1 \Land\neg \varphi_2 \lightning$By contradiction $\varphi_1$ entails $\varphi_2$.\end{proof}
\proof{see Appendix on page \pageref{apdx:lem:entailment}.}
Let $\Z[\V]$ be the set of integer polynomials over variables $\V$ and $\Sigma =\set{s : A \rightarrow \Z}{A \subseteq \V}$ be the set of full integerassignments.For an integer polynomial $f\in\Z[\V]$ and a full assignment $s \in \Sigma$ wewrite $f(s)$ for the value that is found by fully evaluating the polynomial$f[x_i/s(x_i)]$. \todo{restrict to full assignments?}
Let $\Sigma = \{s : \V \rightarrow \Z\}$ be the set of full integer assignments, that is, assignments which assign a value to every variable $v\in\V$. In contrast, partial assignments over a variable set $A \subseteq \V$ assign values only to a subset of variables; the set of partial assignments over the variables $A\subseteq\V$ is denoted by $\Sigma_A = \{s: A \rightarrow \Z\}$. For an integer polynomial $\varf\in\Z[A]$ over a variable set $A \subseteq \V$ and an assignment $s \in \Sigma_A$ we write $\varf(s)$ for the value that is found by evaluating the polynomial $\varf[x/s(x)]_{x\in A}$.
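For example:
\begin{example}
    Let $\varf = 2x + y^2 \in \Z[\{x,y\}]$ and $s \in \Sigma_{\{x,y\}}$ with $s(x) = 1$ and $s(y) = 3$. Then $\varf(s) = (2x + y^2)[x/1][y/3] = 2 \cdot 1 + 3^2 = 11$.
\end{example}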
An update is described by an update function $u : A \rightarrow \Z[\V]$ thatmaps every updated variable to a polynomial expression over $\V$. The valueof a variable $x\in A$ \textit{after} the update is determined by $u(x)(s)$.Note, that $u(x)$ is a polynomial and $u(x)(s)$ is the evaluation of thatpolynomial.
Let $A,B \subseteq \V$. An update is described by an update function $u : A\rightarrow \Z[B]$ that maps every updated variable to a polynomialexpression over $B$. The value of a variable $x\in A$ \textit{after} theupdate for an assignment $s\in\Sigma_B$ is determined by$\bigl(u(x)\bigr)(s)$. Note, that $u(x) \in \Z[\V]$ is a polynomial and$\bigl(u(x)\bigr)(s) \in \Z$ is the evaluation of that polynomial.
full assignment $s : \V \rightarrow \Z$, we write $u(s) : A \rightarrow \Z$ for the partial assignment found by assigning every updated variable to its value after the update.
assignment $s \in \Sigma$, we define a function $\rho_u: \Sigma \rightarrow \Sigma_A$. It computes the assignment after an update $u$ given an assignment $s \in \Sigma$ before the update:
\begin{align*}
    \bigl(\rho_u(s)\bigr)(x) = \bigl(u(x)\bigr)(s) \qquad \text{for all } x \in A
\end{align*}
\begin{lemma}\label{lem:nonprop_update}Given a full assignment $s\in\Sigma$, aformula $\varphi$ and an update $u: A \rightarrow \Z[\V]$. Without loss ofgenerality $A' = \set{x'}{x \in A}$ is a set of fresh temporaryvariables. We define $u(\varphi) := ((\varphi \Land \LAnd_{x\in A} x' =u(x))|_{\PV'})[x'/x]$. For any partial assignment $s' = u(s)|A$and if $s \models \varphi$, then $s' \models u(\varphi)$.
\begin{example}Consider the update $u : \{x\} \rightarrow \Z[\{x,y\}]$ with $u(x) = x\cdoty+1$. Let $s\in\Sigma_{\{x,y\}}$ assign $s(x) = 2$ and $s(y)=3$\textit{before} the update. \textit{After} the update$\bigl(\rho_u(s)\bigr)(x) = 2 \cdot 3 + 1 = 7$\end{example}
\begin{proof}\begin{align}s' &\models ((\varphi \Land \LAnd_{x\in A} x' =u(x))|_{A'})[x'/x] \nonumber\\\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =u(x))|_{A'})[x'/x][x/s'(x)] \nonumber\\\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =u(x))|_{A'})[x'/s'(x)] \label{eq:chaining_subst}\\\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =u(x)))[x'/s'(x)]_{x\in A} \label{eq:dropping_projection}\\\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =u(x)))[x'/u(s)(x)]_{x\in A}\nonumber\\\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =u(x)))[x'/u(x)(s)]_{x\in A}\nonumber\\\Leftrightarrow & \models(\varphi[x'/u(x)(s)]_{x_\in A}) \Land (\LAnd_{x\in A}x'[x'/u(x)(s)]_{x_\in A} = [x'/u(x)(s)]_{x\inA}) \nonumber\\\Leftrightarrow & \models\varphi \Land \LAnd_{x\in A} x'[x'/u(x)(s)] =u(x) \label{eq:dropping_substitutions}\\\Leftrightarrow & \models\varphi \Land \LAnd_{x\in A} u(x)(s) = u(x)\nonumber\\\Leftrightarrow & \models\varphi[x/s(x)]\Land \LAnd_{x\in A}u(x)[x/s(x)] =u(x)\label{eq:definition_of_update}\\\Leftarrow & \models\varphi[x/s(x)]\Land \LAnd_{x\in A}u(x)[x/s(x)][x/s(x)] =u(x)[x/s(x)]\label{eq:adding_model}\\\Leftrightarrow & \models(\varphi\Land \LAnd_{x\in A} u(x) = u(x))[x/s(x)]\label{eq:dedup_substitution}\\\Leftrightarrow & \models\varphi[x/s(x)]\nonumber\\\Leftrightarrow s & \models \varphi\label{eq:assumption}\end{align}
\begin{lemma}\label{lem:nonprop_update}
    Let $A, B \subseteq \V$. Given an assignment $s\in\Sigma$, a formula $\varphi \in \Phi$ and an update $u: A \rightarrow \Z[B]$. Without loss of generality $A' = \set{x'}{x \in A} \subset \V$ is a set of fresh variables with $A' \cap (A \union B) = \emptyset$. We define $u(\varphi) := ((\varphi \Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/x]_{x\in A}$. For the partial assignment $s' = \rho_u(s)|_A$: if $s \models \varphi$, then $s' \models u(\varphi)$.
Equation \ref{eq:chaining_subst} holds because the substitutions can be chained. Equation \ref{eq:dropping_projection} holds because the projection removed no variables contained in the substitution; they are just implicitly existentially quantified and not affected otherwise. In Equation \ref{eq:dropping_substitutions} the substitutions can be removed, because $\varphi$ and $u(x)$ do not contain any affected variables, as they were freshly generated. In Equation \ref{eq:definition_of_update} the update is just replaced by its definition. Equation \ref{eq:adding_model} holds because if $s$ is a model for the formula, the formula is satisfiable; the converse is not necessarily true. Applying the substitution $[x/s(x)]$ a second time in Equation \ref{eq:dedup_substitution} does nothing, because $s(x) \in \Z$ and specifically does not contain any variables $x\in A$. Afterwards, the substitution is moved to the outside. Equation \ref{eq:assumption} holds by assumption.
\end{proof}
\proof{see Appendix on page \pref{apdx:lem:nonprop_update}.}
The orthogonal projection computes the projection of an \gls{fo} formula. Geometrically, the projection can be seen as the set of solutions projected (casting a shadow) onto the remaining dimensions.
The projection operation computes the projection of an \gls{fo} formula.Geometrically, the projection is the set of assignments found by orthogonallyprojecting every satisfying assignment onto the remaining dimensions.
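As a small illustration:
\begin{example}
    Consider the formula $\varphi = (0 \leq x \Land x \leq y)$ over the variables $\{x, y\}$. Projecting $\varphi$ onto $\{y\}$ yields $\varphi|_{\{y\}} = (0 \leq y)$: every value of $y$ occurring in a solution of $\varphi$ is non-negative, and every non-negative value of $y$ can be extended to a solution, e.g. with $x = 0$.
\end{example}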
Taking the definitions from \cite{bagnara2005convexpolyhedron}, a set of linear constraints exactly defines a polyhedron. Every linear non-strict inequality constraint $\psi \in \Lin^{\leq}$ describes a closed affine half-space and every linear strict inequality constraint $\psi \in \Lin^{<}$ describes an open affine half-space in the euclidean space $\R^n$.
Every linear non-strict inequality constraint describes a closed affine half-space and every linear strict inequality constraint describes an open affine half-space in the euclidean space $\R^n$. As discussed in \Sref{sec:constraints}, strictness of linear constraints and in turn openness of half-spaces are not relevant to the integer domain.
\begin{definition}[Not necessarily closed Polyhedra\cite{bagnara2005convexpolyhedron}\label{def:polyhedra}]
    $\mathcal{P} \subseteq \R^n$ is a not necessarily closed (convex) polyhedron if and only if it can be expressed by an intersection of a finite number of not necessarily closed affine half-spaces.
\begin{definition}[Convex Polyhedra\cite{bagnara2005convexpolyhedron}\label{def:polyhedra}]
    $\mathcal{H} \subseteq \R^n$ is a \emph{convex polyhedron} if and only if it can be expressed by an intersection of a finite number of affine half-spaces.
As discussed in \Sref{sec:constraints}, strictness of linear constraints and in turn openness of half-spaces are not relevant to the integer domain. Hence, when talking about integer arithmetic, one simply refers to polyhedra. Polyhedra can represent only linear constraints exactly. Non-linear polynomial
Polyhedra can represent linear constraints exactly. Non-linear polynomial
Polyhedra will be used for the partial evaluation algorithm described in chapter\ref{ch:theory}, since in practice the partial evaluation is called only onsmall sub-programs and precision is more desirable than performance.
Polyhedra will be used to compute the projection of \gls{fo}-formulas, possiblyover-approximating the constraints in the process.
\subsection{Intervals}\label{ssec:box}
For completeness' sake, let's introduce a third abstract domain: the interval domain. A closed interval $[a,b]$ with $a, b \in \R$ for a variable $v \in \V$ can be expressed as a pair of linear constraints of the form $a \leq v$ and $v \leq b$.
\subsection{Boxes}\label{ssec:box}For completeness' sake, let's introduce a third abstract domain of Boxes. Aclosed interval $[a,b]$ with $a, b \in \R$ for a variable $v \in \V$ can beexpressed as a pair of linear constraints of the form $a \leq v$ and $v \leq b$.
$\mathcal{P} \subseteq \R^n$ is a box if it can be expressed by a set ofintervals.
$\mathcal{B} \subseteq \R^n$ is a box if it can be expressed as a Cartesian product $I_1 \times \dots \times I_n$ of intervals $I_1,\dots,I_n$.
polyhedron $P$ one can find a box $P'$ that contains the polyhedron $P$. Boxesare computationally very efficient, although rather imprecise for mostapplications.
polyhedron $\mathcal{H}$ one can find a box $\mathcal{B}$ that contains thepolyhedron $\mathcal{H}$. Boxes are computationally very efficient, althoughrather imprecise for most applications.
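For example:
\begin{example}
    Consider the polyhedron $\mathcal{H}$ described by the constraints $x \geq 0$, $y \geq 0$ and $x + y \leq 2$. The smallest box containing $\mathcal{H}$ is $\mathcal{B} = [0,2] \times [0,2]$, which also contains points such as $(2,2)$ that violate $x + y \leq 2$.
\end{example}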
\section{Probability Theory}\label{sec:probability}
Imagine a probabilistic experiment with some possible outcomes $\Omega$. For example, a coin-toss can have arbitrarily many outcomes when taking the position on the table into account, the fact that the coin might get lost in the process, etc. A probability measure describes the probabilities of sets of random outcomes. For example, the coin-toss on the table has a subset of outcomes where the coin shows heads. A probability measure would assign a probability close to 50\% to this subset for a somewhat normal coin-toss.
the intuitive property of a random event: if one can measure two subsets of outcomes (e.g. $A = \set{\omega \in \Omega}{\text{coin shows head}}$ and $B = \set{\omega \in \Omega}{\text{coin shows tail}}$), then one can also measure the union of both subsets ($A \union B = \set{\omega \in \Omega}{\text{coin shows head or tail}}$).
Let $X$ be a discrete random variable. Its distribution is described by the probability measure $\mu$ with $\mu(A) = P(\set{\omega \in \Omega}{X(\omega) \in A})$. In particular, the probability of a single value $r$ is well-defined as
\begin{align}
	P(X=r) = \mu(\{r\}) = P(\set{\omega \in \Omega}{X(\omega) = r}).
\end{align}
When considering a random variable one is usually interested in the value to expect in general. For example, most possible outcomes of a binomial distribution are very improbable. This is described by the expected value.
\begin{definition}[Expected Value \cite{billingsley2011}]
	The expected value of a random variable $X$ on a probability space $(\Omega, \F, P)$ is the integral of $X$ with respect to the measure $P$:
	\begin{equation*}
		\E(X) = \int X \,dP = \int_\Omega X(\omega) \,P(d\omega)
	\end{equation*}
	For discrete random variables the integral can be replaced by a sum (see Lemma 48 in \cite{meyer20arxiv}):
	\begin{equation*}
		\E(X) = \sum_{r \in \bar{\R}_{\geq0}} r \cdot P(X = r)
	\end{equation*}
\end{definition}
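As a short illustration of the discrete case, consider a geometrically distributed random variable, a distribution that will reappear when discussing expected runtimes.
\begin{example}
	Let $X$ be the number of tosses of a fair coin up to and including the first head. Then $P(X = r) = 2^{-r}$ for every $r \geq 1$ and
	\begin{equation*}
		\E(X) = \sum_{r \geq 1} r \cdot 2^{-r} = 2.
	\end{equation*}
\end{example}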
Let $\D$ be the set of (valid) probability distributions. $\Z[\V\union\D]$ is the set of integer polynomials containing variables $\V$ or probability distributions $\D$. They behave exactly like normal polynomials, except that the values of the distributions are sampled probabilistically instead of being given as arguments. As such, we can define a function $[\cdot]: \Z[\V\union\D] \rightarrow \Sigma \rightarrow \Z \rightarrow [0,1]$, where $[\varf{}](s)(k)$ is the probability that the polynomial $\varf{} \in \Z[\V \union \D]$ evaluates to the value $k\in\Z$ under a state $s \in \Sigma$.
we write $[x_i\backslash s(x_i)]$. The resulting polynomial $\varf{}[x_i\backslash s(x_i)]$ describes a random variable $[\varf{}](s)$ that takes a value at random depending only on the distributions $D_1, \dots, D_m$ within. The random variable has the following probability distribution:
The value of a random variable can be approximated by a constraint on the bounds of the support of the probability distribution. For any probability distribution $D \in \D$ and a variable $d \in \V$, we define
\begin{equation*}
	\lfloor D \rceil_d = \begin{cases}
		\min (\support(D)) \leq d \leq \max (\support(D)) & \text{if } \min(\support(D)) \text{ and } \max(\support(D)) \text{ exist}, \\
		\min (\support(D)) \leq d & \text{if only } \min(\support(D)) \text{ exists}, \\
		d \leq \max(\support(D)) & \text{if only } \max(\support(D)) \text{ exists},\\
		\texttt{true} & \text{otherwise}.
	\end{cases}
\end{equation*}
Let $A\subset\V$ and $\varf \in \Z[A\union\D]$ be a probabilistic polynomial with probability distributions $D_1,\dots,D_n$ used within. Then the non-probabilistic over-approximation of the probabilistic polynomial is defined as the tuple $(\lfloor \varf\rceil,\tilde{\varf})$ with
\begin{align*}
	\lfloor \varf \rceil &= \LAnd_{i=1,\dots,n} \lfloor D_i \rceil_{d_i} \text{ and} \\
	\tilde{\varf} &= \varf{}[D_i/d_i]
\end{align*}
where $d_1,\dots,d_n \in \V\setminus A$ are fresh variables.
\begin{example}
	Consider the same probabilistic polynomial as in Example \ref{ex:prob_polynomial}: $\varf = x^2 + \textsc{Unif}(3,5)$. The support of the uniform distribution is bounded in both directions:
	\begin{align*}
		\min(\support(\textsc{Unif}(3,5))) &= 3 \\
		\max(\support(\textsc{Unif}(3,5))) &= 5
	\end{align*}
	Hence a variable $d$ assigned from the distribution satisfies the constraint $\lfloor \varf\rceil = \lfloor \textsc{Unif}(3,5)\rceil_d = 3 \leq d \leq 5$. The distribution in the probabilistic polynomial is replaced by the variable $d$. We get the new approximated polynomial $\tilde{\varf} = x^2 + d$.
\end{example}
contains a condition $\tau$, called \textit{guard}, that must be satisfied by the current state for the transition to be taken. Whenever a transition is taken it can modify the state with some \textit{update} $\eta$. An update assigns a new value to each program variable depending on the previous state. Every integer program starts at a unique location usually called $\ell_0$. Whenever no transition guard is satisfied the program terminates. The arguments to the program are given by some starting state $s_0$, assigning a value to every program variable. We use a graphical representation of programs, as shown in \fref{fig:ex_pip_nd}. Updates assign a value to every program variable; however, for better readability, updates that preserve the value of a program variable are omitted.
or probabilistic, which is called non-deterministic or probabilistic sampling. Non-deterministic sampling occurs when on a transition a variable is updated to an expression that contains unbound variables (variables that are not part of the set of program variables). The unbound variables are sampled arbitrarily from all the integers, but can be constrained by the guard, in which case the transition is only taken if the non-deterministic sampling fulfills the guard. If the sampled value fulfills no transition's guard, the program terminates.
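As a small illustration, consider the following hypothetical transition with a non-deterministically sampled temporary variable.
\begin{example}
	Consider a transition with guard $0 \leq u \leq x$ and update $x := u$, where $u \notin \PV$ is a temporary variable. The value of $u$ is sampled non-deterministically from the integers, but the transition can only be taken for values satisfying $0 \leq u \leq x$; hence $x$ is updated to an arbitrary value between $0$ and its previous value.
\end{example}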
\Sref{sec:probability}. Updates are expanded to integer polynomials with variables and distributions. The value of the indeterminate in the polynomial is equal to the (possibly temporary) variable or sampled from the distribution with the corresponding probability.
\item $\Loc_\Prog \subseteq \Loc$ is a finite non-empty set of locations,
\item $\GT_\Prog \subseteq \GT$ is a finite set of general transitions, where a general transition $g \in \GT_\Prog$ is a finite non-empty set of transitions $t = (\ell, p, \tau, \eta, \ell') \in \T_\Prog \subseteq \T$ with:
transitions share a common start location which we call $\ell_g$.
\item $\ell_0 \in \Loc$ is the start location of the program.
Before defining the semantics of a \gls{pip}, a new special location $\ell_\bot$ is introduced that will be used to indicate the termination of a program, as well as a virtual transition $t_\bot$ and a general transition $g_\bot = \{t_\bot\}$ with the same purpose. Instead of just \enquote{terminating}, a program takes the transition $g_\bot$ to the location $\ell_\bot$ if and only if no other general
$\ell_\bot$ the run loops indefinitely on the location $\ell_\bot$. In addition, a new transition $t_\text{in}$ is added for the start of the program.
defined as $\confs_\Prog = (\Loc_\Prog \union \{\ell_\bot\}) \times (\T_\Prog \union \{t_{\text{in}}, t_\bot\}) \times \Sigma$. Note that in addition to the location and state, the last transition taken to reach the current location is remembered as well. This is not relevant for the partial evaluation but is important to define the probabilistic properties of a \gls{pip}.
set of all runs is defined as $\runs_\Prog = \confs_\Prog^\omega$. In contrast, a finite prefix is a finite sequence of configurations. The set of all finite prefixes is defined as $\fpath_\Prog = \confs_\Prog^*$.
\begin{lemma}[Proposition 7.1.1 in \cite{puterman1994markov}]
	Let $X$ be a random variable describing the total reward of a Markov decision process. If the expected reward $\ESs(X)$ is finite for all schedulers $\scheduler \in \HDS$ and all initial states $s_0$, then the following holds:
	\begin{equation*}
		\sup_{\scheduler \in \HDS}{\ESs(X)} = \sup_{\scheduler \in \MDS}{\ESs(X)}
	\end{equation*}
\end{lemma}
Let $\scheduler \in \MDS_\Prog$ be a scheduler and $s_0 \in \Sigma$ be an initial state. First, the probability for a program to start at a configuration $c$ is 1 only for the configuration that has the initial state $s_0$ at the start location $\ell_0$ coming from the initial transition $t_\text{in}$. All other configurations are invalid starting configurations and get assigned a zero probability by the probability measure $\prSs : \confs_\Prog \rightarrow [0,1]$.
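Spelled out as a formula (a reconstruction from the description above), the starting probability can be written as:
\begin{equation*}
	\prSs(\ell, t, s) =
	\begin{cases}
		1 & \text{if } \ell = \ell_0, t = t_\text{in} \text{ and } s = s_0, \\
		0 & \text{otherwise}.
	\end{cases}
\end{equation*}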
the configuration $c$ with $c' = (\ell', t', s')$ and $t' = (\ell, p, \tau, \eta, \ell') \in g$ depends on three things: First, the transition $t' \in g$ is taken with the probability $p$; second, the probability that each program variable $x\in\PV$ is updated to the value $s'(x)$ by the probabilistic update (recall Definition \ref{def:prob_update}); third, that the temporary variables are
$\prSs(f) > 0$. A run $c_0c_1\dots \in \runs_\Prog$ is \textit{admissible} if and only if every finite prefix $c_0\dots c_n \in\fpath_\Prog$ is admissible.
program is in the terminal location $\ell_\bot$ and the following configuration must again be in the terminal location $\ell_\bot$. A terminating run must end with
For a scheduler $\scheduler \in \MDS_\Prog$ and an initial state $s_0 \in \Sigma$ the expected runtime complexity $\Rt_{\scheduler,s_0}(\Prog)$ of a \gls{pip} $\Prog$ is defined as the expected value of the runtime complexity.
for a given starting state $s_0 \in \Sigma$ and scheduler $\scheduler \in \MDS$, the expected runtime depends on the initial value of the variable $x$. The probability of decrementing $x$ once follows a geometric distribution, so the runtime for decrementing $x$ until zero ($x$ times) is the sum of $x$ geometrically distributed trials.
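As a sketch of the computation (assuming, for illustration, that each iteration decrements $x$ with some fixed probability $p > 0$), the number of iterations needed for a single decrement is geometrically distributed with expected value
\begin{equation*}
	\sum_{k \geq 1} k \cdot (1-p)^{k-1} p = \frac{1}{p},
\end{equation*}
so the expected number of iterations until $x$ reaches zero is $\frac{x}{p}$, which is linear in the initial value of $x$.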
For $s_0\in\Sigma$ and a \gls{pip} $\Prog$, $\RB \in \B$ is an expected runtime bound if
case for a scheduler $\scheduler$ and an initial state $s_0 \in \Sigma$ there is only a single run $c_0c_1\dots \in \runs_\Prog$ with probability $\PrSs(c_0c_1\dots) = 1$ and the expected runtime defines the runtime of the program.
\subsection{Graph Operations}
The operations $\Tin : \Loc \rightarrow 2^\T$, $\Tout : \Loc \rightarrow 2^\T$, $\GTin : \Loc \rightarrow 2^\GT$, and $\GTout: \Loc \rightarrow 2^\GT$ return the set of (general) transitions starting or ending in the given location. A \gls{pip} $\Prog$ describes a graph $G_\Prog$ with vertices $\Loc_\Prog$ and edges $\T_\Prog$. Let $\G$ be the set of graphs over vertices $\Loc$ and transitions $\T$. Adding a transition, and possibly new vertices (locations) therein, to a graph is denoted by the plus operator $+: \G \times \T \rightarrow \G$. Removing a transition from a graph, while preserving the vertices therein, is denoted by the minus operator $- : \G \times \T \rightarrow \G$.
\begin{definition}[\acrfull{scc}]
	A set of transitions $\S \subseteq \T_\Prog$ is called a \emph{component} of $\Prog$ with locations $V(\S)= \set{\ell, \ell'}{(\ell, \_, \_, \_,\ell') \in \S}$. A component $\S \subseteq \T_\Prog$ is \emph{strongly connected} when for every two locations $\ell, \ell' \in V(\S)$ there exists a path between the two using only the transitions from $\S$.
\end{definition}
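A small example with hypothetical transitions illustrates these operations and the notion of a strongly connected component.
\begin{example}
	Consider the transitions $t_1 = (\ell_0, \_, \_, \_, \ell_1)$, $t_2 = (\ell_1, \_, \_, \_, \ell_2)$ and $t_3 = (\ell_2, \_, \_, \_, \ell_1)$. Then $\Tin(\ell_1) = \{t_1, t_3\}$ and $\Tout(\ell_1) = \{t_2\}$. The component $\S = \{t_2, t_3\}$ with $V(\S) = \{\ell_1, \ell_2\}$ is strongly connected, since $t_2$ leads from $\ell_1$ to $\ell_2$ and $t_3$ leads back from $\ell_2$ to $\ell_1$.
\end{example}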
Consider the program $\Prog_1$ displayed in \fref{fig:classic_c}. It contains two program variables $x, y$ and one temporary variable $u$ which is used to
clearly terminates. The first loop increments $x$ up to 3 if $x$ is positive to begin with. The second loop decrements $y$ while $y$ is positive. However, the classical analysis using only \gls{mprf} fails to find a finite
of \gls{cfr} on sub-\gls{scc} level, the program (see \fref{fig:classic_pe_c}) can be transformed into an equivalent program where the analysis with \gls{mprf} succeeds at finding a finite runtime-complexity bound.
\fref{fig:prob_c}. The variables $x$ and $y$ are now only incremented or decremented with 50\% probability in every iteration. The program still almost surely terminates, since a longer runtime becomes increasingly unlikely. The current version of \gls{koat} fails to find finite runtime and expected complexity bounds for this program. We would like to transform $\Prog_2$ into an equivalent program like the one shown in \fref{fig:prob_pe_c}, similarly to the \gls{cfr} technique presented by \citeauthor{giesl2022lncs}\cite{giesl2022lncs} and \citeauthor{Domenech19}\cite{Domenech19}.
RWTH Aachen University and aim at finding lower and upper runtime-complexity bounds, respectively, on transition systems. At its core, \gls{koat} computes bounds for subprograms using \gls{mprf} or \gls{twn}, and then combines the subprograms to generate upper runtime-complexity and size bounds for the whole program. Optionally, to improve the analysis, \gls{koat} can resort to partial evaluation as a control-flow refinement technique on a sub-\gls{scc} level \cite{giesl2022lncs}. \gls{apron} is the overall tool taking part in competitions. It works on various input formats and transforms them accordingly to be used with specialized tools. \gls{koat} and \gls{loat} are used under the hood of \gls{apron} for the overall analysis.
independent consecutive loops. iRankFinder implemented the control-flow refinement technique by partial evaluation proposed by
\Cref{ch:preliminaries}, focusing especially on constraints, probability theory, and (probabilistic) integer programs. In \Cref{ch:theory} the \gls{cfr} techniques from iRankFinder are adapted to \Glspl{pip}. We will prove the theoretical correctness of the construction. Furthermore, the technique of sub-\gls{scc} level control-flow refinement is presented. \Cref{ch:implementation} will focus on the changes made to \gls{koat}, and the performance improvements made with the new implementation. Finally, in \Cref{ch:conclusion} we will recapitulate the results of this thesis and give an outlook on future research.
\newacronym{fvs}{FVS}{Feedback Vertex Set}