YUEGUKXBV6IKGZPTBXJXW2ANYGYSRJILQFLYCYQUYD54LJVZS4CQC KDY46C5VOI2QWEXPE6J6BW3U2KEMH3IYPUNYKH5FAISTRCWBWO3AC 5GR6GP7ONTMALU3XHWDTWWIDIW2CGYOUOU6M3H7WMV56KCBI25AAC HF25ZQRDZXNE2IM3YGE3NAPWGDE3U252H764IBIJSIZS3LRLYIMAC 567NUQ633R5ESTE2LWXVUGM7XEZGJ65IRKPOCDZJS3JE5RAMR5KAC FKCEFKZYPSWFLC4GEG3L46BFTNSAMNMFD4VAGU6DADWAHYYIQIMQC 6PTHERE26NXSC3ZXSI5UYKWTMPYZD4WMYFBJKZE3MRRTKC7MIU6QC Q3TJZVCBWD7FDMS4MWTT7VUNOI3R3MQCOU6JJCJ5UVIDJPXKMLWAC R2A2OD7J5ATBRR7W4QPTZDVH6GGHQ7XBX2JE5R3GKVS5WIQ43O3AC 7JUQQYQCE7G3ESFJETNGEPBCYGR5NGMAWHFPWI24RSQJPFM5YCTQC NEWG2Q2EYR3BOPWVN44EXIPVH2GH23BYFHJXUSNYWWT7WWDEAIKQC KVSF5AFJEIMIYAC264OUAA56SGP5MFAUG3AECKOTUPIXPGULVFQQC QIVGMR6INKBVT5IFCO2UJJQEO4PVW3ZM7IDTCTCOKOX7VKG7UPFAC GNUONJQNZG2MNEE55RW3XOQP3UICEBOKLVLKV42H3K64IBGLUGIQC QCVDPRXH57UFLU4YV3P2I5RQZGCBUKROYUZHDWOCHL43KDYN7MPAC TKJMPTB73W5TLLTNUOC4FGRQ7DHE5GRBTL7C5QBXRCNQPMJKGEQAC RVA3DOCNBWFBWA2MLC2TVQOEY25AUCYGEJSWMD72QRQ26AFLVZUAC BGMT6NIKIDEGQ74DC4LPVBR7JCUYHHQBOZXHYXMO5F2UVEQ3N47QC M3WK5HF3XU5MN2JKJMRJVDMC74F4MZAQ23FTGJRW4CGU5ZL72MHAC YS5O2UANXCWRI3W7KMRKKK7OT7R6V3QDL4JS4DRLXM2EZUIPFALAC A5NVU6DZYOG6GRXJU6DZLCUQTKA7LPUWOGQ4NCJSY6FJB6GSP3OAC EIPHTX56WTRFBMBNA4VYMKK4L7GWHA4Q2AQ3BCG2ZGZB3DBRENBQC UVN5HOW3H2OVESJ62UFNQNS6VXHI5TRV22HQ44EOYU42453SDD2QC DDNESFMD4LGNTDGTCW27FQPCOR5Y5MJWN2TWCSIRNBK6QGBITBRQC KUEZAMEH2M3K6EDHWMYXQMHZ2QTTH4KBM5CO7A7EB64ABEWRE6WQC QMVJ2ZKX5ALAMMIL7E47ZQS67QFZJ7Z3HAY5G7X72KPEQEHWRGCQC 45HZPHVHYJ2BXATQ4MKCMPMMPZFBNJECY7JV3WX63BSA4CSQN6HAC MQJMCLGFSBSFJ3JPCIHQSBZS4XZPGJI7NGGRASX5UVGJO6HES25AC % TeX root = ../main.tex\begin{tikzpicture}[tree]\node[state,initial] (l0_0) {$\langle l_0, \textit{true}\rangle$};\node[state] (l1_0) [below=1.2cm of l0_0] {$\langle l_1, \textit{true}\rangle$};\node[state] (l2_0) [below right=of l1_0,anchor=west] {$\langle l_2, x>0 \rangle$};\node[state] (l3_0) [below left=of l1_0,anchor=east] {$\langle l_3, x\leq0 \rangle$};\node[state] (l1_1) [below left=of l2_0] {$\langle l_1, x \geq 0, y\leq 0\rangle$};\node[state] (l1_2) [below right=of l2_0] 
{$\langle l_1, x > 0, y \geq 0\rangle$};\node[state] (l3_1) [below left=of l1_1,anchor=east] {$\langle l_3, x=0,y\leq 0\rangle$};\node[state] (l2_2) [below right=of l1_1] {$\langle l_2, x > 0,y\leq 0\rangle$};% \node[state] (l2') [below left=of l1] {$\langle l_2, x \leq 0\rangle$};% \node[state] (l3) [below right=of l2] {$\langle l_1, x > 2 \rangle$};% \node[state] (l3') [below left=of l2] {$\langle l_2, \textit{false}\rangle$};% \node[state] (l4) [below right=of l3] {};% \node[state] (l4') [below left=of l3] {};\path[->] (l0_0) edge node {$\textit{true}$} (l1_0)(l1_0) edge node[ ]{$x > 0$} (l2_0)edge node[swap]{$x \leq 0$ } (l3_0)(l2_0) edge node[swap,anchor=east]{$y \leq 0, x := x-1$} (l1_1)edge node[anchor=west]{$y > 0, y:=y-1$ } (l1_2)% (l2) edge node[swap]{$x \leq 0$} (l3')% edge node[] {$x > 0, x := x+1$ } (l3);% \path[dotted,->]% (l3) edge (l4')% edge (l4);\end{tikzpicture}
% TeX root = ../main.tex
\todo{unfinished}
\begin{tikzpicture}[program]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=of l0] {$l_1$};
\node[state] (l2) [right=of l1] {$l_2$};
\node[state] (l3) [right=of l2] {$l_3$};
\node[state] (l4) [below=of l1] {$l_4$};
\path[->] (l0) edge node[swap] {$\textit{true}$} (l1)
(l1) edge node[swap] {$x > 0$} (l2)
edge node[swap] {$x \leq 0$} (l4)
(l2) edge node[swap] {$y > 0$} (l3)
edge node[swap] {$\begin{aligned}
y &\leq 0 \\
x &:= x-1 \\
z &:= z+2 \\
\end{aligned}$} (l1)
(l3) edge node[swap] {$\begin{aligned}
%x &:= x+y \\
y &:= y-1
\end{aligned}$} (l3);
\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[program,initial above,font=\scriptsize]\node[state,initial] (l0) {$l_0$};\node[state] (l1) [below=2cm of l0] {$l_1$};\node[state] (l1a) at ($(l1) - (3,1.5)$) {$l_{1a}$};\node[state] (l2) [below=3cm of l1] {$l_2$};\path[->](l0) edge node { $t_0: \eta(x) = u $ } (l1)(l1) edge [out=140,in=120,looseness=2,draw=rot-75]node[below=0.3cm,pos=0.35] {$t_{1a}:\begin{aligned} \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1) edge [out=130,in=130,looseness=2,draw=rot-75] node[above,pos=0.4] {$t_{1b}:\begin{aligned} \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1a) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}t_{1b}:& \\ \tau = &1 \leq x \leq 3\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)(l1a) edge [bend right=15] node[swap] {$t_{2b}: \begin{aligned}\tau &= y > 0 \\&\Land w = 1\end{aligned}$} (l2)(l1) edge[bend left =20] node {$\begin{aligned}t_{2a}:& \\ \tau =& y > 0 \\\Land& w = 1\end{aligned}$} (l2)(l2) edge[bend left=20] node {$\begin{aligned}&t_3:\\ &\eta(y) = y - 1\end{aligned}$} (l1)% (l1) edge node {$t_4:x \leq 0$} (l2)% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=% x+1\end{aligned}$} ()% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=% x-1\end{aligned}$} ();\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[program]\node[state,initial] (l0) {$l_0$};\node[state] (l1) [below = of l0] {$l_1$};\node[state] (l2) [right= 3cm of l1] {$l_2$};\node[state] (l3) [below = of l1] {$l_3$};\path[->] (l0) edge node[swap] {$\textit{true}$} (l1)(l1) edge node {$x > 0$} (l2)edge node[swap] {$x \leq 0$} (l3)(l2) edge[bend left=45]node[anchor=north] {$t:\begin{aligned}y &> 0\\ y&:= y-1\end{aligned}$} (l1)edge[bend right=45]node[anchor=south] {$\begin{aligned}y &\leq 0\\ x&:=x-1\end{aligned}$} (l1);\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=of l0] {$l_1$};
\node[state] (l2) [right=of l1] {$l_2$};
\node[state] (l3) [right=of l2] {$l_3$};
\node[state] (l4) [below=of l1] {$l_4$};
\path[->] (l0) edge node[swap] {$\textit{true}$} (l1)
(l1) edge node[swap] {$x > 0$} (l2)
edge node[swap] {$x \leq 0$} (l4)
(l2) edge node[swap] {$y > 0$} (l3)
edge node[swap] {$\begin{aligned}
y &\leq 0 \\
x &:= x-1 \\
z &:= z+2 \\
\end{aligned}$} (l1)
(l3) edge node[swap] {$\begin{aligned}
%x &:= x+y \\
y &:= y-1
\end{aligned}$} (l3);
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[tree]
\node[state,initial] (l0) {$\langle \ell_0, \textit{true}\rangle$};
\node[state] (l1) [below=1.2cm of l0] {$\langle l_1, \textit{true}\rangle$};
\node[state] (l2) [below right=of l1] {$\langle \ell_1, x > 1 \rangle$};
\node[state] (l2') [below left=of l1] {$\langle \ell_2, x \leq 0\rangle$};
\node[state] (l3) [below right=of l2] {$\langle \ell_1, x > 2 \rangle$};
\node[state] (l3') [below left=of l2,gray]{$\langle \ell_2, \textit{false}\rangle$};
\node[state] (l4) [below right=of l3] {};
\node[state] (l4') [below left=of l3] {};
\path[->] (l0) edge node {$\tau = \textit{true}$} (l1)
(l1) edge node[swap]{$\tau = x \leq 0$} (l2')
edge node {$\tau = x > 0; \eta(x) = x+1$ } (l2)
(l2) edge[gray]node[gray,swap]{$x \leq 0$} (l3')
edge node[] {$\tau = x > 0; \eta(x) = x+1$ } (l3);
\path[dotted,->]
(l3) edge[gray] (l4')
edge (l4);
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program]
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [below = of l0] {$\ell_1$};
\node[state] (l2) [below = of l1] {$\ell_2$};
\path[->] (l0) edge node[swap] {$t_1: \tau = \texttt{true}$} (l1)
(l1) edge node[swap] {$t_3: \tau = x \leq 0$} (l2)
edge [loop right] node {%
$t_2: \begin{aligned}
\tau &= x > 0 \\ \eta(x)&= x+1
\end{aligned}$ } ();
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize]
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [below=2cm of l0] {$\ell_1$};
\node[state] (l2) [below=3cm of l1] {$\ell_2$};
\path[->]
(l0) edge node { $t_0: \eta_0(x) = u $ } (l1)
(l1) edge [out=110,in=150,looseness=10,draw=rot-75]
node[swap]{$t_{1a}:\begin{aligned} p&=0.5 \\ \tau_1&= 1 \leq x \leq 3
\Land w = 0\end{aligned}$} (l1)
(l1) edge [out=160,in=200,looseness=10,draw=rot-75]
node[swap]{$ t_{1b}: \begin{aligned}p&=0.5 \\ \tau_1&= 1 \leq x \leq 3
\Land w = 0 \\ \eta_{1b}(x)&= x + 1\end{aligned}$} (l1)
(l1) edge[bend left =30] node {$t_{2}:\begin{aligned}\tau_2 =& y > 0
\Land w = 1\end{aligned}$} (l2)
(l2) edge[bend left=30] node {$t_{3}:\begin{aligned}
p&=0.5 \\ \eta_3(y) &= y - \textsc{Bern}(0.5)
\end{aligned}$} (l1)
% (l2) edge[bend left=30,draw=rwth-75] node[swap] {$t_{3a}: p=0.5$} (l1)
% (l1) edge node {$t_4:x \leq 0$} (l2)
% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
% x+1\end{aligned}$} ()
% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
% x-1\end{aligned}$} ();
\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[tree,font=\scriptsize,every state/.append style={rectangle,rounded corners=10}]\node[state,initial] (l0) {$\langle \ell_0, \texttt{true}\rangle$};\node[state] (l1) [below=1.2cm of l0] {$\langle l_1, \textit{true}\rangle$};\node[state] (l2) [below right=of l1] {$\langle \ell_1, x > 1 \rangle$};\node[state] (l2') [below left=of l1] {$\langle \ell_2, x \leq 0\rangle$};\node[state] (l3) [below right=of l2] {$\langle \ell_1, x > 2 \rangle$};\node[state] (l3') [below left=of l2,gray]{$\langle \ell_2, \texttt{false}\rangle$};\node[] (l4) [below right=of l3] {};\node[] (l4') [below left=of l3] {};\path[->] (l0) edge node {$\tau = \texttt{true}$} (l1)(l1) edge node[swap]{$\tau = x \leq 0$} (l2')edge node {$\tau = x > 0; \eta(x) = x+1$ } (l2)(l2) edge[gray]node[gray,swap]{$x \leq 0$} (l3')edge node[] {$\tau = x > 0; \eta(x) = x+1$ } (l3);\path[dotted,->](l3) edge[gray] (l4')edge (l4);\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[program,font=\scriptsize]\node[state,initial] (l0) {$\ell_0$};\node[state] (l1) [below = of l0] {$\ell_1$};\node[state] (l2) [below = of l1] {$\ell_2$};\path[->] (l0) edge node[swap] {$t_1: \tau = \texttt{true}$} (l1)(l1) edge node[swap] {$t_3: \tau = x \leq 0$} (l2)edge [loop right] node {%$t_2: \begin{aligned}\tau &= x > 0 \\ \eta(x)&= x+1\end{aligned}$ } ();\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[program,initial above,font=\scriptsize]\node[state,initial] (l0) {$\ell_0$};\node[state] (l1) [below=2cm of l0] {$\ell_1$};\node[state] (l2) [below=3cm of l1] {$\ell_2$};\path[->](l0) edge node { $t_0: \eta_0(x) = u $ } (l1)(l1) edge [out=110,in=150,looseness=10,draw=rot-75]node[swap]{$t_{1a}:\begin{aligned} p&=0.5 \\ \tau_1&= 1 \leq x \leq 3\Land w = 0\end{aligned}$} (l1)(l1) edge [out=160,in=200,looseness=10,draw=rot-75]node[swap]{$ t_{1b}: \begin{aligned}p&=0.5 \\ \tau_1&= 1 \leq x \leq 3\Land w = 0 \\ \eta_{1b}(x)&= x + 1\end{aligned}$} (l1)(l1) edge[bend left =30] node {$t_{2}:\tau_2 = y > 0\Land w = 1$} (l2)(l2) edge[bend left=30] node {$t_{3}:\begin{aligned}p&=0.5 \\ \eta_3(y) &= y - \textsc{Bern}(0.5)\end{aligned}$} (l1)% (l2) edge[bend left=30,draw=rwth-75] node[swap] {$t_{3a}: p=0.5$} (l1)% (l1) edge node {$t_4:x \leq 0$} (l2)% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=% x+1\end{aligned}$} ()% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=% x-1\end{aligned}$} ();\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[tree,initial above,font=\scriptsize,every state/.append style={rectangle,rounded corners=10}]\node[state,initial] (l0) {$\langle\ell_0,\texttt{true}\rangle$};\node[state] (l1) [below=3cm of l0] {$\langle\ell_1,\texttt{true}\rangle$};\node[state] (l2) [below=3cm of l1] {$\langle\ell_2,\texttt{true}\rangle$};\path[->](l0) edge node { $t_0: \eta_0(x) = u $ } (l1)(l1) edge [out=110,in=150,looseness=6,draw=rot-75]node[swap]{$t_{1a}:\begin{aligned} p&=0.5 \\ \tau_1&= 1 \leq x \leq 3\Land w = 0\end{aligned}$} (l1)(l1) edge [out=160,in=200,looseness=6,draw=rot-75]node[swap]{$ t_{1b}: \begin{aligned}p&=0.5 \\ \tau_1&= 1 \leq x \leq 3\Land w = 0 \\ \eta_{1b}(x)&= x + 1\end{aligned}$} (l1)(l1) edge[bend left =30] node {$t_{2}:\begin{aligned}\tau_2 =& y > 0\Land w = 1\end{aligned}$} (l2)(l2) edge[bend left=30] node {$t_{3}:\begin{aligned}p&=0.5 \\ \eta_3(y) &= y - \textsc{Bern}(0.5)\end{aligned}$} (l1);\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize,every state/.append style={rectangle,rounded corners=10},]
\node[state,initial] (l0t) {$\langle \ell_0, \texttt{true} \rangle$};
\node[state] (l1t) [below=2cm of l0t] {$\langle \ell_1, \texttt{true} \rangle$};
\node[state] (l1p134) [below=2cm of l1t] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4 \rbrace \rangle$};
\node[state] (l1p124) [right=4cm of l1p134] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4 \rbrace \rangle$};
\node[state] (l2py) [left=4cm of l1p134] {$\langle \ell_2, y > 0 \rangle$};
\node[state] (l2p134y) [below=2cm of l1p134] {$\langle \ell_2, \psi_1 \land \psi_3 \land \psi_4 \land y>0 \rangle$};
\node[state] (l2p124y) [right=4cm of l2p134y] {$\langle \ell_2, \psi_1 \land \psi_2 \land \psi_4 \land y>0 \rangle$};
\node[state] (l1p56) [left=4cm of l2p134y] {$\langle \ell_1, \lbrace \psi_5, \psi_6 \rbrace \rangle$};
\node[state,] (l1p13456) [below=2cm of l2p134y] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
\node[state,] (l1p12456) [right=4cm of l1p13456] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
% New in figure 2
\node[state] (l2p13456y) [below=2.5cm of l1p13456] {$\left\langle\ell_2,\begin{aligned} \psi_1 &\land \psi_3 \land \psi_4 \land\\ \psi_5 &\land \psi_6 \land y > 0 \end{aligned}\right\rangle$};
\node[state] (l2p56y) [below=3cm of l1p56] {$\langle \ell_2, \psi_5 \land \psi_6 \land y > 0 \rangle$};
\node[state] (l2p12456y) [right=4cm of l2p13456y] {$\left\langle\ell_2, \begin{aligned}\psi_1 &\land \psi_2 \land \psi_4 \land \\ \psi_5 &\land \psi_6 \land y > 0 \end{aligned}\right\rangle$};
% Transitions
\path[->]
(l0t) edge[] node (t01) { $t_{0,1}$ } (l1t)
(l1t) edge[] node[swap] (t21) { $t_{2,1}$ } (l2py)
(l1t) edge[bend right=20] node[swap] (t1a1) { $t_{1a,1}$ } (l1p134)
(l1t) edge[] node (t1b1) { $t_{1b,1}$ } (l1p124) % MISSING IN THE SKETCH
(l2py) edge[] node[swap] (t31) { $t_{3,1}$ } (l1p56)
(l1p134)
edge[bend right=10] node[swap] (t1b2) { $t_{1b,2}$ } (l1p124)(l1p134) edge[loop above,transform canvas={xshift=5mm}] node[swap] (t1a2) { $t_{1a,2}$ } ()(l1p134) edge[] node (t22) { $t_{2,2}$ } (l2p134y)(l1p124) edge[bend right=10] node[swap] (t1a3) { $t_{1a,3}$ } (l1p134)(l1p124) edge[loop above] node (t1b3) { $t_{1b,3}$ } ()(l1p124) edge[] node (t23) { $t_{2,3}$ } (l2p124y)(l1p56) edge[bend right=30] node (t1a6) { $t_{1a,6}$ } (l1p13456)(l1p56) edge[out=290,in=205] node[swap,pos=.40] (t1b6) { $t_{1b,6}$ } (l1p12456)(l2p134y) edge[] node[swap] (t32) { $t_{3,2}$ } (l1p13456)(l2p124y) edge[] node[swap] (t33) { $t_{3,3}$ } (l1p12456)(l1p13456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1a4) { $t_{1a,4}$ } ()(l1p12456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1b5) { $t_{1b,5}$ } ()(l1p13456) edge[bend left=10] node (t1b4) { $t_{1b,4}$ } (l1p12456)(l1p12456) edge[bend left=10] node (t1a5) { $t_{1a,5}$ } (l1p13456)% (l1p56) edge[rot, bend right=20] node[swap] (t25) { $t_{2,5}$ } (l2p5)(l1p56) edge[bend left=15,transform canvas={xshift=-4mm}] node[] (t26) { $t_{2,6}$ } (l2p56y)(l2p56y) edge[bend left=15,transform canvas={xshift=-4mm}] node[] (t36) { $t_{3,6}$ } (l1p56)% (l1p13456) edge[bend right=20] node[swap] (t24) { $t_{2,4}$ } (l2p1345)(l1p13456) edge[bend left=20] node[pos=.55] (t26) { $t_{2,4}$ } (l2p13456y)(l2p13456y) edge[bend left=20] node[pos=.45] (t34) { $t_{3,4}$ } (l1p13456)% (l1p12456) edge[bend right=20] node[swap] (t26) { $t_{2,6}$ } (l2p1245)(l1p12456) edge[bend right=20] node[swap] (t25) { $t_{2,5}$ } (l2p12456y)(l2p12456y) edge[bend right=20] node[swap] (t35) { $t_{3,5}$ } (l1p12456);\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize,every state/.append style={rectangle,rounded corners=10},every initial by arrow/.style={grun}]
\node[state,initial,grun] (l0t) {$\langle \ell_0, \texttt{true} \rangle$};
\node[state,grun] (l1t) [below=2cm of l0t] {$\langle \ell_1, \texttt{true} \rangle$};
\node[state,rwth] (l1p134) [below=2cm of l1t] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4 \rbrace \rangle$};
\node[state,rwth] (l1p124) [right=4cm of l1p134] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4 \rbrace \rangle$};
\node[state,rwth] (l2p5) [left=4cm of l1p134] {$\langle \ell_2, \lbrace \psi_5 \rbrace \rangle$};
\node[state,rot] (l2p1345) [below=2cm of l1p134] {$\langle \ell_2, \lbrace \psi_1, \psi_3, \psi_4, \psi_5 \rbrace \rangle$};
\node[state,rot] (l2p1245) [right=4cm of l2p1345] {$\langle \ell_2, \lbrace \psi_1, \psi_2, \psi_4, \psi_5 \rbrace \rangle$};
\node[state,rot] (l1p56) [left=4cm of l2p1345] {$\langle \ell_1, \lbrace \psi_5, \psi_6 \rbrace \rangle$};
\node[state,] (l1p13456) [below=2cm of l2p1345] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
\node[state,] (l1p12456) [right=4cm of l1p13456] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
% Transitions
\path[->]
(l0t) edge[grun] node (t01) { $t_{0,1}$ } (l1t)
(l1t) edge[rwth] node[swap] (t21) { $t_{2,1}$ } (l2p5)
(l1t) edge[rwth,bend right=20] node[swap] (t1a1) { $t_{1a,1}$ } (l1p134)
(l1t) edge[rwth] node (t1b1) { $t_{1b,1}$ } (l1p124)
(l2p5) edge[rot, bend right=20] node[swap] (t31) { $t_{3,1}$ } (l1p56)
% NOTE(review): a second, identical "(l1p134) edge[rot] node (t22) ... (l2p1345)" was removed;
% the node name (t22) may only be defined once.
(l1p134) edge[rot, bend right=10] node[swap] (t1b2) { $t_{1b,2}$ } (l1p124)
(l1p134) edge[rot,loop above,transform canvas={xshift=5mm}] node[swap] (t1a2) { $t_{1a,2}$ } ()
(l1p134) edge[rot] node (t22) { $t_{2,2}$ } (l2p1345)
(l1p124) edge[rot, bend right=10] node[swap] (t1a3) { $t_{1a,3}$ } (l1p134)
(l1p124) edge[rot,loop above] node (t1b3) { $t_{1b,3}$ } ()
(l1p124) edge[rot] node (t23) { $t_{2,3}$ } (l2p1245)
(l1p56) edge[rot, bend right=20] node[swap] (t25) { $t_{2,5}$ } (l2p5)
(l1p56) edge[bend right=30] node (t1a5) { $t_{1a,5}$ } (l1p13456)
(l1p56) edge[out=270,in=210] node[swap] (t1b5) { $t_{1b,5}$ } (l1p12456)
(l2p1345) edge[bend right=20] node[swap] (t32) { $t_{3,2}$ } (l1p13456)
(l1p13456) edge[bend right=20] node[swap] (t24) { $t_{2,4}$ } (l2p1345)
(l2p1245) edge[bend right=20] node[swap] (t33) { $t_{3,3}$ } (l1p12456)
(l1p12456) edge[bend right=20] node[swap] (t26) { $t_{2,6}$ } (l2p1245)
(l1p13456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1a4) { $t_{1a,4}$ } ()
(l1p12456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1b6) { $t_{1b,6}$ } ()
(l1p13456) edge[bend left=10] node (t1b4) { $t_{1b,4}$ } (l1p12456)
(l1p12456) edge[bend left=10] node (t1a6) { $t_{1a,6}$ } (l1p13456);
% General transitions
\node[grun] [right=1cm of t01] {$G_1$};
\node[rwth] [above right=1cm of t1b1] {$G_2$};
\node[rot] [right=1cm of t23] {$G_3$};
\end{tikzpicture}
% TeX root = ../main.tex\begin{tikzpicture}[program,initial above,font=\scriptsize,every state/.append style={rectangle,rounded corners=10},]\node[state,initial] (l0t) {$\langle \ell_0, \texttt{true} \rangle$};\node[state] (l1t) [below=2cm of l0t] {$\langle \ell_1, \texttt{true} \rangle$};\node[state] (l2t) [below=2cm of l1t] {$\langle \ell_2, \texttt{true} \rangle$};\node[state,grun] (l1p134) [right=4cm of l1t] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4 \rbrace \rangle$};\node[state,grun] (l1p124) [right=4cm of l2t] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4 \rbrace \rangle$};% Transitions\path[->](l0t) edge[] node (t0) { $t_{0}$ } (l1t)(l1t) edge[bend left=20] node (t1) { $t_{2}$ } (l2t)(l2t) edge[bend left=20] node (t2) { $t_{3}$ } (l1t)(l1t) edge[grun, bend left=10] node (t1a1) { $t_{1a,1}$ } (l1p134)(l1t) edge[grun, bend left=10] node[pos=0.35] (t1b1) { $t_{1b,1}$ } (l1p124)(l1p134) edge[rot, bend left=10] node[pos=0.65] (t21) { $t_{2,1}$ } (l2t)(l1p124) edge[rot, bend left=10] node (t22) { $t_{2,2}$ } (l2t)(l1p134) edge[rot,loop above] node (t1a2) { $t_{1a,2}$ } ()(l1p124) edge[rot,loop below] node (t1b3) { $t_{1b,3}$ } ()(l1p134) edge[rot, bend right=20,transform canvas={xshift=5mm}] node[swap] (t1b2) { $t_{1b,2}$ } (l1p124)(l1p124) edge[rot, bend right=20,transform canvas={xshift=5mm}] node[swap] (t1a3) { $t_{1a,3}$ } (l1p134);% General transitions\node[] [below right=1cm of l0t] {$G_0$};\node[grun] [above left=1cm of l1p134] {$G_1$};\node[rot] [below right=2cm of l1p134] {$G_2$};\end{tikzpicture}
\gls{koat}s analysis goal is to find tight upper bounds for \glspl{pip}. Theanalysis techniques used such as \acrfull{mprf}, have their limits and cannotalways find tight bounds for every program, which motivates control-flowrefinement in order to find an equivalent program to the original one, thatopens the possibility for the analysis to find tighter runtime-complexity
The goal of \gls{koat}'s analysis is to find tight upper bounds for \glspl{pip}.The analysis techniques used, such as \acrfullpl{mprf}, have their limits andcannot always find tight bounds for every program. This motivates control-flowrefinement in order to find a program that is equivalent to the original one,which opens the possibility for the analysis to find tighter runtime-complexity
Naturally the control-flow refinement will add and replace transitions andlocations, hence the number runtime-complexity bounds and size-bounds of eachtransition won't be preserved. However we will show, that each transition in therefined program is a similar to a transition in the original program and henceevery run in the new program has a run of equal runtime-complexity andprobability in the original program and vice-versa.
Naturally, the control-flow refinement will add and replace transitions and
locations. We will show that each transition in the refined program is similar
to a transition in the original program and consequently every run in the new
program has a run of equal runtime complexity and probability in the original
program and vice versa.
evaluation technique presented by\citeauthor{Domenech19}\cite{Domenech19} for \acrlong{pip}. Thepartial evaluation will be constructed in a similar way but explicitly takingprobability and general transitions into account.
evaluation technique presented by \citeauthor{Domenech19}~\cite{Domenech19} for
\glspl{pip}. The partial evaluation will be constructed in a similar way but
explicitly taking probability and general transitions into account.
The construction is done in three steps. First the single evaluation step(unfolding) of transitions in a probabilistic program is presented. Second, apossibly infinite evaluation graph is constructed that will finally be collapsedinto a finite evaluation graph by the use of an abstraction layer.
The construction is done in three steps. First, an unfolding operation ispresented. Second, an evaluation step is presented, which uses the unfolding.Third, the evaluation step is repeated until a fix-point is found. The unfoldingand evaluation will be adapted multiple times in order to 1. find finitepartial evaluation graphs, and 2. evaluate only the relevant parts of theprogram.
Afterwards we will show that the partial evaluation is sound for \gls{pip} andprove that the expected runtime-complexity is preserved and thus also the
Afterwards, we will show that the partial evaluation is sound for \glspl{pip}and prove that the expected runtime complexity is preserved and thus also the
\{t_2\}$, and assigns the value for $w$ accordingly.The program can alter between the loops non-deterministically after everyiteration. Since both loops are independent of one another, thenon-determinism doesn't change anything about the expectedruntime-complexity of the program.On the general transition $g_1 = \{t_{1a}, t_{1b}\}$ at location $\ell_1$the program branches probabilistically, with a probability of $p=0.5$. Onthe transition $t_3$, the value with which $y$ is decremented is sampled
\{t_2\}$, and assigns the value for $w$ accordingly. The program canalternate between the loops non-deterministically after every iteration.Since both loops are independent of one another, the non-determinism doesnot change anything about the expected runtime complexity of the program. Onthe general transition $g_1 = \{t_{1a}, t_{1b}\}$ at location $\ell_1$ theprogram branches probabilistically, with a probability of $p=0.5$. On thetransition $t_3$, the value with which $y$ is decremented is sampled
impossible: First, the number of starting states is infinite; second the programis not guaranteed to terminate and hence the simulation of a specific run mightnot terminate either.For non-deterministic and probabilistic programs, the problem gets even morecomplex, since the transition taken does not only depend on the current state
impossible: First, the number of starting states is infinite, and second theprogram is not guaranteed to terminate, hence the simulation of a specific runmight not terminate either.For non-deterministic and probabilistic programs, the problem becomes even morecomplex, since the transitions taken do not only depend on the current state
simulating every single possible run, it would be nice to treat \emph{similar}states as one. Instead of simulating a single assignment, the set of allpossible assignments after a step is described by a set of constraints. Theresulting graph is called a \emph{partial evaluation graph}. It was formalizedfor \glspl{chc} by \citeauthor{gallagher2019eptcs}\cite{gallagher2019eptcs}. Inthis thesis the definitions are adapted to better fit the algorithm and theformalism of \Glspl{pip}.
simulating every single possible run, it would be more efficient to treat
\emph{similar} states as one. Instead of simulating a single assignment, the set
of all possible assignments after a step is described by a set of constraints.
The resulting graph is called a \emph{partial evaluation graph}. It was
formalized for \glspl{chc} by
\citeauthor{gallagher2019eptcs}~\cite{gallagher2019eptcs}. In this thesis, the
definitions are adapted to better fit the algorithm and the formalism of
\Glspl{pip}.
\ell, \texttt{true}\rangle \in \vers_\Prog$. Obviously, the lift of a program isnot useful for the analysis, as it doesn't capture any interesting properties ofthe program, besides what is already known in the original program. In thefollowing we will present a construction for a partial evaluation graph usingunfolding and abstraction.
\ell, \texttt{true}\rangle \in \vers_\Prog$.
\begin{definition}
We define the lift $G\lift\in \PEG$ of a graph $G \in \G$ denoted as
\begin{equation*}
G\lift = (\set{\langle\ell,\texttt{true}\rangle}{\ell \in V(G)},
\set{(\langle\ell,\texttt{true}\rangle,p,\tau,\eta,\langle\ell',\texttt{true}\rangle)}{t = (\ell,p,\tau,\eta,\ell') \in E(G)})
\end{equation*}
\end{definition}
\begin{example}\label{ex:lift}
The lift of the graph $G_{\Prog_3}$ of the program $\Prog_3$ is displayed in
\fref{fig:ex_lift}. It has the very same structure as the original program.
\begin{figure}[ht]\centering\input{figures/ch3_p3_lift}\caption{Lift of the graph $G_{\Prog_3}$ of the program$\Prog_3$.\label{fig:ex_lift}}\end{figure}\end{example}Obviously, the lift of a program is not useful for the analysis, as it does notcapture any interesting properties of the program, besides what is alreadyknown in the original program. In the following we will present a constructionfor a partial evaluation graph using unfolding and abstraction.
probabilistic sampling is over-approximated with an non-probabilistic update$\tilde{\eta}: \PV \rightarrow Z[\V]$ and a constraint denoted as
probabilistic sampling is over-approximated with a non-probabilistic update$\tilde{\eta}: \PV \rightarrow \Z[\V]$ and a constraint denoted as
We define a function $\evaluate_\Prog(T) : \G_\Prog \rightarrow \G_\Prog $ thatunfolds all versions $\langle \ell, \varphi \rangle$ in the graph with eachoutgoing transitions $t \in \TPout(\ell)$.
We define a function $\evaluate_\Prog : \G_\Prog \rightarrow \G_\Prog $ thatunfolds all versions $\langle \ell, \varphi \rangle \in V(G)$ in the graph $G\in \G_\Prog$ with each outgoing transition $t \in \TPout(\ell)$.
\input{figures/ch3_example1_program.tex}\caption{A Program $\Prog$\label{fig:ch3_inf_prog}}
\input{figures/ch3_p4_program.tex}\caption{A Program $\Prog_4$\label{fig:ch3_inf_prog}}
\input{figures/ch3_example1_tree.tex}\caption{Extract of the infinite evaluation graph of$\Prog$\label{fig:ch3_inf_tree}}
\input{figures/ch3_p4_tree.tex}\caption{Extract of the infinite evaluation \\graph of $\Prog_4$\label{fig:ch3_inf_tree}}
\begin{lemma}Let $\alpha: \Phi \rightarrow \A$ be an abstraction function over a finiteabstraction space $\A\subset\C$. The partial evaluation graph $G_\alpha^*$exists and is finite.
% \begin{lemma}% Let $\alpha: \Phi \rightarrow \A$ be an abstraction function over a finite% abstraction space $\A\subset\C$. The partial evaluation graph $G_\alpha^*$% exists and is finite.
\begin{proof}The abstraction $\alpha$ can map to only finitely many values in thefinite abstraction space $\A$. Every evaluation step adds only versionsfrom a reduced and finite set of version $\Loc_\Prog \times \A \subset\vers_\Prog$. Hence the set of vertices of partial evaluation graphequally finite. A transition is added after a version $\langle \ell,\_\rangle$ for every outgoing transitions $t \in \TPout(\ell)$ only.Since $\TPout(\ell) \subseteq \T_\Prog$ is finite, only finitely manytransitions can exist between any two versions.The graph grows with every evaluation step, hence the limit exists andit must be reached in a finite number of evaluation steps.\end{proof}\end{lemma}
% \begin{proof}% The abstraction $\alpha$ can map to only finitely many values in the% finite abstraction space $\A$. Every evaluation step adds only versions% from a reduced and finite set of version $\Loc_\Prog \times \A \subset% \vers_\Prog$. Hence the set of vertices of partial evaluation graph% equally finite. A transition is added after a version $\langle \ell,% \_\rangle$ for every outgoing transitions $t \in \TPout(\ell)$ only.% Since $\TPout(\ell) \subseteq \T_\Prog$ is finite, only finitely many% transitions can exist between any two versions.% The graph grows with every evaluation step, hence the limit exists and% it must be reached in a finite number of evaluation steps.% \end{proof}% \end{lemma}
\psi_1 = 1 \leq x, \hspace{1cm} \neg\psi_1 = 1 > x\\\psi_2 = 2 \leq x, \hspace{1cm} \neg\psi_2 = 2 > x\\\psi_3 = x \leq 3, \hspace{1cm} \neg\psi_3 = x > 3\\\psi_4 = x \leq 4, \hspace{1cm} \neg\psi_4 = x > 4\\\psi_5 = y > 0,\hspace{1cm} \neg\psi_5 = y \leq 0\\\psi_6 = y \geq 0,\hspace{1cm} \neg\psi_6 = y < 0\\\A = \{\psi_1,\neg\psi_1,\dots,\psi_6,\neg\psi_6\}
\begin{aligned}\psi_1 &= 1 \leq x, \hspace{1cm} \neg\psi_1 = 1 > x,\\\psi_2 &= 2 \leq x, \hspace{1cm} \neg\psi_2 = 2 > x,\\\psi_3 &= x \leq 3, \hspace{1cm} \neg\psi_3 = x > 3,\\\psi_4 &= x \leq 4, \hspace{1cm} \neg\psi_4 = x > 4,\\\psi_5 &= y > 0,\hspace{1cm} \neg\psi_5 = y \leq 0,\\\psi_6 &= y \geq 0,\hspace{1cm} \neg\psi_6 = y < 0, \text{ and}\end{aligned}\\\A_\Psi =\{\psi_1,\neg\psi_1,\dots,\psi_6,\neg\psi_6,\texttt{true},\texttt{false}\}.
partial evaluation graph is shown in \fref{fig:ex_prob_fpe}.\todo{updatefigure}\begin{figure}
partial evaluation graph is shown in \fref{fig:ex_prob_fpe}.\begin{figure}[ht]
evaluation step, and just show the final evaluation graph. It looks verysimilar to before, but we saved some entailment checks on location $\ell_2$,in return the versions at $\ell_2$ are not collapsed anymore.
evaluation step, and just show the final evaluation graph in
\fref{fig:ex_localized_pba}. It looks very similar to before, but we saved
some entailment checks on location $\ell_2$; in return, the versions at
$\ell_2$ are not collapsed anymore.
\begin{figure}[ht]
\centering
\input{figures/ch3_p3_full_pe_localized}
\caption{Partial evaluation of $\Prog_3$ with a localized property-based
abstraction $S_{\bot,\Psi_{\ell_1},\bot}$.\label{fig:ex_localized_pba}}
\end{figure}
The property based abstraction crafted in Example \ref{ex:localized_abstr} was
Clearly, a global abstraction function is a special case of a localized
abstraction function, where every location is abstracted with the same
abstraction function. How to select properties for the localized property-based
abstraction is discussed in \Cref{ch:implementation}.
The property-based abstraction crafted in Example~\ref{ex:localized_pba} was
per loop in the original program. This is formalized by the following lemma.
per loop in the original program. This is formalized by Lemma~\ref{lem:localized_pba_finite}. But before, we will adapt the evaluation one last time. Instead of evaluating the entire program, the evaluation will unfold only transitions in a given component $T$. The component is not required to be connected. How to select the component for best results is discussed in \Cref{ch:implementation}.
\begin{lemma}Let $S_\alpha$ be a dynamic abstraction function. The partial evaluationgraph $G_{S_\alpha}^*$ is finite if every loop $L$ in $\Prog$ contains alocation $\ell \in L$ such that$\set{S_\alpha(\langle\ell,\varphi\rangle)}{\varphi \in \Phi}$ is finite.
\begin{definition}[Component-level Evaluation]Let $\Prog$ be a \gls{pip} and $T \subseteq \T_\Prog$ be a component of$\Prog$. For a localized abstraction $S : \vers_\Prog \rightarrow \C$ anevaluation step $\evaluate_{\Prog,S,T} : \G_\Prog \rightarrow \G_\Prog$ isdefined as
\begin{proof}\todo{Proof}\end{proof}\end{lemma}
\begin{align*}\evaluate_{\Prog,S,T}(G) = G&+ \set{ \unfold_{S}(\langle \ell, \varphi \rangle, t)}{ \langle \ell,\varphi\rangle \in E(G) \text{, and } t \in \TPout(\ell) \cap T}\\&+ \set{(\langle\ell,\varphi\rangle,p,\tau,\eta,\langle\ell',\texttt{true}\rangle)}{\langle \ell, \varphi\rangle \in E(G) \text{, and } t = (\ell,p,\tau,\eta,\ell') \in \TPout(\ell) \backslash T}\end{align*}\end{definition}
Clearly, a global abstraction function is a special case of a dynamicabstraction function, where every location is abstracted with the sameabstraction function. How to select properties for the localized property-basedabstraction is discussed in \Cref{ch:implementation}.
The partial evaluation is bootstrapped with the lift of the program with the component removed.\begin{equation*}G_0 = (G_\Prog - T)\lift\end{equation*}The component-level evaluation is then repeated on $G_0$ until a fixed point is found.\begin{equation*}G_{S,T}^* = \evaluate_{\Prog,S,T}^*(G_0) =\lim_{n\rightarrow\infty}\evaluate_{\Prog,S,T}^n(G_0)\end{equation*}
Next, we will adapt the evaluation a final time. Instead of evaluating the entire program, the evaluation will unfold only transitions in a given component $T$. The component is not required to be connected. How to select the component for best results is discussed in \Cref{ch:implementation}.
\begin{example}\label{ex:component_pe}In this example the partial evaluation of $\Prog_3$ is restricted to the component $T = \{t_{1a}, t_{1b}\}$, because the two loops are the only problematic transitions during analysis with \gls{mprf}s. Also, we reduce the number of properties. The evaluation uses the localized property-based abstraction $S_{\bot,\Psi_{\ell_1},\bot}$ with $\Psi_{\ell_1} = \{\psi_1, \psi_2, \psi_3, \psi_4\}$ where\begin{align*}\psi_1 &= 1 \leq x, \hspace{1cm} \neg\psi_1 = 1 > x\\\psi_2 &= 2 \leq x, \hspace{1cm} \neg\psi_2 = 2 > x\\\psi_3 &= x \leq 3, \hspace{1cm} \neg\psi_3 = x > 3\\\psi_4 &= x \leq 4, \hspace{1cm} \neg\psi_4 = x > 4\end{align*}
\todo{continue here}
The program is lifted as shown in Example \ref{ex:lift} and\fref{fig:ex_lift}, but with the transitions $t_{1a}$ and $t_{1b}$ removed.The partial evaluation starts on this graph $G_0$ with\begin{align*}V(G_0) &= \{\langle\ell_0,\texttt{true}\rangle,\langle\ell_1,\texttt{true}\rangle,\langle\ell_2,\texttt{true}\rangle\}\\E(G_0) &= \{t_0,t_2,t_3\}\end{align*}
We will proceed with proving that the graph can be used for constructing a new program $\Prog'$ with equal expected runtime-complexity.
The transitions $t_{1a}$ and $t_{1b}$ are unfolded already in the first evaluation step. We find two new transitions by unfolding $t_{1a}, t_{1b} \in \TPout(\ell_1) \cap T$ from $\langle\ell_1,\texttt{true}\rangle$
\begin{align*}t_{1a,1} &= (\langle \ell_1, \texttt{true}\rangle, 0.5, \tau_1,\eta_{1a}, \langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle) \\t_{1b,1} &= (\langle \ell_1, \texttt{true}\rangle, 0.5, \tau_1,\eta_{1b},\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle) \\G_1 &= G_0 + \{t_{1a,1},t_{1b,1}\}\end{align*}
Note, the transition $t_2$ is not in $T$. Instead of unfolding it, the transition $t_2' = (\langle\ell_1,\texttt{true}\rangle,1,\tau_2,\eta_2,\langle\ell_2,\texttt{true}\rangle)$ is added to the graph, which is already contained as the lift of $t_2$. The same holds for $t_0$ and $t_3$. In the second step the new versions $\langle \ell_1,\{\psi_1,\psi_3,\psi_4\}\rangle$ and $\langle \ell_1,\{\psi_1,\psi_2,\psi_4\}\rangle$ are unfolded. We find new transitions
\begin{align*}t_{1a,2} &= (\langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle, 0.5,\tau_1 \Land \psi_1 \Land \psi_3 \Land \psi_4, \eta_{1a}, \langle\ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle) \\t_{1b,2} &= (\langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle, 0.5,\tau_1 \Land \psi_1 \Land \psi_3 \Land \psi_4, \eta_{1b}, \langle\ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle) \\t_{2,1} &= (\langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle, 1, \tau_2\Land \psi_1 \Land \psi_3 \Land \psi_4, \eta_2,\langle \ell_2,\texttt{true}\rangle)\\t_{1a,3} &= (\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle, 0.5,\tau_1 \Land \psi_1 \Land \psi_2 \Land \psi_4, \eta_{1a}, \langle\ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle) \\t_{1b,3} &= (\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle, 0.5,\tau_1 \Land \psi_1 \Land \psi_2 \Land \psi_4, \eta_{1b}, \langle\ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle) \\t_{2,2} &= (\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle, 1, \tau_2\Land \psi_1 \Land \psi_2 \Land \psi_4, \eta_2, \langle \ell_2,\texttt{true}\rangle)
\\G_2 &= G_1 + \{t_{1a,2},t_{1b,2},t_{2,1},t_{1a,3},t_{1b,3},t_{2,2}\}\end{align*}Note, the transitions $t_{2,1}$ and $t_{2,2}$ are new transitions but they transition to the trivial version instead of being unfolded due to $t_2\not\in T$.
The evaluation ends here, since no new versions were added in the previous step. We get $G_3 = G_2 = G^*_{\Prog_3,S_{\bot,\Psi_{\ell_1},\bot}}$. The full evaluation graph is displayed in \fref{fig:ex_component_pe}.\begin{figure}[ht]\centering\input{figures/ch3_p3_component_pe}\caption{Component-level partial evaluation graph of $\Prog_3$ from \fref{fig:ex_prob} on a component $T = \{t_{1a},t_{1b}\}$ and with a localized property-based abstraction $S_{\bot,\Psi_{\ell_1},\bot}$.\label{fig:ex_component_pe}}\end{figure}\end{example}\rem{In practice, versions that have no incoming transitions in $G_0$ are removed before the evaluation, to avoid unnecessary unfoldings. If the version is reachable in the partial evaluation nonetheless, it is re-added by the evaluation and evaluated in a later step.}The partial evaluation over the entire program is a special case of the component-level evaluation with a component $T = \T_\Prog$. All following lemmas and theorems hold equally for a (global) abstraction and partial evaluations of the entire program.
Let $G^*_{\Prog,\alpha}$ be the evaluation graph of a \gls{pip} $\Prog$ forany abstraction function $\alpha$. For every transition $t = (\langle \ell_1,\varphi_1\rangle, p, \tau, \eta, \langle \ell_2, \varphi_2\rangle) \inE(G^*_{\Prog,\alpha})$ one can find a unique transition denoted as
Let $G^*_{\Prog,S}$ be the evaluation graph of a \gls{pip} $\Prog$ for any localized abstraction function $S$. For every transition $t = (\langle\ell_1, \varphi_1\rangle, p, \tau, \eta, \langle \ell_2, \varphi_2\rangle)\in E(G^*_{\Prog,S})$ one can find a unique transition denoted as
with Lemma \ref{lem:original_transition}.
with Lemma \ref{lem:original_transition}. In Example \ref{ex:global_abstr} and \ref{ex:component_pe} the localized property-based abstraction was carefully crafted so that the evaluation graph is finite. As was already foreshadowed, the partial evaluation graph for a localized abstraction function is finite when at least one location per loop in the original program is abstracted to a finite abstraction space.\begin{lemma}\label{lem:localized_pba_finite}Let $S$ be a localized abstraction function. The partial evaluation graph $G_{\Prog,S}^*$ is finite if every loop $L$ in $\Prog$ contains a location $\ell \in L$ such that $\set{S(\langle\ell,\varphi\rangle)}{\varphi \in\Phi}$ is finite.\begin{proof}Let $G_{\Prog,S}^*$ be the partial evaluation graph of a \gls{pip} $\Prog$ with a localized abstraction $S$, and let $\set{S(\langle\ell,\varphi\rangle)}{\varphi \in \Phi}$ be finite for some $\ell\in L$ on any loop $L$ of $\Prog$. The proof will be done by contradiction. Assume the partial evaluation graph $G_{\Prog,S}^*$ is infinite. We can construct an infinite path $p = v_0v_1\dots = \langle\ell_0,\_\rangle\langle\ell_1,\_\rangle\dots \in \vers_\Prog^\omega$, where all versions on this path are pairwise different. Let $n \in \N$. Since $\evaluate_\Prog^*$ doesn't converge there must exist a fresh version $v_n \in E(G_n) \backslash E(G_{n-1})$ that was added by unfolding or copying a transition $t_n =(\ell_{n-1},\_, \_, \_,\ell_n)$ from a version $v_{n-1} = \langle \ell_{n-1},\_\rangle \in E(G_{n-1})$. $v_{n-1}$ must have been freshly added in $G_{n-1}$ or else $v_n$ would have been part of $G_{n-1}$ already. By backwards induction we can create a path $v_0v_1\dots{}v_n$ of $G_n$ for arbitrarily large $n \in \N$ with pairwise different versions, which implies that there exists an infinite path $p = v_0v_1\dots = \langle\ell_0,\_\rangle\langle\ell_1,\_\rangle\dots \in \vers_\Prog^\omega$ with pairwise different versions in $G_{\Prog,S}^*$ as well.
Every version was added by unfolding or copying a transition $t_i=(\ell_{i-1},\_, \_, \_,\ell_i) \in \T_\Prog$. Consequently, there must exist an infinite path $p' = \ell_0\ell_1\dots \in \Loc_\Prog^\omega$ and since $\Prog$ is finite the path must contain a loop $L \in \Loc^*$ that appears an infinite number of times on $p'$. By assumption there must exist a location $\ell^* \in L$ for which $\set{S(\langle\ell^*,\varphi\rangle)}{\varphi \in \Phi}$ is finite. At the same time versions $\langle\ell^*,\_\rangle$ must appear an infinite number of times on $p$, and they can only be abstracted to finitely many possible versions, which is a contradiction to the versions on $p$ being pairwise different. Hence, $G_{\Prog,S}^*$ cannot be infinite.\end{proof}\end{lemma}We will proceed with proving that the partial evaluation graph of a component-level partial evaluation can be used for constructing a new program $\Prog'$ with equal expected runtime-complexity. All results equally hold for partial evaluation of the entire program and independently of the choice of the (global or localized) abstraction function, as long as the localized evaluation function asserts a finite partial evaluation graph by Lemma~\ref{lem:localized_pba_finite}.
Let $G^*_{\Prog,\alpha}$ be the evaluation graph of a \gls{pip} $\Prog$ withgeneral transitions $\GTP$ and the abstraction function $\alpha$. Thetransitions of $E(G^*_{\Prog,\alpha})$ can be split into finite pairwise
Let $G^*_{\Prog,S,T}$ be the evaluation graph of a \gls{pip} $\Prog$ with alocalized abstraction function $S$ on a component $T \subseteq \TP$. Thetransitions of $E(G^*_{\Prog,S,T})$ can be split into finite pairwise
\item there exist a unique general transition $\bar{g_i} \in \GTP$ andsome $\varphi \in \Psi$ for which the following property holds:\begin{equation*}g_i = \set{\unfold(\langle \ell_{g'_i}, \varphi \rangle,\bar{t})}{\bar{t} \in g'_i}\end{equation*}
\item there exists a unique general transition $\bar{g}_i \in \GTP$ and some $\varphi \in \Phi$ for which the following property holds:\begin{align*}g_i &= \set{t \in E(G^*_{\Prog,S,T})}{\bar{t} \in \bar{g}_i} \\&= \set{(\langle\ell_{\bar{g}_i},\varphi\rangle,p,\varphi\land \tau_{\bar{g}_i}, \eta, \langle\ell',\varphi'\rangle)}{(\ell_{\bar{g}_i}, p, \tau_{\bar{g}_i}, \eta, \ell') \in \bar{g}_i}\end{align*}
\todo{proof}
Let $t =(\langle \ell, \varphi\rangle, p, \tau, \eta, \_) \in E(G^*_{\Prog,S,T})$ be a transition in the partial evaluation graph $G^*_{\Prog,S,T}$ of a \gls{pip} $\Prog$ with a component $T \subset \TP$. There exists a unique general transition $g \in \GTP$ with $\bar{t} \in g$. Let $ g = \{ t'_1, \dots, t'_n \}$. We set $\bar{g} = g$. All $t'_i$ share a common guard $\tau_g$ and source location $\ell_g = \ell$. Let $1 \leq i \leq n$ and $t'_i = (\ell, p, \tau_g, \_, \_)$. By construction of $\evaluate_{\Prog,S,T}$, either a transition $t_i = \unfold(\langle\ell,\varphi\rangle, t'_i) = (\langle\ell,\varphi\rangle, p, \varphi \land \tau_g, \_, \_)$ was added if $t'_i \in T$, or $t_i = (\langle\ell,\varphi\rangle, p, \varphi \land \tau_g, \_, \_)$ if $t'_i \not\in T$. In any case, all $t_i$ share a common guard $\tau_g' = \varphi \land \tau_g$ and the probabilities add up to 1.
Let $\Prog$ as defined by Definition \ref{def:pip} and $G^*_{\Prog,\alpha}$be the partial evaluation graph of $\Prog$ for some abstraction function$\alpha$. A new \gls{pip} is defined as $\Prog' = (\PV, \Loc_{\Prog'},\GTPn, \langle l_0, \textit{true}\rangle)$ where $\Loc_{\Prog'} =E(G^*_{\Prog,\alpha}) \subset \vers_\Prog$, and $\GTPn =\mathcal{G}\mathcal{T}_{G^*_{\Prog,\alpha}} \subset 2^\VTP$. $\Prog'$ is
Let $\Prog$ as defined by Definition \ref{def:pip} and $G^*_{\Prog,S,T}$ bethe partial evaluation graph of $\Prog$ for some localized abstractionfunction $S$ and a component $T \subseteq \TP$. A new \gls{pip} is definedas $\Prog' = (\PV, \Loc_{\Prog'}, \GTPn, \langle l_0, \textit{true}\rangle)$where $\Loc_{\Prog'} = E(G^*_{\Prog,S,T}) \subset \vers_\Prog$, and $\GTPn =\mathcal{G}\mathcal{T}_{G^*_{\Prog,S,T}} \subset 2^\VTP$. $\Prog'$ is
Theorem \ref{thm:correctness} which will shows that runtime-complexity boundsfor the partial evaluation are runtime-complexity bounds of the originalprogram, which demonstrates the correctness of the construction.Lemma \ref{lem:constrfgeq} and Theorem \ref{thm:thightness} will show thatworst-case expected runtime-complexity doesn't get worse with partialevaluation.
Theorem \ref{thm:correctness} which shows that any runtime-complexity bounds for the partial evaluation are runtime-complexity bounds of the original program, which demonstrates the correctness of the construction. Lemma \ref{lem:constrfgeq} and Theorem \ref{thm:thightness} will show that worst-case expected runtime-complexity doesn't get worse with partial evaluation.
\section{History Dependent Abstractions}In \Sref{sec:partialevaluation} the details of the abstraction were omitted. Recall Definition \ref{def:abstraction}: an abstraction space must be finite and the abstraction function must map any probability to elements of this abstraction space. The abstraction function was kept simple, in order to keep the proofs concise. In this section we will expand the abstraction to take the evaluation history into account, which opens the possibility for more refined abstraction functions.\begin{definition}[History dependent abstraction]A history dependent abstraction function is a function $\G \rightarrow$\end{definition}In this section we will present the property-based abstraction proposed by \citeauthor{Domenech19}~\cite{Domenech19} for non-probabilistic programs but which can be applied equally to probabilistic programs.\todo{continue here}
one symbol is replaced by another is denoted with $[\cdot/\cdot]$.For example $(x<y)[x/1] = (1 < y)$. For a finite set $A =\{x_1,\dots,x_n\}$, the $n$ consecutive substitutions $[x_i/y(i)]$where the replacement expression $y$ varies only in some index $i$ is denotedusing a shortened notation: $[x_i/y_i]_{x_i \in A} =[x_1/y(1)] \dots [x_n/y(n)]$. For example$[x_i/i]_{x_i\in A} = [x_0/0]\dots[x_n/{}n]$, isthe substution where every variable is replaced by its index in $A = \{x_0,\dots, x_n\}$. The restriction of a function $f: A \rightarrow B$ to a smallerdomain $A' \subseteq A$ is denoted by $\varf|_{A'} : A' \rightarrow B$.
one symbol is replaced by another is denoted with $[\cdot/\cdot]$. For example $(x<y)[x/1] = (1 < y)$. For a finite set $A = \{x_1,\dots,x_n\}$, the $n$ consecutive substitutions $[x_i/y(i)]$ where the replacement expression $y$ varies only in some index $i$ is denoted using a shortened notation: $[x_i/y_i]_{x_i \in A} = [x_1/y(1)] \dots [x_n/y(n)]$. For example, $[x_i/i]_{x_i\in A} = (\dots([x_0/0])\dots)[x_n/{}n]$ is the substitution where every variable is replaced by its index in $A = \{x_0, \dots, x_n\}$. The restriction of a function $f: A \rightarrow B$ to a smaller domain $A' \subseteq A$ is denoted by $\varf|_{A'} : A' \rightarrow B$.
This section will define constraints that are special cases of formulas of\gls{fo} logic. In the following $\V$ denotes a finite set of variables $\V$.
This section will define constraints as special cases of first-order formulas. In the following, $\V$ denotes a finite set of variables.
foundations on FO-logic we'll refer to the literature \cite{barwise1977}.Constraints are a subset of formulas in \gls{fo} logic, thus, there semantics
foundations on \gls{fo}-logic we refer the reader to \cite{barwise1977}.Constraints are a subset of formulas in \gls{fo} logic; thus, their semantics
\gls{fo} logic.\cite{barwise1977} In this thesis we will only consider integerarithmetic. Let $\Zs := (\Z, +, \cdot, 0, 1, \leq, <, =)$ be the structure of
\gls{fo} logic \cite{barwise1977}. In this thesis, we will only consider integer arithmetic. Let $\Zs := (\Z, +, \cdot, 0, 1, \leq)$ be the structure of
\item $1 < 0$ entails any constraints, because $\llbracket 1 < 0\rrbracket = \emptyset$ and the empty set is subset of any other
\item $1 < 0$ entails all constraints, because $\llbracket 1 < 0\rrbracket = \emptyset$ and the empty set is a subset of any other
The problem of determining if a set of constraints is satisfiable or findingvalid variable assignments for a given formula is part of \gls{smt} and is awell studied field in computer science.\cite{walsh2009smt,abraham17smt} Thetheories relevant for this thesis are the quantifier-free linear integerarithmetic (usually called \texttt{QF\_LIA}) and the quantifier-free non-linearinteger arithmetic (\texttt{QF\_NIA}).
The problem of determining whether a set of constraints is satisfiable orfinding valid variable assignments for a given formula is part of \gls{smt} andis a well studied field in computer science \cite{walsh2009smt,abraham17smt}.The theory relevant for this thesis is the quantifier-free linear integerarithmetic (usually called \texttt{QF\_LIA}).
Sometimes one is not interested in the result to all variables but only to asubset of variables. It will later become relevant when handling temporaryvariables that are only relevant to the scope of a transition. In \gls{fo} logicone would just existentially quantify the variable. However, the new \gls{fo}formula wouldn't be quantifier-free anymore. Instead a new formula is searchedthat has just the same solutions when ignoring the values for the irrelevantvariables.
Sometimes one is not interested in the result for all the variables but only those for a subset of variables. This will later become relevant when handling temporary variables that are only relevant to the scope of a transition. In \gls{fo} logic one would just existentially quantify the variable in the formula. However, the new \gls{fo} formula would no longer be quantifier-free. Instead, a new formula is sought that has just the same solutions when ignoring the values for the irrelevant variables and that uses only the reduced variable set.
An update assigns a value to a variable after the update using the values ofvariables before the update. Constraints and assignments have no notion of time,whereas for updates there exists a \textit{before} and an \textit{after}.
An update assigns a value to a variable after the update, by taking into accountall the \textit{former} values of the variables. Constraints and assignmentshave no notion of time, whereas for updates there exists a \textit{before} andan \textit{after}.
Let $\Sigma = \{s : \V \rightarrow \Z\}$ be the set of full integerassignments, that are assignments which assign a value to every variable$v\in\V$. In contrast, partial assignments over a variable set $A \subseteq \V$assign values only to a subset of variables, set set of partial assignments overthe variables $A\subseteq\V$ is denoted by $\Sigma_A = \{s: A \rightarrow \Z\}$.
Let $\Sigma = \{s : \V \rightarrow \Z\}$ be the set of full integer assignments,which are assignments that assign a value to every variable $v\in\V$. Incontrast, partial assignments over a variable set $A \subseteq \V$ assign valuesonly to a subset of variables, and the set of partial assignments over thevariables $A\subseteq\V$ is denoted by $\Sigma_A = \{s: A \rightarrow \Z\}$.
For an integer polynomial $f\in\Z[A]$ over a variable set $A \subseteq \V$ and aassignment $s \in \Sigma_A$ we write $\varf(s)$ for the value that is found byevaluating the polynomial $\varf[x_i/s(x_i)]_{x\in A}$.
For an integer polynomial $f\in\Z[A]$ over a variable set $A \subseteq \V$ and an assignment $s \in \Sigma_A$ we write $\varf(s)$ for the value that is found when evaluating the polynomial $\varf[x_i/s(x_i)]_{x_i\in A}$.
For an update described by the update function $u : A \rightarrow \Z[\V]$ and aassignment $s \in \Sigma$, we define a function $\rho_u: \Sigma \rightarrow\Sigma_A$. It computes the assignment after an update $u$ given an assignment
For an update described by the update function $u : A \rightarrow \Z[\V]$ and an assignment $s \in \Sigma$ we define a function $\rho_u: \Sigma \rightarrow \Sigma_A$. It computes the assignment after an update $u$ given an assignment
with $A' \cap (A \union B) = \emptyset$. We define $u(\varphi) := ((\varphi\Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/x]_{x\in A}$. For any partial
with $A' \cap (A \union B) = \emptyset$. We write under misuse of notation\begin{equation*}u(\varphi) := ((\varphi \Land \LAnd_{x\in A} x' =u(x))|_{A'})[x'/x]_{x\in A}.\end{equation*}\end{definition}\begin{lemma}\label{lem:nonprop_update}Let $A, B \subseteq \V$. Given an assignment $s\in\Sigma$, a formula$\varphi \in \Phi$ and an update $u: A \rightarrow \Z[B]$. For any partial
Another way of reasoning about solutions is to see them as points in an$m$-dimensional space for variables $X \subseteq \V$ and $|X| = m$. Every
Another way of reasoning about satisfying assignments is to see them as pointsin an $m$-dimensional space for variables $A \subseteq \V$ and $|A| = m$. Every
used abstract domains are Polyhedra\cite{bagnara2005convexpolyhedron}Octagons\cite{mine2001wcre, mine2007arxiv, mine2006hosc,truchet2010synacs}, andthe Interval domain (also called Boxes)\cite{gurfinkel2010}.
used abstract domains are Polyhedra \cite{bagnara2005convexpolyhedron}, Octagons \cite{mine2001wcre, mine2007arxiv, mine2006hosc, truchet2010synacs}, and Boxes \cite{gurfinkel2010}.
affine half-space in the euclidean space $\R^n$. As discussed in\Sref{sec:constraints} strictness of linear constraints and in turn openness ofhalf-spaces are not relevant to the integer domain.
affine half-space in the Euclidean space $\R^n$. As discussed in\Sref{sec:constraints}, the strictness of linear constraints and in turn theopenness of half-spaces are not relevant to the integer domain.
For completeness' sake, let's introduce a third abstract domain of Boxes. Aclosed interval $[a,b]$ with $a, b \in \R$ for a variable $v \in \V$ can be
For the sake of completeness, let us introduce a third abstract domain.A closed interval $[a,b]$ with $a, b \in \R$ for a variable $v \in \V$ can be
concepts needed to understand this thesis will be introduced, following thedefinitions from \citeauthor{billingsley2011}\cite{billingsley2011}. Foradditional information we'll refer to theliterature.\cite{kraaikamp2005modern, billingsley2011}
concepts needed to understand this thesis will be introduced here, following thedefinitions from \citeauthor{billingsley2011} \cite{billingsley2011}. Foradditional information we refer the reader to the literature\cite{kraaikamp2005modern, billingsley2011}.
Imagine a probabilistic experiment with some possible outcomes $\Omega$. Forexample, a coin-toss can have arbitrary many outcomes when taking the position onthe table into account, the fact that the coin might get lost in the process
Imagine a probabilistic experiment with several possible outcomes $\Omega$. For example, a coin toss can have arbitrarily many outcomes when taking the position on the table into account, the fact that the coin might get lost in the process,
outcomes. For example, the coin-toss on the table has a sub-set of outcomeswhere the coin's head is above. The probability measure would assign aprobability close to 50\% for a somewhat normal coin-toss.
outcomes. For example, the coin toss on the table has a subset of outcomeswhere the coin's head is uppermost. The probability measure would assign aprobability close to 50\% for a somewhat normal coin toss.
Formally a probability measure is defined on a $\sigma$-algebra which assertsthe intuitive property of a random event: if you can measure two subsets ofoutcomes (e.g. $A = \set{ \omega \in \Omega} {\text{ coin shows head}}$ and $B= \set{\omega \in \Omega} {\text{coin shows tail}}$) so you can for the union ofboth subsets ($A \union B = \set{\omega \in \Omega} {\text{coin shows head ortails}}$) and the probability of the combined outcomes is equal to the sum ofthe individual outcomes, if $A$ and $B$ are disjoint.
Formally, a probability measure is defined as a function on a $\sigma$-algebra that asserts the intuitive property of a random event: if you can measure the probability of two subsets of outcomes (e.g. $A = \set{ \omega \in \Omega} {\text{coin shows head}}$ and $B = \set{\omega \in \Omega} {\text{coin shows tail}}$), then you can measure the probability of the union of both subsets ($A \union B = \set{\omega \in \Omega} {\text{coin shows head or tails}}$), and the probability of the combined outcomes is equal to the sum of the individual outcomes, if $A$ and $B$ are disjoint.
\begin{definition}[$\sigma$-Algebra\cite{billingsley2011}]Let $\Omega$ be an arbitrary set. A set $A \subset 2^\Omega$ is called a
\begin{definition}[$\sigma$-Algebra \cite{billingsley2011}]Let $\Omega$ be an arbitrary set. A set $A \subseteq 2^\Omega$ is called a
A function $\mu: \F \rightarrow [0,1]$ on a $\sigma$-algebra $\F$ is a probabilitymeasure if it satisfies the following conditions:
A function $\mu: \F \rightarrow [0,1]$ on a $\sigma$-algebra $\F$ is aprobability measure if it satisfies the following conditions:
The distribution of a variable is a way of describing a random variable and theunderlying probability space. In case of discrete random variables thedistribution is sufficient the random variable. The underlying probability spacebecomes irrelevant at this point, as the specific events and their probabilitiesare not important to the outcome of the random variable, as long as theprobability of the outcome is fully defined.
The distribution of a random variable is a way of describing a random variableand the underlying probability space. In the case of discrete random variables,the distribution is sufficient to fully describe the random variable. Theunderlying probability space becomes irrelevant at this point, as the specificevents and their probabilities are not important to the outcome of the randomvariable, as long as the probability of the outcome is fully defined.
\begin{definition}[Distribution of a random variable\cite{billingsley2011}]\label{def:distribution}Let $X$ be a discrete random variable. The probability of a single value iswell defined by the distribution $\mu:F \rightarrow [0,1]$ as follows:\begin{align}P(X=r) = \mu(\{r\}) = P(\set{\omega \in \Omega}{X(\omega) = r})\end{align}
\begin{definition}[Distribution of a random variable \cite{billingsley2011}]\label{def:distribution}Let $X$ be a discrete random variable in a probability space $(\Omega, \F, P)$. The probability of observing a specific value $r$ is defined by the distribution $\mu: \F \rightarrow [0,1]$ as follows:\begin{align*}\mu(\{r\}) = P(X=r) = P(\set{\omega \in \Omega}{X(\omega) = r})\end{align*}
\begin{definition}[Support\cite{billingsley2011}]For a random variable with distribution $\mu: $, the support is the smallest set$S$ for which $\mu(S) = 1$. Discrete random variables have a countable
\begin{definition}[Support \cite{billingsley2011}]For a random variable with distribution $\mu$, the support is the smallestset $S$ for which $\mu(S) = 1$. Discrete random variables have a countable
When considering a random variable one is usually interested the value to expectin general. For example most possible outcomes of a binomial distribution arevery improbable. This is described by the expected value.
When considering a random variable, one is usually interested in the value toexpect in general. For example, most possible outcomes of a binomialdistribution are very improbable. This is described by the expected value.
They will be briefly introduced in this section, following the definitionsfrom \cite{kraaikamp2005modern}. Examples for the various probabilitydistributions are displayed in \fref{fig:distributions}.
They will be briefly introduced in this section, following the definitions from\cite{kraaikamp2005modern}. Examples of the various probability distributionsare displayed in \fref{fig:distributions}.
$0$, for example decided by a coin-flip.\begin{definition}[Bernoulli Distribution]\label{def:bernoulli}
$0$, for example decided by a coin toss.\begin{definition}[Bernoulli distribution]\label{def:bernoulli}
The value of a random variable can approximated by a constraint for the boundsof the support of the probability distribution. For any probability distribution$D \in \D$ and a variable $d \in \V$, we define
The value of a random variable can be approximated by a constraint for thebounds of the support of the probability distribution. For any probabilitydistribution $D \in \D$ and a variable $d \in \V$, we define
where every configuration contains the location at which the program is and astate that holds the current values of the program variables. A transitioncontains a condition $\tau$, called \textit{guard}, that must be true for thecurrent state to be taken. Whenever a transition is taken it modifies thestate with some \textit{update} $\eta$. An update assigns a new value to eachprogram variable depending on the previous state. Every integer program startsat a unique location usually called $\ell_0$. Whenever no transition guard issatisfied the program terminates. The arguments to the program is given by somestarting state $s_0$, assigning a value for every program variable.
where every configuration contains the location which represents the program counter, and a state that holds the current values of the program's variables. A transition contains a condition $\tau$, called \textit{guard}, that must be true in the current state for the transition to be taken. Whenever a transition is taken it modifies the state with some \textit{update} $\eta$. An update assigns a new value to each program variable depending on the previous state. Every integer program starts at a unique location usually called $\ell_0$. Whenever no transition guard is satisfied the program terminates. The arguments to the program are given by some starting state $s_0$, assigning a value for every program variable.
red). For simplicity, all other transitions are unique members of their owngeneral transition and the trivial probability of 1 is omitted.At location $\ell_1$ with a variable assignment $s(x) = 3$ threetransitions $t_2$, $t_3$ and $t_4$ are possible. The non-determinism isresolved first. Then if the general transition containing $t_2$ and $t_3$ isselected, a coin is thrown to decide whether to take $t_2$ or $t_3$.
red). For simplicity, all the other transitions are unique members of theirown general transition and the trivial probability of 1 is omitted. Atlocation $\ell_1$ with a variable assignment $s(x) = 3$, three transitions$t_2$, $t_3$ and $t_4$ are possible. The non-determinism is resolved first.Then if the general transition containing $t_2$ and $t_3$ is selected, acoin is thrown to decide whether to take $t_2$ or $t_3$.
from all the integers, but can be constraint by the guard, in which case thetransition is only taken if the non-deterministic sampling fulfills the guard.If the sampled value fulfills no transition's guard it means the programterminates. Non-deterministic sampling is resolved before the non-deterministicbranching.
from all the integers, but can be constrained by the guard, in which case thetransition is only taken if the non-deterministic sampling fulfils the guard. Ifthe sampled value fulfils no transition's guard, the program terminates.Non-deterministic sampling is resolved before non-deterministic branching.
\textsc{Bern}(0.5)$. The value of the Bernoulli distribution can take thevalue of 1 or 2 with a probability of 0.5 in both cases. In case that thedistribution is sampled to 1, the variable x is decremented, and stays thesame otherwise.
\textsc{Bern}(0.5)$. The Bernoulli distribution can take the value of 1 or 2with a probability of 0.5 in both cases. In the case that the distribution issampled to 1, the variable x is decremented, and stays the same otherwise.
of locations, $\T = \Loc \times [0,1] \times (\PV \rightarrow \Z[\V\union\D]) \times \R \times \Loc$ be the set of transitions over locations $\Loc$. Let $\GT = 2^{\T}$ be the set of general transitions over $\Loc$.
of locations, and $\T = \Loc \times [0,1] \times (\PV \rightarrow\Z[\V\union\D]) \times \R \times \Loc$ be the set of transitions over locations$\Loc$. Let $\GT = 2^{\T}$ be a set of general transitions over $\Loc$.
transitions guard is satisfied. Since no normal transition leaves the location$l_\bot$ the run loops indefinitely on the location $\ell_\bot$. In addition a
transition's guard is satisfied. Since no normal transition leaves the location $\ell_\bot$, the run loops indefinitely on the location $\ell_\bot$. In addition, a
remembered as well. This won't be relevant for the partial evaluation but isimportant to define the probabilistic properties of a \gls{pip}.
remembered as well. This will not be relevant for the partial evaluation but itis important to define the probabilistic properties of a \gls{pip}.
accumulated rewards over a whole process. In literature\cite{puterman1994markov,puterman1990markov}, the properties of a given process with regard to thehighest achievable rewards is researched extensively.
accumulated rewards over a whole process. In theliterature \cite{puterman1994markov, puterman1990markov}, the properties of agiven process with regard to the highest achievable rewards have beenextensively researched.
complexity. If one defines runtime as reward, one can picture the policy for aMarkov decision process as an adversary. We call the policy \enquote{scheduler}.
complexity. If one defines runtime as a reward, one can picture the policy for aMarkov decision process as an adversary. We call the policy the\enquote{scheduler}.
We differentiate between two types of schedulers: First the scheduler that at agiven state always takes the same decision -- it has no memory of it's past
We differentiate between two types of schedulers: the first is a scheduler thatat a given state always takes the same decision -- it has no memory of its past
\{g_\bot\})$ is a history dependent scheduler if for every finite prefix$f\in\fpath_\Prog$ and configuration $c=(\ell,t,s) \in \confs_\Prog$, $\scheduler(fc)= (g, s')$ implies items \ref{itm:md1} to \ref{itm:md4} of Definition\ref{def:mscheduler} for the last configuration $c$.
\{g_\bot\})$ is a history dependent scheduler if for every finite prefix $f\in\fpath_\Prog$ and configuration $c=(\ell,t,s) \in \confs_\Prog$, $\scheduler(fc) = (g, s')$ implies items \ref{itm:md1} to \ref{itm:md4} of Definition \ref{def:mscheduler} for the last configuration $c$.
other configuration are invalid starting configurations and get assigned a zeroprobability by the probability measure $\prSs : \confs_\Prog \rightarrow [0,1]$
other configurations are invalid starting configurations and they are assigned a zero probability by the probability measure $\prSs : \confs_\Prog \rightarrow [0,1]$
Lets say the program ran up to a configuration $c=(\ell,t,s)$. The schedulerresolves the non-deterministic sampling and branching to $\scheduler(c) = (g,\tilde{s})$. The probability for the event $c \rightarrow c'$ that $c'$ followsthe configuration $c$ with $c' = (\ell', t', s')$ and $t' = (\ell, p, \tau,\eta, \_, \ell') \in g$ depends on three things: First the transition $t \in g$is taken with the probability $p$, second the probability that each programvariable $x\in\PV$ is updated to the value $s'(x)$ by the probabilistic update-- recall Definition \ref{def:prob_update}, third that the temporary variablesare sampled by the scheduler equal to the target state. The result is theprobability measure $\prSs: \confs_\Prog^2 \rightarrow [0,1]$.
Let us say that the program ran up to a configuration $c=(\ell,t,s)$. Thescheduler resolves the non-deterministic sampling and branching to$\scheduler(c) = (g, \tilde{s})$. The probability for the event $c \rightarrowc'$ that $c'$ follows the configuration $c$ with $c' = (\ell', t', s')$ and $t'= (\ell, p, \tau, \eta, \_, \ell') \in g$ depends on three things: First thetransition $t \in g$ is taken with the probability $p$; second the probabilitythat each program variable $x\in\PV$ is updated to the value $s'(x)$ by theprobabilistic update -- recall Definition \ref{def:prob_update}; third that thetemporary variables are sampled by the scheduler equal to the target state. Theresult is the probability measure $\prSs: \confs_\Prog^2 \rightarrow [0,1]$.
By a cylindrical construction one gets the final probability measure for theinfinite runs $\PrSs : \runs_\Prog \rightarrow [0,1]$ and the probability space$(\runs_\Prog, \F, \PrSs)$.
By a cylindrical construction one arrives at the final probability measure forthe infinite runs $\PrSs : \runs_\Prog \rightarrow [0,1]$ and the probabilityspace $(\runs_\Prog, \F, \PrSs)$.
That means, that it doesn't matter if we use Markovian or history dependentscheduler when considering the worst-case expected runtime complexity.
That means that it does not matter whether we use Markovian or history dependent deterministic schedulers when considering the worst-case expected runtime complexity.
We define $\nu^\scheduler$ to be the expected runtime for any scheduler$\scheduler$ -- Markovian or history dependent -- on a program $\Prog$with an initial state $s_0\in\Sigma$.
We define $\nu^\scheduler$ to be the expected runtime for anydeterministic scheduler $\scheduler$ -- Markovian or history dependent-- on a program $\Prog$ with an initial state $s_0\in\Sigma$.
infinitely many prefixes, since there can be infinitely manyassignments. We approach the set $\fpath_N$ with a series of sets $F_1,F_2, \dots \subseteq\fpath$ and
infinitely many prefixes, since there can be infinitely many assignmentscaused by probabilistic sampling from finite distributions. We approachthe set $\fpath_N$ with a series of finite sets $F_1, F_2, \dots\subseteq\fpath$ such that
in $\HDS_{F_i}$ exists. Let $\nu_i$ be the reward of this optimalscheduler.
$\scheduler_\text{opt} \in \MDS_{F_i}$ exists that maximises theexpected reward when limited to the finite set of configurations. Let$\nu_i$ be the reward of this optimal scheduler.
With $i$ approaching infinity the optimal scheduler must still always beas good or better than the history dependent scheduler $\scheduler$. Weget the following:
In the limit when $i$ approaches infinity, the optimal scheduler must yield an expected reward no less than that of the history dependent scheduler $\scheduler$. We get the following inequality.
The question if a program terminates is a famous problem at the heart ofcomputer science known to be undecidable. The so-called Halting Problem isstrongly related to the search for upper and lower runtime complexity bounds,which is equally undecidable. Nevertheless, the answers to those questions arevery important in practice. For example a compiler might want to warn theprogrammer about faulty code sections that were not marked explicitly to runindefinitely, or the developer could be forced to prove the efficiency of hisprogram in critical scenarios. In an ever more complex world, with ever morecomplex algorithms, the need for automatic tools arised. Even though thequestion for runtime-complexity can not be answered in a general case, manytools were developed to automatically analyze the complexity of variousprogramming paradigms as tight and fast as possible. \cite{giesl2017aprove,
The question of whether a program terminates is a famous problem at the heart of computer science and it is known to be undecidable. The so-called Halting Problem is strongly related to the search for upper and lower runtime complexity bounds, and it is equally undecidable. Nevertheless, the answers to these questions are very important in practice. For example, a compiler might want to warn the programmer about faulty code sections that were not marked explicitly to run indefinitely, or the developer could be forced to prove the efficiency of his program in critical scenarios. In an ever more complex world, with ever more complex algorithms, the need for automatic tools arises. Even though the question of runtime complexity cannot be answered in a general case, many tools have been developed to automatically analyse the complexity of various programming paradigms as tightly and quickly as possible. \cite{giesl2017aprove,
Those tools are improved on constantly and they are regularly compared to eachother in the \gls{termcomp}\cite{termcomp2015,termcomp2019}, the goal being tofind as many, as tight bounds for a set of test programs as possible.% The tests are chosen to represent a large variety of commonly encountered% real-world programs, but also include known-to-be hard-to-solve problems in% order to challenge the competitors and improve their tools. Their collection of% test programs can be found in the \gls{tpdb} which is publicly% available\footnote{\url{https://github.com/TermCOMP/TPDB}}. During the latests% competition in 2022, eleven candidates lined up in twenty-eight% categories\cite{termcomp2022url}.
Those tools are being constantly improved on and they are regularly compared to eachother in the \gls{termcomp} \cite{termcomp2015,termcomp2019}, the goal being tofind as many, and as tight bounds as possible for a set of test programs.
Consider the program $\Prog_1$ displayed in \fref{fig:classic_c}. It containstwo program variables $x$, and $y$, and one temporary variable $u$, which isused to non-deterministically sample $x$ in the first transition $t_0$. Theprogram clearly terminates. The first loop increments $x$ up to 3 if $x$ ispositive to begin with. The second loop decrements $y$ while $y$ is positive.However, classical analysis using only \gls{mprf} fails to find a finiteruntime-complexity bound for this program \cite{giesl2022lncs}. With the help of\gls{cfr} on sub-\gls{scc} level, the program (see \fref{fig:classic_pe_c}) canbe transformed into an equivalent program where the analysis with \gls{mprf}succeeds at finding a finite runtime-complexity bound.
Consider the program $\Prog_1$ displayed in \fref{fig:classic_c}. It contains two program variables $x, y$ and one temporary variable $u$ which is used to non-deterministically sample $x$ in the first transition $t_0$. The program clearly terminates. The first loop increments $x$ up to 3 if $x$ is positive to begin with. The second loop decrements $y$ while $y$ is positive. However, the classical analysis using only \gls{mprf} fails to find a finite runtime-complexity bound for this program \cite{giesl2022lncs}. With the help of \gls{cfr} on sub-\gls{scc} level, the program (see \fref{fig:classic_pe_c}) can be transformed into an equivalent program where the analysis with \gls{mprf} succeeds at finding a finite runtime-complexity bound.
\citeauthor{Domenech19}\cite{Domenech19} and is called \gls{cfr}via partial evaluation and implemented in their analysis tooliRankfinder\cite{irankfinder2018wst}. It achieves very similar results to\gls{mprf} when used on the entire programs or whole \gls{scc}. Its real
\citeauthor{Domenech19} \cite{Domenech19} and it is called \gls{cfr}via partial evaluation. It was implemented in their analysis tooliRankfinder \cite{irankfinder2018wst}. It achieves very similar results to\gls{mprf} when used on entire programs or whole \glspl{scc}. Its real
specifically the loops where \gls{mprf} fail to find size bounds. This techniquewas presented by \citeauthor{giesl2022lncs}\cite{giesl2022lncs}and implemented in \gls{koat}\cite{giesl2014ijcar} the complexity analysis tool
specifically the loops where \glspl{mprf} fail to find size bounds. This techniquewas presented by \citeauthor{giesl2022lncs} \cite{giesl2022lncs}and implemented in \gls{koat} \cite{giesl2014ijcar} the complexity analysis tool
terminate, since a longer runtime gets increasingly unlikely. The currentversion of \gls{koat} fails to find finite runtime and expected complexitybounds for this program. It would be useful to transform $\Prog_2$ into anequivalent program like the one shown in \fref{fig:prob_pe_c} similarly to the\gls{cfr} technique presented by \citeauthor{giesl2022lncs}\cite{giesl2022lncs}and \citeauthor{Domenech19}\cite{Domenech19}.
terminate, since a longer runtime becomes increasingly unlikely. The currentversion of \gls{koat} fails to find a finite runtime and expected complexitybound for this program. It would be useful to transform $\Prog_2$ into anequivalent program like the one shown in \fref{fig:prob_pe_c}, similar to the\gls{cfr} technique presented by \citeauthor{giesl2022lncs} \cite{giesl2022lncs}and \citeauthor{Domenech19} \cite{Domenech19}.
\subsection{\acrshort{aprove}, \acrshort{koat}, and \acrshort{loat}} The toolsLoAT\cite{frohn2022ijcar} and KoAT\cite{brockschmidt2016acm} are developed atRWTH University and aim at finding lower and respectively upperruntime-complexity bounds on transition systems. At its core, KoAT computesbounds for subprograms using \gls{mprf} or \gls{twn}, and then putting thesubprograms together in order to generate upper runtime-complexity and sizebounds for the whole program. Optionally to improve the analysis, \gls{koat} canresort to partial evaluation as a control-flow refinement techniques on asub-\gls{scc} level\cite{giesl2022lncs}.\gls{apron} is the overall tool, taking part in competitions. It works onvarious input formats and transforms them accordingly to be used withspecialized tools. \gls{koat} and \gls{loat} under the hood of \gls{apron} forthe overall analysis.
\subsection{\acrshort{aprove}, \acrshort{koat}, and \acrshort{loat}}The tools \gls{loat} \cite{frohn2022ijcar} and \gls{koat}\cite{brockschmidt2016acm} were developed at RWTH University and they aim tofind lower and upper (respectively) runtime-complexity bounds on transitionsystems. At its core, \gls{koat} computes bounds for subprograms using\gls{mprf} or a \gls{twn}, and then puts the subprograms together in order togenerate upper runtime-complexity and size bounds for the whole program.Optionally to improve the analysis, \gls{koat} can resort to partial evaluationas a control-flow refinement technique on a sub-\gls{scc} level\cite{giesl2022lncs}. \gls{aprove} is the overall tool, and it has taken part incompetitions. It works on various input formats and transforms them accordinglyso that they can be used with specialised tools such as \gls{koat} and\gls{loat}.
\cite{gulwani2009}. A typical example of such a program can be seenin example \ref{ex:splitterloop}, where a single loop is replaced by twoindependent consecutive loops. iRankFinder implemented the control-flowrefinement technique by partial evaluation proposed by\cite{gallagher2019eptcs}.
\cite{gulwani2009}. iRankFinder implemented the control-flow refinementtechnique by partial evaluation proposed by \cite{gallagher2019eptcs}.
\gls{koat} borrowed the implementation of iRankFinder as a black-box which camewith a number of limitations\cite{giesl2022lncs}. Besides code-quality andperformance issues, the external library wouldn't be able to profit from future
\gls{koat} borrowed the implementation of iRankFinder as a black box, which came with a number of limitations \cite{giesl2022lncs}. Besides code-quality and performance issues, the external library would not be able to profit from future
\acrshort{koat} is an automatic complexity analysis tool for integer programs, whichwas recently extended to the analysis of probabilistic integer programs.Currently, \acrshort{koat} applies so-called control-flow-refinement viapartial evaluation on classic programs by calling the tool iRankFinderexternally.In this thesis, we extend partial evaluation to probabilistic programs. Apartially evaluated probabilistic integer program has the same expectedruntime-complexity as the original program. However, the transformed programmight be easier to analyse.The new algorithm is implemented natively in \acrshort{koat} and isintegrated into the existing analysis algorithm modular and on-demand. Ourevaluation shows that the native implementation is faster on classicalprograms. Furthermore, the new implementation infers tighterruntime complexity bounds for some probabilistic programs as well.
\acrshort{koat} is an automatic complexity analysis tool for integer programs, which was recently extended to the analysis of probabilistic integer programs. Currently, \acrshort{koat} applies so-called control-flow refinement via partial evaluation to classic programs by calling the tool iRankFinder externally. In this thesis, we extend partial evaluation to probabilistic programs. A partially evaluated probabilistic integer program has the same expected runtime complexity as the original program. However, the transformed program might be easier to analyse. The new algorithm is implemented natively in \acrshort{koat} and it is integrated into the existing analysis algorithm in a modular and on-demand fashion. Our evaluation shows that the native implementation is faster on classical programs. Furthermore, the new implementation also infers tighter runtime complexity bounds for some probabilistic programs.
\newcommand{\lift}{\mathord{\uparrow}}