% TeX root = ../main.tex
\begin{tikzpicture}[tree]
\node[state,initial] (l0_0) {$\langle l_0, \textit{true}\rangle$};
\node[state] (l1_0) [below=1.2cm of l0_0] {$\langle l_1, \textit{true}\rangle$};
\node[state] (l2_0) [below right=of l1_0,anchor=west] {$\langle l_2, x>0 \rangle$};
\node[state] (l3_0) [below left=of l1_0,anchor=east] {$\langle l_3, x\leq0 \rangle$};
\node[state] (l1_1) [below left=of l2_0] {$\langle l_1, x \geq 0, y\leq 0\rangle$};
\node[state] (l1_2) [below right=of l2_0] {$\langle l_1, x > 0, y \geq 0\rangle$};
\node[state] (l3_1) [below left=of l1_1,anchor=east] {$\langle l_3, x=0,y\leq 0\rangle$};
\node[state] (l2_2) [below right=of l1_1] {$\langle l_2, x > 0,y\leq 0\rangle$};
% \node[state] (l2') [below left=of l1] {$\langle l_2, x \leq 0\rangle$};
% \node[state] (l3) [below right=of l2] {$\langle l_1, x > 2 \rangle$};
% \node[state] (l3') [below left=of l2] {$\langle l_2, \textit{false}\rangle$};
% \node[state] (l4) [below right=of l3] {};
% \node[state] (l4') [below left=of l3] {};
\path[->] (l0_0) edge node {$\textit{true}$} (l1_0)
(l1_0) edge node[ ]{$x > 0$} (l2_0)
edge node[swap]{$x \leq 0$ } (l3_0)
(l2_0) edge node[swap,anchor=east]{$y \leq 0, x := x-1$} (l1_1)
edge node[anchor=west]{$y > 0, y:=y-1$ } (l1_2)
% (l2) edge node[swap]{$x \leq 0$} (l3')
% edge node[] {$x > 0, x := x+1$ } (l3)
;
% \path[dotted,->]
% (l3) edge (l4')
% edge (l4)
% ;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=2cm of l0] {$l_1$};
\node[state] (l1a) at ($(l1) - (3,1.5)$) {$l_{1a}$};
\node[state] (l2) [below=3cm of l1] {$l_2$};
\path[->]
(l0) edge node { $t_0: \eta(x) = u $ } (l1)
(l1) edge [out=140,in=120,looseness=2,draw=rot-75]
node[below=0.3cm,pos=0.35] {$t_{1a}:\begin{aligned} \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1) edge [out=130,in=130,looseness=2,draw=rot-75] node[above,pos=0.4] {$t_{1b}:\begin{aligned} \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1a) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}
t_{1b}:& \\ \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1a) edge [bend right=15] node[swap] {$t_{2b}: \begin{aligned}\tau &= y > 0 \\
&\Land w = 1\end{aligned}$} (l2)
(l1) edge[bend left =20] node {$\begin{aligned}t_{2a}:& \\ \tau =& y > 0 \\
\Land& w = 1\end{aligned}$} (l2)
(l2) edge[bend left=20] node {$\begin{aligned}&t_3:\\ &\eta(y) = y - 1\end{aligned}$} (l1)
% (l1) edge node {$t_4:x \leq 0$} (l2)
% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
% x+1\end{aligned}$} ()
% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
% x-1\end{aligned}$} ()
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below = of l0] {$l_1$};
\node[state] (l2) [right= 3cm of l1] {$l_2$};
\node[state] (l3) [below = of l1] {$l_3$};
\path[->] (l0) edge node[swap] {$\textit{true}$} (l1)
(l1) edge node {$x > 0$} (l2)
edge node[swap] {$x \leq 0$} (l3)
(l2) edge[bend left=45]
node[anchor=north] {$t:\begin{aligned}y &> 0\\ y&:= y-1\end{aligned}$} (l1)
edge[bend right=45]
node[anchor=south] {$\begin{aligned}y &\leq 0\\ x&:=x-1\end{aligned}$} (l1)
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=of l0] {$l_1$};
\node[state] (l2) [right=of l1] {$l_2$};
\node[state] (l3) [right=of l2] {$l_3$};
\node[state] (l4) [below=of l1] {$l_4$};
\path[->] (l0) edge node[swap] {$\textit{true}$} (l1)
(l1) edge node[swap] {$x > 0$} (l2)
edge node[swap] {$x \leq 0$} (l4)
(l2) edge node[swap] {$y > 0$} (l3)
     edge node[swap] {$\begin{aligned}
             y &\leq 0 \\
             x &:= x-1 \\
             z &:= z+2
         \end{aligned}$} (l1)
(l3) edge node[swap] {$\begin{aligned}%
        x &:= x+y \\
        y &:= y-1
    \end{aligned}$} (l3);
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[tree,
font=\scriptsize,
every state/.append style={rectangle,rounded corners=10}]
\node[state,initial] (l0) {$\langle \ell_0, \texttt{true}\rangle$};
\node[state] (l1) [below=1.2cm of l0] {$\langle \ell_1, \texttt{true}\rangle$};
\node[state] (l2) [below right=of l1] {$\langle \ell_1, x > 1 \rangle$};
\node[state] (l2') [below left=of l1] {$\langle \ell_2, x \leq 0\rangle$};
\node[state] (l3) [below right=of l2] {$\langle \ell_1, x > 2 \rangle$};
\node[state] (l3') [below left=of l2,gray]{$\langle \ell_2, \texttt{false}\rangle$};
\node[] (l4) [below right=of l3] {};
\node[] (l4') [below left=of l3] {};
\path[->] (l0) edge node {$\tau = \texttt{true}$} (l1)
(l1) edge node[swap]{$\tau = x \leq 0$} (l2')
edge node {$\tau = x > 0; \eta(x) = x+1$ } (l2)
(l2) edge[gray]node[gray,swap]{$x \leq 0$} (l3')
edge node[] {$\tau = x > 0; \eta(x) = x+1$ } (l3)
;
\path[dotted,->]
(l3) edge[gray] (l4')
edge (l4)
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,font=\scriptsize]
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [below = of l0] {$\ell_1$};
\node[state] (l2) [below = of l1] {$\ell_2$};
\path[->] (l0) edge node[swap] {$t_1: \tau = \texttt{true}$} (l1)
(l1) edge node[swap] {$t_3: \tau = x \leq 0$} (l2)
edge [loop right] node {%
$t_2: \begin{aligned}
\tau &= x > 0 \\ \eta(x)&= x+1
\end{aligned}$ } ()
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize]
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [below=2cm of l0] {$\ell_1$};
\node[state] (l2) [below=3cm of l1] {$\ell_2$};
\path[->]
(l0) edge node { $t_0: \eta_0(x) = u $ } (l1)
(l1) edge [out=110,in=150,looseness=10,draw=rot-75]
node[swap]{$t_{1a}:\begin{aligned} p&=0.5 \\ \tau_1&= 1 \leq x \leq 3
\Land w = 0\end{aligned}$} (l1)
(l1) edge [out=160,in=200,looseness=10,draw=rot-75]
node[swap]{$ t_{1b}: \begin{aligned}p&=0.5 \\ \tau_1&= 1 \leq x \leq 3
\Land w = 0 \\ \eta_{1b}(x)&= x + 1\end{aligned}$} (l1)
(l1) edge[bend left =30] node {$t_{2}:\tau_2 = y > 0
\Land w = 1$} (l2)
(l2) edge[bend left=30] node {$t_{3}:\begin{aligned}
p&=0.5 \\ \eta_3(y) &= y - \textsc{Bern}(0.5)
\end{aligned}$} (l1)
% (l2) edge[bend left=30,draw=rwth-75] node[swap] {$t_{3a}: p=0.5$} (l1)
% (l1) edge node {$t_4:x \leq 0$} (l2)
% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
% x+1\end{aligned}$} ()
% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
% x-1\end{aligned}$} ()
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[tree,initial above,font=\scriptsize,
every state/.append style={rectangle,rounded corners=10}]
\node[state,initial] (l0) {$\langle\ell_0,\texttt{true}\rangle$};
\node[state] (l1) [below=3cm of l0] {$\langle\ell_1,\texttt{true}\rangle$};
\node[state] (l2) [below=3cm of l1] {$\langle\ell_2,\texttt{true}\rangle$};
\path[->]
(l0) edge node { $t_0: \eta_0(x) = u $ } (l1)
(l1) edge [out=110,in=150,looseness=6,draw=rot-75]
node[swap]{$t_{1a}:\begin{aligned} p&=0.5 \\ \tau_1&= 1 \leq x \leq 3
\Land w = 0\end{aligned}$} (l1)
(l1) edge [out=160,in=200,looseness=6,draw=rot-75]
node[swap]{$ t_{1b}: \begin{aligned}p&=0.5 \\ \tau_1&= 1 \leq x \leq 3
\Land w = 0 \\ \eta_{1b}(x)&= x + 1\end{aligned}$} (l1)
(l1) edge[bend left =30] node {$t_{2}:\begin{aligned}\tau_2 =& y > 0
\Land w = 1\end{aligned}$} (l2)
(l2) edge[bend left=30] node {$t_{3}:\begin{aligned}
p&=0.5 \\ \eta_3(y) &= y - \textsc{Bern}(0.5)
\end{aligned}$} (l1)
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize,
every state/.append style={rectangle,rounded corners=10},
]
\node[state,initial] (l0t) {$\langle \ell_0, \texttt{true} \rangle$};
\node[state] (l1t) [below=2cm of l0t] {$\langle \ell_1, \texttt{true} \rangle$};
\node[state] (l1p134) [below=2cm of l1t] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4 \rbrace \rangle$};
\node[state] (l1p124) [right=4cm of l1p134] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4 \rbrace \rangle$};
\node[state] (l2py) [left=4cm of l1p134] {$\langle \ell_2, y > 0 \rangle$};
\node[state] (l2p134y) [below=2cm of l1p134] {$\langle \ell_2, \psi_1 \land \psi_3 \land \psi_4 \land y>0 \rangle$};
\node[state] (l2p124y) [right=4cm of l2p134y] {$\langle \ell_2, \psi_1 \land \psi_2 \land \psi_4 \land y>0 \rangle$};
\node[state] (l1p56) [left=4cm of l2p134y] {$\langle \ell_1, \lbrace \psi_5, \psi_6 \rbrace \rangle$};
\node[state] (l1p13456) [below=2cm of l2p134y] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
\node[state] (l1p12456) [right=4cm of l1p13456] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
% New in figure 2
\node[state] (l2p13456y) [below=2.5cm of l1p13456] {$\left\langle\ell_2,\begin{aligned} \psi_1 &\land \psi_3 \land \psi_4 \land\\ \psi_5 &\land \psi_6 \land y > 0 \end{aligned}\right\rangle$};
\node[state] (l2p56y) [below=3cm of l1p56] {$\langle \ell_2, \psi_5 \land \psi_6 \land y > 0 \rangle$};
\node[state] (l2p12456y) [right=4cm of l2p13456y] {$\left\langle\ell_2, \begin{aligned}\psi_1 &\land \psi_2 \land \psi_4 \land \\ \psi_5 &\land \psi_6 \land y > 0 \end{aligned}\right\rangle$};
% Transitions
\path[->]
(l0t) edge[] node (t01) { $t_{0,1}$ } (l1t)
(l1t) edge[] node[swap] (t21) { $t_{2,1}$ } (l2py)
(l1t) edge[bend right=20] node[swap] (t1a1) { $t_{1a,1}$ } (l1p134)
(l1t) edge[] node (t1b1) { $t_{1b,1}$ } (l1p124) % missing in the sketch
(l2py) edge[] node[swap] (t31) { $t_{3,1}$ } (l1p56)
(l1p134) edge[bend right=10] node[swap] (t1b2) { $t_{1b,2}$ } (l1p124)
(l1p134) edge[loop above,transform canvas={xshift=5mm}] node[swap] (t1a2) { $t_{1a,2}$ } ()
(l1p134) edge[] node (t22) { $t_{2,2}$ } (l2p134y)
(l1p124) edge[bend right=10] node[swap] (t1a3) { $t_{1a,3}$ } (l1p134)
(l1p124) edge[loop above] node (t1b3) { $t_{1b,3}$ } ()
(l1p124) edge[] node (t23) { $t_{2,3}$ } (l2p124y)
(l1p56) edge[bend right=30] node (t1a6) { $t_{1a,6}$ } (l1p13456)
(l1p56) edge[out=290,in=205] node[swap,pos=.40] (t1b6) { $t_{1b,6}$ } (l1p12456)
(l2p134y) edge[] node[swap] (t32) { $t_{3,2}$ } (l1p13456)
(l2p124y) edge[] node[swap] (t33) { $t_{3,3}$ } (l1p12456)
(l1p13456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1a4) { $t_{1a,4}$ } ()
(l1p12456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1b5) { $t_{1b,5}$ } ()
(l1p13456) edge[bend left=10] node (t1b4) { $t_{1b,4}$ } (l1p12456)
(l1p12456) edge[bend left=10] node (t1a5) { $t_{1a,5}$ } (l1p13456)
% (l1p56) edge[rot, bend right=20] node[swap] (t25) { $t_{2,5}$ } (l2p5)
(l1p56) edge[bend left=15,transform canvas={xshift=-4mm}] node[] (t26) { $t_{2,6}$ } (l2p56y)
(l2p56y) edge[bend left=15,transform canvas={xshift=-4mm}] node[] (t36) { $t_{3,6}$ } (l1p56)
% (l1p13456) edge[bend right=20] node[swap] (t24) { $t_{2,4}$ } (l2p1345)
(l1p13456) edge[bend left=20] node[pos=.55] (t24) { $t_{2,4}$ } (l2p13456y)
(l2p13456y) edge[bend left=20] node[pos=.45] (t34) { $t_{3,4}$ } (l1p13456)
% (l1p12456) edge[bend right=20] node[swap] (t26) { $t_{2,6}$ } (l2p1245)
(l1p12456) edge[bend right=20] node[swap] (t25) { $t_{2,5}$ } (l2p12456y)
(l2p12456y) edge[bend right=20] node[swap] (t35) { $t_{3,5}$ } (l1p12456)
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize,
every state/.append style={rectangle,rounded corners=10},
every initial by arrow/.style={grun}
]
\node[state,initial,grun] (l0t) {$\langle \ell_0, \texttt{true} \rangle$};
\node[state,grun] (l1t) [below=2cm of l0t] {$\langle \ell_1, \texttt{true} \rangle$};
\node[state,rwth] (l1p134) [below=2cm of l1t] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4 \rbrace \rangle$};
\node[state,rwth] (l1p124) [right=4cm of l1p134] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4 \rbrace \rangle$};
\node[state,rwth] (l2p5) [left=4cm of l1p134] {$\langle \ell_2, \lbrace \psi_5 \rbrace \rangle$};
\node[state,rot] (l2p1345) [below=2cm of l1p134] {$\langle \ell_2, \lbrace \psi_1, \psi_3, \psi_4, \psi_5 \rbrace \rangle$};
\node[state,rot] (l2p1245) [right=4cm of l2p1345] {$\langle \ell_2, \lbrace \psi_1, \psi_2, \psi_4, \psi_5 \rbrace \rangle$};
\node[state,rot] (l1p56) [left=4cm of l2p1345] {$\langle \ell_1, \lbrace \psi_5, \psi_6 \rbrace \rangle$};
\node[state,] (l1p13456) [below=2cm of l2p1345] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
\node[state,] (l1p12456) [right=4cm of l1p13456] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4, \psi_5, \psi_6 \rbrace \rangle$};
% Transitions
\path[->]
(l0t) edge[grun] node (t01) { $t_{0,1}$ } (l1t)
(l1t) edge[rwth] node[swap] (t21) { $t_{2,1}$ } (l2p5)
(l1t) edge[rwth,bend right=20] node[swap] (t1a1) { $t_{1a,1}$ } (l1p134)
(l1t) edge[rwth] node (t1b1) { $t_{1b,1}$ } (l1p124)
(l2p5) edge[rot, bend right=20] node[swap] (t31) { $t_{3,1}$ } (l1p56)
(l1p134) edge[rot] node (t22) { $t_{2,2}$ } (l2p1345)
(l1p134) edge[rot, bend right=10] node[swap] (t1b2) { $t_{1b,2}$ } (l1p124)
(l1p134) edge[rot,loop above,transform canvas={xshift=5mm}] node[swap] (t1a2) { $t_{1a,2}$ } ()
(l1p124) edge[rot, bend right=10] node[swap] (t1a3) { $t_{1a,3}$ } (l1p134)
(l1p124) edge[rot,loop above] node (t1b3) { $t_{1b,3}$ } ()
(l1p124) edge[rot] node (t23) { $t_{2,3}$ } (l2p1245)
(l1p56) edge[rot, bend right=20] node[swap] (t25) { $t_{2,5}$ } (l2p5)
(l1p56) edge[bend right=30] node (t1a5) { $t_{1a,5}$ } (l1p13456)
(l1p56) edge[out=270,in=210] node[swap] (t1b5) { $t_{1b,5}$ } (l1p12456)
(l2p1345) edge[bend right=20] node[swap] (t32) { $t_{3,2}$ } (l1p13456)
(l1p13456) edge[bend right=20] node[swap] (t24) { $t_{2,4}$ } (l2p1345)
(l2p1245) edge[bend right=20] node[swap] (t33) { $t_{3,3}$ } (l1p12456)
(l1p12456) edge[bend right=20] node[swap] (t26) { $t_{2,6}$ } (l2p1245)
(l1p13456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1a4) { $t_{1a,4}$ } ()
(l1p12456) edge[loop above,transform canvas={xshift=12mm}] node[swap] (t1b6) { $t_{1b,6}$ } ()
(l1p13456) edge[bend left=10] node (t1b4) { $t_{1b,4}$ } (l1p12456)
(l1p12456) edge[bend left=10] node (t1a6) { $t_{1a,6}$ } (l1p13456)
;
% General transitions
\node[grun] [right=1cm of t01] {$G_1$};
\node[rwth] [above right=1cm of t1b1] {$G_2$};
\node[rot] [right=1cm of t23] {$G_3$};
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize,
every state/.append style={rectangle,rounded corners=10},
]
\node[state,initial] (l0t) {$\langle \ell_0, \texttt{true} \rangle$};
\node[state] (l1t) [below=2cm of l0t] {$\langle \ell_1, \texttt{true} \rangle$};
\node[state] (l2t) [below=2cm of l1t] {$\langle \ell_2, \texttt{true} \rangle$};
\node[state,grun] (l1p134) [right=4cm of l1t] {$\langle \ell_1, \lbrace \psi_1, \psi_3, \psi_4 \rbrace \rangle$};
\node[state,grun] (l1p124) [right=4cm of l2t] {$\langle \ell_1, \lbrace \psi_1, \psi_2, \psi_4 \rbrace \rangle$};
% Transitions
\path[->]
(l0t) edge[] node (t0) { $t_{0}$ } (l1t)
(l1t) edge[bend left=20] node (t1) { $t_{2}$ } (l2t)
(l2t) edge[bend left=20] node (t2) { $t_{3}$ } (l1t)
(l1t) edge[grun, bend left=10] node (t1a1) { $t_{1a,1}$ } (l1p134)
(l1t) edge[grun, bend left=10] node[pos=0.35] (t1b1) { $t_{1b,1}$ } (l1p124)
(l1p134) edge[rot, bend left=10] node[pos=0.65] (t21) { $t_{2,1}$ } (l2t)
(l1p124) edge[rot, bend left=10] node (t22) { $t_{2,2}$ } (l2t)
(l1p134) edge[rot,loop above] node (t1a2) { $t_{1a,2}$ } ()
(l1p124) edge[rot,loop below] node (t1b3) { $t_{1b,3}$ } ()
(l1p134) edge[rot, bend right=20,transform canvas={xshift=5mm}] node[swap] (t1b2) { $t_{1b,2}$ } (l1p124)
(l1p124) edge[rot, bend right=20,transform canvas={xshift=5mm}] node[swap] (t1a3) { $t_{1a,3}$ } (l1p134)
;
% General transitions
\node[] [below right=1cm of l0t] {$G_0$};
\node[grun] [above left=1cm of l1p134] {$G_1$};
\node[rot] [below right=2cm of l1p134] {$G_2$};
\end{tikzpicture}
The goal of \gls{koat}'s analysis is to find tight upper bounds for \glspl{pip}.
The analysis techniques used, such as \acrfullpl{mprf}, have their limits and
cannot always find tight bounds for every program. This motivates control-flow
refinement in order to find a program that is equivalent to the original one,
which opens the possibility for the analysis to find tighter runtime-complexity
bounds.
Naturally, the control-flow refinement will add and replace transitions and
locations. We will show that each transition in the refined program is similar
to a transition in the original program and consequently every run in the new
program has a run of equal runtime complexity and probability in the original
program and vice-versa.
evaluation technique presented by \citeauthor{Domenech19} \cite{Domenech19} for
\glspl{pip}. The partial evaluation will be constructed in a similar way but
explicitly taking probability and general transitions into account.
The construction is done in three steps. First, an unfolding operation is
presented. Second, an evaluation step is presented, which uses the unfolding.
Third, the evaluation step is repeated until a fix-point is found. The unfolding
and evaluation will be adapted multiple times in order to (1) find finite
partial evaluation graphs, and (2) evaluate only the relevant parts of the
program.
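Schematically, and ignoring the adaptations just mentioned, the construction
computes a fix-point of the form
\begin{equation*}
    G^* = \evaluate^*(G_0) = \lim_{n\rightarrow\infty}\evaluate^n(G_0),
\end{equation*}
where the starting graph $G_0$ and the evaluation step $\evaluate$ are made
precise in the course of this chapter; this display is only meant as a preview
of the evaluation defined below, not as an additional definition.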
Afterwards, we will show that the partial evaluation is sound for \glspl{pip}
and prove that the expected runtime complexity is preserved and thus also the
\{t_2\}$, and assigns the value for $w$ accordingly. The program can
alternate between the loops non-deterministically after every iteration.
Since both loops are independent of one another, the non-determinism does
not change anything about the expected runtime complexity of the program. On
the general transition $g_1 = \{t_{1a}, t_{1b}\}$ at location $\ell_1$ the
program branches probabilistically, with a probability of $p=0.5$. On the
transition $t_3$, the value with which $y$ is decremented is sampled
impossible: First, the number of starting states is infinite, and second the
program is not guaranteed to terminate, hence the simulation of a specific run
might not terminate either.
For non-deterministic and probabilistic programs, the problem becomes even more
complex, since the transitions taken do not only depend on the current state
simulating every single possible run, it would be more efficient to treat
\emph{similar} states as one. Instead of simulating a single assignment, the set
of all possible assignments after a step is described by a set of constraints.
The resulting graph is called a \emph{partial evaluation graph}. It was
formalised for \glspl{chc} by
\citeauthor{gallagher2019eptcs} \cite{gallagher2019eptcs}. In this thesis, the
definitions are adapted to better fit the algorithm and the formalism of
\Glspl{pip}.
\ell, \texttt{true}\rangle \in \vers_\Prog$.
\begin{definition}
We define the lift $G\lift \in \PEG$ of a graph $G \in \G$ as
\begin{equation*}
    G\lift = (\set{\langle\ell,\texttt{true}\rangle}{\ell \in V(G)},
    \set{(\langle\ell,\texttt{true}\rangle,p,\tau,\eta,\langle\ell',
    \texttt{true}\rangle)}{t = (\ell,p,\tau,\eta,\ell') \in E(G)})
\end{equation*}
\end{definition}
\begin{example}\label{ex:lift}
The lift of the graph $G_{\Prog_3}$ of the program $\Prog_3$ is displayed in
\fref{fig:ex_lift}. It has the very same structure as the original program.
\begin{figure}[ht]
\centering
\input{figures/ch3_p3_lift}
\caption{Lift of the graph $G_{\Prog_3}$ of the program
$\Prog_3$.\label{fig:ex_lift}}
\end{figure}
\end{example}
Obviously, the lift of a program is not useful for the analysis, as it does not
capture any interesting properties of the program, besides what is already
known in the original program. In the following we will present a construction
for a partial evaluation graph using unfolding and abstraction.
probabilistic sampling is over-approximated with a non-probabilistic update
$\tilde{\eta}: \PV \rightarrow \Z[\V]$ and a constraint denoted as
We define a function $\evaluate_\Prog : \G_\Prog \rightarrow \G_\Prog $ that
unfolds all versions $\langle \ell, \varphi \rangle \in V(G)$ in the graph $G
\in \G_\Prog$ with each outgoing transition $t \in \TPout(\ell)$.
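To give an intuition before the formal treatment, such a step can be pictured
as the following sketch, which merely mirrors the component-level evaluation
defined later with the component fixed to all transitions of $\Prog$:
\begin{equation*}
    \evaluate_\Prog(G) = G + \set{\unfold(\langle \ell, \varphi \rangle,
    t)}{\langle \ell, \varphi\rangle \in V(G) \text{ and } t \in \TPout(\ell)}.
\end{equation*}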
\input{figures/ch3_p4_program.tex}
\caption{A Program $\Prog_4$\label{fig:ch3_inf_prog}}
\input{figures/ch3_p4_tree.tex}
\caption{Extract of the infinite evaluation \\graph of $\Prog_4$\label{fig:ch3_inf_tree}}
\begin{lemma}
Let $\alpha: \Phi \rightarrow \A$ be an abstraction function over a finite
abstraction space $\A\subset\C$. The partial evaluation graph $G_\alpha^*$
exists and is finite.
\begin{proof}
The abstraction $\alpha$ can map to only finitely many values in the
finite abstraction space $\A$. Every evaluation step adds only versions
from the finite set of versions $\Loc_\Prog \times \A \subset
\vers_\Prog$. Hence the set of vertices of the partial evaluation graph is
equally finite. A transition is added after a version $\langle \ell,
\_\rangle$ only for every outgoing transition $t \in \TPout(\ell)$.
Since $\TPout(\ell) \subseteq \T_\Prog$ is finite, only finitely many
transitions can exist between any two versions.
The graph grows monotonically with every evaluation step, hence the limit
exists and it must be reached in a finite number of evaluation steps.
\end{proof}
\end{lemma}
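Note that the argument above also yields coarse explicit bounds on the size of
the partial evaluation graph, since every vertex stems from $\Loc_\Prog \times
\A$ and every edge is the unfolding of a pair of a version and an outgoing
transition:
\begin{equation*}
    |V(G_\alpha^*)| \leq |\Loc_\Prog| \cdot |\A|
    \qquad\text{and}\qquad
    |E(G_\alpha^*)| \leq |V(G_\alpha^*)| \cdot |\T_\Prog|.
\end{equation*}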
\begin{aligned}
\psi_1 &= 1 \leq x, \hspace{1cm} \neg\psi_1 = 1 > x,\\
\psi_2 &= 2 \leq x, \hspace{1cm} \neg\psi_2 = 2 > x,\\
\psi_3 &= x \leq 3, \hspace{1cm} \neg\psi_3 = x > 3,\\
\psi_4 &= x \leq 4, \hspace{1cm} \neg\psi_4 = x > 4,\\
\psi_5 &= y > 0,\hspace{1cm} \neg\psi_5 = y \leq 0,\\
\psi_6 &= y \geq 0,\hspace{1cm} \neg\psi_6 = y < 0, \text{ and}
\end{aligned}\\
\A_\Psi =
\{\psi_1,\neg\psi_1,\dots,\psi_6,\neg\psi_6,\texttt{true},\texttt{false}\}.
partial evaluation graph is shown in \fref{fig:ex_prob_fpe}.
\begin{figure}[ht]
evaluation step, and just show the final evaluation graph in
\fref{fig:ex_localized_pba}. It looks very similar to before, but we saved
some entailment checks at location $\ell_2$; in return, the versions at
$\ell_2$ are no longer collapsed.
\begin{figure}[ht]
\centering
\input{figures/ch3_p3_full_pe_localized}
\caption{Partial evaluation of $\Prog_3$ with a localized property based
abstraction $S_{\bot,\Psi_{\ell_1},\bot}$.\label{fig:ex_localized_pba}}
\end{figure}
Clearly, a global abstraction function is a special case of a localized
abstraction function, where every location is abstracted with the same
abstraction function. How to select properties for the localized property-based
abstraction is discussed in \Cref{ch:implementation}.
The property-based abstraction crafted in Example \ref{ex:localized_pba} was
chosen such that the abstraction space is finite for at least one location
per loop in the original program. This is formalized by Lemma
\ref{lem:localized_pba_finite}. But before, we will adapt the evaluation one
last time. Instead of evaluating the entire program, the evaluation will unfold
only transitions in a given component $T$. The component is not required to be
connected. How to select the component for best results is discussed in
\Cref{ch:implementation}.
\begin{definition}[Component-level Evaluation]
Let $\Prog$ be a \gls{pip} and $T \subseteq \T_\Prog$ be a component of
$\Prog$. For a localized abstraction $S : \vers_\Prog \rightarrow \C$ an
evaluation step $\evaluate_{\Prog,S,T} : \G_\Prog \rightarrow \G_\Prog$ is
defined as
\begin{align*}
    \evaluate_{\Prog,S,T}(G) = G
    &+ \set{ \unfold_{S}(\langle \ell, \varphi \rangle, t)}{ \langle \ell,
    \varphi\rangle \in V(G) \text{, and } t \in \TPout(\ell) \cap T}\\
    &+ \set{
    (\langle\ell,\varphi\rangle,p,\tau,\eta,\langle\ell',\texttt{true}\rangle)}{
    \langle \ell, \varphi\rangle \in V(G) \text{, and } t = (\ell, p, \tau,
    \eta, \ell') \in \TPout(\ell) \backslash T}
\end{align*}
\end{definition}
The partial evaluation is bootstrapped with the lift of the program with the
component removed.
\begin{equation*}
G_0 = (G_\Prog - T)\lift
\end{equation*}
The component-level evaluation is then repeated on $G_0$ until a fix-point is
found.
\begin{equation*}
    G_{S,T}^* = \evaluate_{\Prog,S,T}^*(G_0) =
    \lim_{n\rightarrow\infty}\evaluate_{\Prog,S,T}^n(G_0)
\end{equation*}
\begin{example}\label{ex:component_pe}
In this example the partial evaluation of $\Prog_3$ is restricted to the
component $T = \{t_{1a}, t_{1b}\}$, because the two loops are the only
problematic transitions during analysis with \gls{mprf}s. Also we reduce the
number of properties. The evaluation uses the localized property-based
abstraction $S_{\bot,\Psi_{\ell_1},\bot}$ with $\Psi_{\ell_1} = \{\psi_1, \psi_2,
\psi_3, \psi_4\}$ where
\begin{align*}
    \psi_1 &= 1 \leq x, \hspace{1cm} \neg\psi_1 = 1 > x,\\
    \psi_2 &= 2 \leq x, \hspace{1cm} \neg\psi_2 = 2 > x,\\
    \psi_3 &= x \leq 3, \hspace{1cm} \neg\psi_3 = x > 3,\\
    \psi_4 &= x \leq 4, \hspace{1cm} \neg\psi_4 = x > 4
\end{align*}
\todo{continue here}
The program is lifted as shown in Example \ref{ex:lift} and
\fref{fig:ex_lift}, but with the transitions $t_{1a}$ and $t_{1b}$ removed.
The partial evaluation starts on this graph $G_0$ with
\begin{align*}
V(G_0) &= \{\langle\ell_0,\texttt{true}\rangle,\langle\ell_1,
\texttt{true}\rangle,\langle\ell_2,\texttt{true}\rangle\}\\
E(G_0) &= \{t_0,t_2,t_3\}
\end{align*}
The transitions $t_{1a}$ and $t_{1b}$ are unfolded already in the first
evaluation step. We find two new transitions by unfolding $t_{1a}, t_{1b} \in
\TPout(\ell_1) \cap T$ from $\langle\ell_1,\texttt{true}\rangle$
\begin{align*}
t_{1a,1} &= (\langle \ell_1, \texttt{true}\rangle, 0.5, \tau_1,
\eta_{1a}, \langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle) \\
t_{1b,1} &= (\langle \ell_1, \texttt{true}\rangle, 0.5, \tau_1,
\eta_{1b},\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle) \\
G_1 &= G_0 + \{t_{1a,1},t_{1b,1}\}
\end{align*}
Note that the transition $t_2$ is not in $T$. Instead of unfolding it, the
transition $t_2' = (\langle\ell_1,\texttt{true}\rangle,1,\tau_2,\eta_2,
\langle\ell_2,\texttt{true}\rangle)$ is added to the graph, which is already
contained as the lift of $t_2$. The same holds for $t_0$ and $t_3$.
In the second step the new versions $\langle \ell_1,
\{\psi_1,\psi_3,\psi_4\}\rangle$ and $\langle \ell_1,
\{\psi_1,\psi_2,\psi_4\}\rangle$ are unfolded. We find new transitions
\begin{align*}
t_{1a,2} &= (\langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle, 0.5,
\tau_1 \Land \psi_1 \Land \psi_3 \Land \psi_4, \eta_{1a}, \langle
\ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle) \\
t_{1b,2} &= (\langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle, 0.5,
\tau_1 \Land \psi_1 \Land \psi_3 \Land \psi_4, \eta_{1b}, \langle
\ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle) \\
t_{2,1} &= (\langle \ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle, 1, \tau_2
\Land \psi_1 \Land \psi_3 \Land \psi_4, \eta_2,\langle \ell_2,
\texttt{true}\rangle)\\
t_{1a,3} &= (\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle, 0.5,
\tau_1 \Land \psi_1 \Land \psi_2 \Land \psi_4, \eta_{1a}, \langle
\ell_1, \{\psi_1,\psi_3,\psi_4\}\rangle) \\
t_{1b,3} &= (\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle, 0.5,
\tau_1 \Land \psi_1 \Land \psi_2 \Land \psi_4, \eta_{1b}, \langle
\ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle) \\
t_{2,2} &= (\langle \ell_1, \{\psi_1,\psi_2,\psi_4\}\rangle, 1, \tau_2
\Land \psi_1 \Land \psi_2 \Land \psi_4, \eta_2, \langle \ell_2,
\texttt{true}\rangle) \\
G_2 &= G_1+ \{t_{1a,2},t_{1b,2},t_{2,1},t_{1a,3},t_{1b,3},t_{2,2}\}
\end{align*}
Note that the transitions $t_{2,1}$ and $t_{2,2}$ are new transitions, but they
lead to the trivial version instead of being unfolded, since $t_2
\not\in T$.
The evaluation ends here, since no new versions were added in the previous
step. We get $G_3 = G_2 = G^*_{\Prog_3,S_{\bot,\Psi_{\ell_1},\bot}}$.
The full evaluation graph is displayed in \fref{fig:ex_component_pe}.
\begin{figure}[ht]
\centering
\input{figures/ch3_p3_component_pe}
\caption{Component-level partial evaluation graph of $\Prog_3$ from
\fref{fig:ex_prob} on a component $T = \{t_{1a},t_{1b}\}$ and with a
localized property-based abstraction
$S_{\bot,\Psi_{\ell_1},\bot}$.\label{fig:ex_component_pe}}
\end{figure}
\end{example}
\rem{
In practice, versions that have no incoming transitions in $G_0$ are removed
before the evaluation, to avoid unnecessary unfoldings. If such a version is
reachable in the partial evaluation nonetheless, it is re-added by the
evaluation and unfolded in a later step.
}
The partial evaluation over the entire program is a special case of the
component-level evaluation with the component $T = \T_\Prog$. All following lemmas
and theorems hold equally for a (global) abstraction and partial evaluations of
the entire program.
Let $G^*_{\Prog,S}$ be the evaluation graph of a \gls{pip} $\Prog$ for any
localized abstraction function $S$. For every transition $t = (\langle
\ell_1, \varphi_1\rangle, p, \tau, \eta, \langle \ell_2, \varphi_2\rangle)
\in E(G^*_{\Prog,S})$ one can find a unique transition denoted as
with Lemma \ref{lem:original_transition}. In Example \ref{ex:global_abstr} and
\ref{ex:component_pe} the localized property-based abstraction was carefully
crafted so that the evaluation graph is finite. As was already foreshadowed, the
partial evaluation graph for a localized abstraction function is finite when at least one location
per loop in the original program is abstracted to a finite abstraction space.
\begin{lemma}\label{lem:localized_pba_finite}
Let $S$ be a localized abstraction function. The partial evaluation graph
$G_{\Prog,S}^*$ is finite if every loop $L$ in $\Prog$ contains a location
$\ell \in L$ such that $\set{S(\langle\ell,\varphi\rangle)}{\varphi \in
\Phi}$ is finite.
\begin{proof}
Let $G_{\Prog,S}^*$ be the partial evaluation graph of a \gls{pip}
$\Prog$ with a localized abstraction $S$, and let
$\set{S(\langle\ell,\varphi\rangle)}{\varphi \in \Phi}$ be finite for some $\ell
\in L$ on every loop $L$ of $\Prog$.
The proof will be done by contradiction. Assume the partial evaluation
graph $G_{\Prog,S}^*$ is infinite.
We can construct an infinite path $p = v_0v_1\dots = \langle\ell_0,
\_\rangle\langle\ell_1,\_\rangle\dots \in \vers_\Prog^\omega$, where all
versions on this path are pairwise different. Let $n \in \N$. Since
$\evaluate_\Prog^*$ does not converge, there must exist a fresh version
$v_n \in V(G_n) \backslash V(G_{n-1})$ that was added by unfolding or
copying a transition $t_n =(\ell_{n-1},\_, \_, \_,\ell_n)$ from a
version $v_{n-1} = \langle \ell_{n-1},\_\rangle \in V(G_{n-1})$.
$v_{n-1}$ must have been freshly added in $G_{n-1}$ or else $v_n$ would
have been part of $G_{n-1}$ already. By backwards induction we can
create a path $v_0v_1\dots{}v_n$ of $G_n$ for arbitrarily large $n \in \N$
with pairwise different versions, which implies that there exists an
infinite path $p = v_0v_1\dots = \langle\ell_0,
\_\rangle\langle\ell_1,\_\rangle\dots \in \vers_\Prog^\omega$ with
pairwise different versions in $G_{\Prog,S}^*$ as well.
Every version was added by unfolding or copying a transition $t_i
=(\ell_{i-1},\_, \_, \_,\ell_i) \in \T_\Prog$. Consequently, there must
exist an infinite path $p' = \ell_0\ell_1\dots \in \Loc_\Prog^\omega$
and since $\Prog$ is finite the path must contain a loop $L \in \Loc^*$
that appears an infinite number of times on $p'$.
By assumption there must exist a location $\ell^* \in L$ for which
$\set{S(\langle\ell^*,\varphi\rangle)}{\varphi \in \Phi}$ is finite. At
the same time versions $\langle\ell^*,\_\rangle$ must appear an infinite
number of times on $p$, yet they can only be abstracted to finitely
many different versions, which is a contradiction to the versions on $p$
being pairwise different.
Hence, $G_{\Prog,S}^*$ cannot be infinite.
\end{proof}
\end{lemma}
We will proceed with proving that the partial evaluation graph of a
component-level partial evaluation can be used for constructing a new program
$\Prog'$ with equal expected runtime-complexity. All results hold equally for
partial evaluations of the entire program and independently of the choice of
the (global or localized) abstraction function, as long as the localized
abstraction function guarantees a finite partial evaluation graph by Lemma
\ref{lem:localized_pba_finite}.
Let $G^*_{\Prog,S,T}$ be the evaluation graph of a \gls{pip} $\Prog$ with a
localized abstraction function $S$ on a component $T \subseteq \TP$. The
transitions of $E(G^*_{\Prog,S,T})$ can be split into finite pairwise
\item there exists a unique general transition $\bar{g}_i \in \GTP$ and
    some $\varphi \in \Phi$ for which the following property holds:
    \begin{align*}
        g_i &= \set{t}{\bar{t} \in \bar{g}_i, t \in E(G^*_{\Prog,S,T})} \\
            &= \set{(\langle\ell_{\bar{g}_i},\varphi\rangle,p,\varphi
            \land \tau_{\bar{g}_i}, \eta, \langle\ell',\varphi'\rangle)}{
            \bar{t} = (\ell_{\bar{g}_i}, p, \tau_{\bar{g}_i}, \eta, \ell') \in \bar{g}_i}
    \end{align*}
\todo{proof}
Let $t =(\langle \ell, \varphi\rangle, p, \tau, \eta, \_) \in
E(G^*_{\Prog,S,T})$ be a transition in the partial evaluation graph
$G^*_{\Prog,S,T}$ of a \gls{pip} $\Prog$ with a component $T \subseteq
\TP$. There exists a unique general
transition $g \in \GTP$ with $\bar{t} \in g$. Let $ g = \{ t'_1, \dots,
t'_n \}$. We set $\bar{g} = g$. All $t'_i$ share a common guard $\tau_g$
and source location $\ell_g = \ell$.
Let $1 \leq i \leq n$ and $t'_i = (\ell, p, \tau_g, \_, \_)$. By
construction of $\evaluate_{\Prog,S,T}$, either the transition
$t_i = \unfold(\langle\ell,\varphi\rangle, t'_i) =
(\langle\ell,\varphi\rangle, p, \varphi \land \tau_g, \_, \_)$ was added if $t'_i
\in T$, or $t_i = (\langle\ell,\varphi\rangle, p, \varphi \land \tau_g,
\_, \_)$ was added if $t'_i \not\in T$. In either case, all $t_i$ share a common
guard $\tau_g' = \varphi \land \tau_g$ and the probabilities add up to
1.
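For a concrete instance of this construction, recall Example
\ref{ex:component_pe}: unfolding the general transition $\{t_{1a}, t_{1b}\}$
from the trivial version $\langle\ell_1,\texttt{true}\rangle$ yields the pair
\begin{equation*}
    \{t_{1a,1},\, t_{1b,1}\} \qquad \text{with common guard } \texttt{true}
    \land \tau_1 = \tau_1 \text{ and probabilities } 0.5 + 0.5 = 1,
\end{equation*}
which hence again forms a general transition of the partially evaluated
program.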
Let $\Prog$ be as defined by Definition \ref{def:pip} and $G^*_{\Prog,S,T}$ be
the partial evaluation graph of $\Prog$ for some localized abstraction
function $S$ and a component $T \subseteq \TP$. A new \gls{pip} is defined
as $\Prog' = (\PV, \Loc_{\Prog'}, \GTPn, \langle \ell_0, \texttt{true}\rangle)$
where $\Loc_{\Prog'} = V(G^*_{\Prog,S,T}) \subset \vers_\Prog$, and $\GTPn =
\mathcal{G}\mathcal{T}_{G^*_{\Prog,S,T}} \subset 2^\VTP$. $\Prog'$ is
Theorem \ref{thm:correctness} will show that any runtime-complexity
bounds for the partial evaluation are also runtime-complexity bounds of the original
program, which demonstrates the correctness of the construction. Lemma
\ref{lem:constrfgeq} and Theorem \ref{thm:thightness} will show that the worst-case
expected runtime-complexity does not get worse with partial evaluation.
\section{History Dependent Abstractions}
In \Sref{sec:partialevaluation} the details of the abstraction were omitted.
Recall Definition \ref{def:abstraction}: an abstraction space must
be finite and the abstraction function must map any formula to elements of
this abstraction space. The abstraction function was kept simple in order to
keep the proofs concise. In this section we will expand the abstraction to take
the evaluation history into account, which opens the possibility for more
refined abstraction functions.
\begin{definition}[History dependent abstraction]
A history dependent abstraction function is a function $\G \rightarrow$
\end{definition}
In this section we will present the property-based abstraction
proposed by \citeauthor{Domenech19} \cite{Domenech19} for non-probabilistic
programs, which can be applied equally to probabilistic programs.
\todo{continue here}
one symbol is replaced by another is denoted with $[\cdot/\cdot]$. For example,
$(x<y)[x/1] = (1 < y)$. For a finite set $A = \{x_1,\dots,x_n\}$, the $n$
consecutive substitutions $[x_i/y_i]$, where the replacement expression $y_i$
varies only in the index $i$, are denoted using a shortened notation:
$[x_i/y_i]_{x_i \in A} = [x_1/y_1] \dots [x_n/y_n]$. For example,
$[x_i/i]_{x_i\in A} = [x_1/1]\dots[x_n/n]$ is the substitution where
every variable is replaced by its index in $A = \{x_1, \dots, x_n\}$. The
restriction of a function $f: A \rightarrow B$ to a smaller domain $A' \subseteq
A$ is denoted by $\varf|_{A'} : A' \rightarrow B$.
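For illustration (this is merely an instance of the notation just introduced,
not a new definition), for $A = \{x, y\}$ the shorthand that replaces every
variable by its successor expands to two consecutive substitutions:
\begin{equation*}
    (x < y)[x_i/x_i + 1]_{x_i \in A} = (x < y)[x/x+1][y/y+1] = (x + 1 < y + 1).
\end{equation*}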
This section will define constraints as special cases of first-order formulas.
In the following, $\V$ denotes a finite set of variables.
foundations on \gls{fo}-logic we refer the reader to \cite{barwise1977}.
Constraints are a subset of formulas in \gls{fo} logic; thus, their semantics
\gls{fo} logic \cite{barwise1977}. In this thesis, we will only consider integer
arithmetic. Let $\Zs := (\Z, +, \cdot, 0, 1, \leq)$ be the structure of
\item $1 < 0$ entails all constraints, because $\llbracket 1 < 0
\rrbracket = \emptyset$ and the empty set is a subset of any other
The problem of determining whether a set of constraints is satisfiable or
finding valid variable assignments for a given formula is part of \gls{smt} and
is a well studied field in computer science \cite{walsh2009smt,abraham17smt}.
The theory relevant for this thesis is the quantifier-free linear integer
arithmetic (usually called \texttt{QF\_LIA}).
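For instance, guards such as $1 \leq x \leq 3 \Land w = 0$ give rise to exactly
such queries (the concrete values below are only an illustrative example); an
\gls{smt} solver decides that
\begin{equation*}
    1 \leq x \Land x \leq 3 \Land w = 0 \quad\text{is satisfiable (take } x =
    1, w = 0\text{)}, \qquad 1 \leq x \Land x \leq 3 \Land x > 3
    \quad\text{is unsatisfiable.}
\end{equation*}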
Sometimes one is not interested in the result for all the variables but only
those for a subset of variables. This will later become relevant when handling
temporary variables that are only relevant to the scope of a transition. In
\gls{fo} logic one would just existentially quantify the variable in the
formula. However, the new \gls{fo} formula would no longer be quantifier-free.
Instead, a new formula is sought that has just the same solutions when ignoring
the values for the irrelevant variables and that uses only the reduced variable
set.
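As a small worked example of this idea, consider eliminating a variable $x$
that is only linked to a remaining variable $x'$; over the integers the
quantified formula has a quantifier-free equivalent in $x'$ alone:
\begin{equation*}
    \exists x.\, (x > 0 \Land x' = x + 1) \quad\equiv\quad x' > 1.
\end{equation*}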
An update assigns a value to a variable after the update, by taking into account
all the \textit{former} values of the variables. Constraints and assignments
have no notion of time, whereas for updates there exists a \textit{before} and
an \textit{after}.
Let $\Sigma = \{s : \V \rightarrow \Z\}$ be the set of full integer assignments,
which are assignments that assign a value to every variable $v\in\V$. In
contrast, partial assignments over a variable set $A \subseteq \V$ assign values
only to a subset of variables, and the set of partial assignments over the
variables $A\subseteq\V$ is denoted by $\Sigma_A = \{s: A \rightarrow \Z\}$.
For an integer polynomial $\varf\in\Z[A]$ over a variable set $A \subseteq \V$ and
an assignment $s \in \Sigma_A$ we write $\varf(s)$ for the value that is found
when evaluating the polynomial $\varf[x_i/s(x_i)]_{x_i\in A}$.
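For instance, for the polynomial $\varf = x + 2y \in \Z[\{x,y\}]$ and an
assignment $s$ with $s(x) = 1$ and $s(y) = 3$ this evaluates to
\begin{equation*}
    \varf(s) = (x + 2y)[x/1][y/3] = 1 + 2 \cdot 3 = 7.
\end{equation*}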
For an update described by the update function $u : A \rightarrow \Z[\V]$ and an
assignment $s \in \Sigma$ we define a function $\rho_u: \Sigma \rightarrow
\Sigma_A$. It computes the assignment after an update $u$, given an assignment
with $A' \cap (A \union B) = \emptyset$. By slight abuse of notation, we write
\begin{equation*}
u(\varphi) := ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/x]_{x\in A}.
\end{equation*}
\end{definition}
\begin{lemma}\label{lem:nonprop_update}
Let $A, B \subseteq \V$. Given an assignment $s\in\Sigma$, a formula
$\varphi \in \Phi$ and an update $u: A \rightarrow \Z[B]$. For any partial
Another way of reasoning about satisfying assignments is to see them as points
in an $m$-dimensional space for variables $A \subseteq \V$ and $|A| = m$. Every
used abstract domains are Polyhedra \cite{bagnara2005convexpolyhedron},
Octagons \cite{mine2001wcre, mine2007arxiv, mine2006hosc,truchet2010synacs}, and
Boxes \cite{gurfinkel2010}.
affine half-space in the Euclidean space $\R^n$. As discussed in
\Sref{sec:constraints}, the strictness of linear constraints and in turn the
openness of half-spaces are not relevant to the integer domain.
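For example, a conjunction of linear constraints such as the guard $1 \leq x
\Land x \leq 3$ denotes the intersection of the two corresponding half-spaces
and is therefore itself a convex polyhedron:
\begin{equation*}
    \llbracket 1 \leq x \Land x \leq 3 \rrbracket
    = \llbracket 1 \leq x \rrbracket \cap \llbracket x \leq 3 \rrbracket.
\end{equation*}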
For the sake of completeness, let us introduce a third abstract domain.
A closed interval $[a,b]$ with $a, b \in \R$ for a variable $v \in \V$ can be
concepts needed to understand this thesis will be introduced here, following the
definitions from \citeauthor{billingsley2011} \cite{billingsley2011}. For
additional information we refer the reader to the literature
\cite{kraaikamp2005modern, billingsley2011}.
Imagine a probabilistic experiment with several possible outcomes $\Omega$. For
example, a coin toss can have many arbitrary outcomes when taking the position
on the table into account, the fact that the coin might get lost in the process,
outcomes. For example, the coin toss on the table has a subset of outcomes
where the coin shows heads. The probability measure would assign this subset a
probability close to 50\% for a reasonably fair coin toss.
Formally, a probability measure is defined as a function on a $\sigma$-algebra,
which captures the intuitive property of random events: if you can measure the
probability of two subsets of outcomes (e.g. $A = \set{\omega \in
\Omega}{\text{coin shows heads}}$ and $B = \set{\omega \in \Omega}{\text{coin
shows tails}}$), then you can also measure the probability of the union of both
subsets ($A \union B = \set{\omega \in \Omega}{\text{coin shows heads or
tails}}$), and if $A$ and $B$ are disjoint, the probability of the combined
outcomes is equal to the sum of the probabilities of the individual outcomes.
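In the simplest possible setting (ignoring the position on the table and other
exotic outcomes), this intuition can be made concrete as follows: take $\Omega =
\{\text{heads}, \text{tails}\}$ with the $\sigma$-algebra $\F = 2^\Omega$ and
the probability measure $\mu$ given by
\begin{equation*}
    \mu(\emptyset) = 0, \qquad \mu(\{\text{heads}\}) = \mu(\{\text{tails}\}) =
    \tfrac{1}{2}, \qquad \mu(\Omega) = 1,
\end{equation*}
which satisfies the additivity described above for the disjoint sets
$\{\text{heads}\}$ and $\{\text{tails}\}$.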
\begin{definition}[$\sigma$-Algebra \cite{billingsley2011}]
Let $\Omega$ be an arbitrary set. A set $A \subseteq 2^\Omega$ is called a
A function $\mu: \F \rightarrow [0,1]$ on a $\sigma$-algebra $\F$ is a
probability measure if it satisfies the following conditions:
The distribution of a random variable is a way of describing a random variable
and the underlying probability space. In the case of discrete random variables,
the distribution is sufficient to fully describe the random variable. The
underlying probability space becomes irrelevant at this point, as the specific
events and their probabilities are not important to the outcome of the random
variable, as long as the probability of the outcome is fully defined.
\begin{definition}[Distribution of a random variable \cite{billingsley2011}]\label{def:distribution}
Let $X$ be a discrete random variable in a probability space $(\Omega, \F,
P)$. The probability of observing a specific value $r$ is given by the
distribution $\mu: \F \rightarrow [0,1]$ as follows:
\begin{align*}
\mu(\{r\}) = P(X=r) = P(\set{\omega \in \Omega}{X(\omega) = r})
\end{align*}
\begin{definition}[Support \cite{billingsley2011}]
For a random variable with distribution $\mu$, the support is the smallest
set $S$ for which $\mu(S) = 1$. Discrete random variables have a countable
When considering a random variable, one is usually interested in the value to
expect in general. For example, most possible outcomes of a binomial
distribution are very improbable. This is described by the expected value.
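As a standard illustration (a fair die, not an example used elsewhere in this
thesis): let $X$ be the number shown when rolling a fair six-sided die. Its
distribution assigns $\mu(\{r\}) = P(X = r) = \tfrac{1}{6}$ to every $r \in
\{1,\dots,6\}$, and the expected value is
\begin{equation*}
    \sum_{r=1}^{6} r \cdot P(X = r) = \frac{1 + 2 + \dots + 6}{6} = 3.5,
\end{equation*}
even though the die never actually shows the value $3.5$.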
They will be briefly introduced in this section, following the definitions from
\cite{kraaikamp2005modern}. Examples of the various probability distributions
are displayed in \fref{fig:distributions}.
$0$, for example decided by a coin toss.
\begin{definition}[Bernoulli distribution]\label{def:bernoulli}
The value of a random variable can be approximated by a constraint for the
bounds of the support of the probability distribution. For any probability
distribution $D \in \D$ and a variable $d \in \V$, we define
where every configuration contains the location, which represents the program
counter, and a state that holds the current values of the program variables. A
transition contains a condition $\tau$, called \textit{guard}, that must be
satisfied by the current state for the transition to be taken. Whenever a
transition is taken, it modifies the state with some \textit{update} $\eta$. An
update assigns a new value to each program variable depending on the previous
state. Every integer program starts at a unique location usually called
$\ell_0$. Whenever no transition guard is satisfied, the program terminates. The
arguments to the program are given by some starting state $s_0$, assigning a
value to every program variable.
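To illustrate the interplay of guard and update on a single transition (a
generic transition, not one from the figures): consider a transition with guard
$\tau = (x > 0)$ and update $\eta(x) = x - 1$. In a state $s$ with $s(x) = 3$
the guard is satisfied, so the transition can be taken and leads to the state
$s'$ with $s'(x) = 2$; in a state with $s(x) = 0$ the guard blocks the
transition.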
red). For simplicity, all the other transitions are unique members of their
own general transition and the trivial probability of 1 is omitted. At
location $\ell_1$ with a variable assignment $s(x) = 3$, three transitions
$t_2$, $t_3$ and $t_4$ are possible. The non-determinism is resolved first.
Then if the general transition containing $t_2$ and $t_3$ is selected, a
coin is thrown to decide whether to take $t_2$ or $t_3$.
from all the integers, but can be constrained by the guard, in which case the
transition is only taken if the non-deterministic sampling fulfils the guard. If
the sampled value fulfils no transition's guard, the program terminates.
Non-deterministic sampling is resolved before non-deterministic branching.
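The order of resolution described above can be summarised by the following
Python sketch (all names are hypothetical and the model is heavily simplified;
it is only meant to make the two-stage resolution explicit: first
non-determinism, then probability):
\begin{verbatim}
import random

def step(location, state, scheduler, general_transitions):
    """One simplified step of a probabilistic integer program."""
    # 1. Non-determinism is resolved first: the scheduler picks a
    #    general transition and values for the temporary variables.
    choice = scheduler(location, state, general_transitions)
    if choice is None:          # no guard can be satisfied
        return None             # the program terminates
    gt, temp_values = choice
    extended_state = {**state, **temp_values}

    # 2. Probabilistic branching: choose one transition of the selected
    #    general transition according to its probability.
    transitions = list(gt)
    weights = [t["prob"] for t in transitions]
    t = random.choices(transitions, weights=weights, k=1)[0]

    # 3. The guard must hold for the chosen values, otherwise the
    #    scheduler's choice was not admissible.
    assert t["guard"](extended_state)

    # 4. Apply the update to obtain the successor state.
    new_state = {x: upd(extended_state) for x, upd in t["update"].items()}
    return t["target"], new_state
\end{verbatim}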
\textsc{Bern}(0.5)$. The Bernoulli distribution can take the value 1 or 2,
each with a probability of 0.5. In the case that the distribution is sampled
to 1, the variable $x$ is decremented, and it stays the same otherwise.
of locations, and $\T = \Loc \times [0,1] \times (\PV \rightarrow
\Z[\V\union\D]) \times \R \times \Loc$ be the set of transitions over locations
$\Loc$. Let $\GT = 2^{\T}$ be the set of general transitions over $\Loc$.
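For instance, in the example discussed above the two probabilistic branches
starting in $\ell_1$ form one general transition $g = \{t_2, t_3\}$, where the
probabilities of $t_2$ and $t_3$ sum up to $1$ ($0.5$ each for the coin toss),
while every remaining transition $t$ is the unique member of its own singleton
general transition $\{t\}$ with the trivial probability $1$.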
transition's guard is satisfied. Since no normal transition leaves the location
$\ell_\bot$, the run loops indefinitely on $\ell_\bot$. In addition, a
remembered as well. This will not be relevant for the partial evaluation but it
is important to define the probabilistic properties of a \gls{pip}.
accumulated rewards over a whole process. In the
literature \cite{puterman1994markov, puterman1990markov}, the properties of a
given process with regard to the highest achievable rewards have been
extensively researched.
complexity. If one defines runtime as a reward, one can picture the policy for a
Markov decision process as an adversary. We call the policy the
\enquote{scheduler}.
We differentiate between two types of schedulers: the first is a scheduler that
at a given state always takes the same decision -- it has no memory of its past
\{g_\bot\})$ is a history dependent scheduler if for every finite prefix
$f\in\fpath_\Prog$ and configuration $c=(\ell,t,s) \in \confs_\Prog$,
$\scheduler(fc) = (g, s')$ implies items \ref{itm:md1} to \ref{itm:md4} of
Definition \ref{def:mscheduler} for the last configuration $c$.
other configuration are invalid starting configurations and they are assigned a
zero probability by the probability measure $\prSs : \confs_\Prog \rightarrow
[0,1]$
Let us say that the program ran up to a configuration $c=(\ell,t,s)$. The
scheduler resolves the non-deterministic sampling and branching to
$\scheduler(c) = (g, \tilde{s})$. The probability of the event $c \rightarrow
c'$, that is, that $c'$ with $c' = (\ell', t', s')$ and $t' = (\ell, p, \tau,
\eta, \_, \ell') \in g$ follows the configuration $c$, depends on three things:
first, the transition $t' \in g$ is taken with probability $p$; second, the
probability that each program variable $x\in\PV$ is updated to the value
$s'(x)$ by the probabilistic update -- recall Definition
\ref{def:prob_update}; and third, that the temporary variables sampled by the
scheduler agree with the target state. The result is the probability measure
$\prSs: \confs_\Prog^2 \rightarrow [0,1]$.
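Under the simplifying assumption that the program variables are updated
independently of each other, the shape of this measure can be sketched as
\begin{equation*}
    \prSs(c \rightarrow c') = p \cdot \prod_{x \in \PV} \Pr\bigl[\eta(x)
    \text{ evaluates to } s'(x) \text{ on } s\bigr] \cdot [\, s' \text{ and }
    \tilde{s} \text{ agree on all temporary variables}\,],
\end{equation*}
where $[\cdot]$ is $1$ if the condition holds and $0$ otherwise; this is only a
sketch of the three ingredients, not the formal definition.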
By a cylindrical construction one arrives at the final probability measure for
the infinite runs $\PrSs : \runs_\Prog \rightarrow [0,1]$ and the probability
space $(\runs_\Prog, \F, \PrSs)$.
That means that it does not matter whether we use Markovian or history
dependent deterministic schedulers when considering the worst-case expected
runtime complexity.
We define $\nu^\scheduler$ to be the expected runtime for any
deterministic scheduler $\scheduler$ -- Markovian or history dependent
-- on a program $\Prog$ with an initial state $s_0\in\Sigma$.
infinitely many prefixes, since there can be infinitely many assignments
caused by probabilistic sampling from finite distributions. We approach
the set $\fpath_N$ with a series of finite sets $F_1, F_2, \dots
\subseteq\fpath$ such that
$\scheduler_\text{opt} \in \MDS_{F_i}$ exists that maximises the
expected reward when limited to the finite set of configurations. Let
$\nu_i$ be the reward of this optimal scheduler.
In the limit, as $i$ approaches infinity, the optimal scheduler must
yield an expected reward no less than that of the history dependent scheduler
$\scheduler$. We get the following inequality.
The question of whether a program terminates is a famous problem at the heart of
computer science and it is known to be undecidable. The so-called Halting
Problem is strongly related to the search for upper and lower runtime complexity
bounds, which is equally undecidable. Nevertheless, the answers to these
questions are very important in practice. For example, a compiler might want to
warn the programmer about faulty code sections that were not marked explicitly
to run indefinitely, or a developer could be forced to prove the efficiency of
their program in critical scenarios. In an ever more complex world, with ever
more complex algorithms, the need for automatic tools arises. Even though the
question of runtime complexity cannot be answered in the general case, many
tools have been developed to automatically analyse the complexity of various
programming paradigms as tightly and quickly as possible. \cite{giesl2017aprove,
These tools are being constantly improved and they are regularly compared to
each other in the \gls{termcomp} \cite{termcomp2015,termcomp2019}, the goal
being to find as many and as tight bounds as possible for a set of test
programs.
Consider the program $\Prog_1$ displayed in \fref{fig:classic_c}. It contains
two program variables $x$ and $y$, and one temporary variable $u$, which is
used to non-deterministically sample $x$ in the first transition $t_0$. The
program clearly terminates: the first loop increments $x$ up to 3 if $x$ is
positive to begin with, and the second loop decrements $y$ while $y$ is
positive. However, classical analysis using only \gls{mprf} fails to find a
finite runtime-complexity bound for this program \cite{giesl2022lncs}. With the
help of \gls{cfr} on a sub-\gls{scc} level, the program can be transformed into
an equivalent program (see \fref{fig:classic_pe_c}) for which the analysis with
\gls{mprf} succeeds in finding a finite runtime-complexity bound.
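To make the informal description above concrete, the following Python sketch
mimics the described control flow of $\Prog_1$ (the bound on the
non-deterministic choice of $x$ is purely illustrative; the authoritative
version is the program in \fref{fig:classic_c}):
\begin{verbatim}
import random

def run_p1(y: int) -> int:
    """Simulates the control flow described for P_1 and returns the
    number of loop iterations as a stand-in for the runtime."""
    steps = 0
    # models the non-deterministic sampling of x via the temporary u
    x = random.randint(-10**6, 10**6)
    # first loop: increments x up to 3, if x was positive to begin with
    while 0 < x < 3:
        x += 1
        steps += 1
    # second loop: decrements y while it is positive
    while y > 0:
        y -= 1
        steps += 1
    return steps
\end{verbatim}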
\citeauthor{Domenech19} \cite{Domenech19} and it is called \gls{cfr}
via partial evaluation. It was implemented in their analysis tool
iRankFinder \cite{irankfinder2018wst}. It achieves very similar results to
\gls{mprf} when used on entire programs or whole \glspl{scc}. Its real
specifically the loops where \glspl{mprf} fail to find size bounds. This
technique was presented by \citeauthor{giesl2022lncs} \cite{giesl2022lncs} and
implemented in \gls{koat} \cite{giesl2014ijcar}, the complexity analysis tool
terminate, since a longer runtime becomes increasingly unlikely. The current
version of \gls{koat} fails to find finite runtime and expected complexity
bounds for this program. It would be useful to transform $\Prog_2$ into an
equivalent program like the one shown in \fref{fig:prob_pe_c}, similar to the
\gls{cfr} technique presented by \citeauthor{giesl2022lncs} \cite{giesl2022lncs}
and \citeauthor{Domenech19} \cite{Domenech19}.
\subsection{\acrshort{aprove}, \acrshort{koat}, and \acrshort{loat}}
The tools \gls{loat} \cite{frohn2022ijcar} and \gls{koat}
\cite{brockschmidt2016acm} were developed at RWTH Aachen University and they aim
to find lower and upper (respectively) runtime-complexity bounds on transition
systems. At its core, \gls{koat} computes bounds for subprograms using
\gls{mprf} or a \gls{twn}, and then puts the subprograms together in order to
generate upper runtime-complexity and size bounds for the whole program.
Optionally, to improve the analysis, \gls{koat} can resort to partial evaluation
as a control-flow refinement technique on a sub-\gls{scc} level
\cite{giesl2022lncs}. \gls{aprove} is the overall tool, and it has taken part in
competitions. It works on various input formats and transforms them accordingly
so that they can be used with specialised tools such as \gls{koat} and
\gls{loat}.
\cite{gulwani2009}. A typical example of such a program can be seen in
Example~\ref{ex:splitterloop}, where a single loop is replaced by two
independent consecutive loops. iRankFinder implemented the control-flow
refinement technique by partial evaluation proposed in \cite{gallagher2019eptcs}.
\gls{koat} borrowed the implementation of iRankFinder as a black box, which came
with a number of limitations \cite{giesl2022lncs}. Besides code-quality and
performance issues, the external library would not be able to profit from future
\acrshort{koat} is an automatic complexity analysis tool for integer
programs, which was recently extended to the analysis of probabilistic
integer programs. Currently, \acrshort{koat} applies so-called control-flow
refinement via partial evaluation to classic programs by calling the tool
iRankFinder externally. In this thesis, we extend partial evaluation to
probabilistic programs. A partially evaluated probabilistic integer program
has the same expected runtime complexity as the original program. However,
the transformed program might be easier to analyse. The new algorithm is
implemented natively in \acrshort{koat} and it is integrated into the
existing analysis in a modular, on-demand fashion. Our evaluation shows that
the native implementation is faster on classical programs. Furthermore, the
new implementation also infers tighter runtime complexity bounds for some
probabilistic programs.
\newcommand{\lift}{\mathord{\uparrow}}