7JUQQYQCE7G3ESFJETNGEPBCYGR5NGMAWHFPWI24RSQJPFM5YCTQC
YXLEIBX6XXIGQETRE23A6PBYT4Z5UWWIR55LFSLCHWC6D2FM4SPQC
YS5O2UANXCWRI3W7KMRKKK7OT7R6V3QDL4JS4DRLXM2EZUIPFALAC
567NUQ633R5ESTE2LWXVUGM7XEZGJ65IRKPOCDZJS3JE5RAMR5KAC
KUEZAMEH2M3K6EDHWMYXQMHZ2QTTH4KBM5CO7A7EB64ABEWRE6WQC
MQJMCLGFSBSFJ3JPCIHQSBZS4XZPGJI7NGGRASX5UVGJO6HES25AC
EWCCR4CPDE536DBLSJHXPJI67C4EQOWRJZR7D57P5XBV4ONPKXMQC
RVA3DOCNBWFBWA2MLC2TVQOEY25AUCYGEJSWMD72QRQ26AFLVZUAC
X7SPUV4CXRX7D5KKL6UVGHC7JWRWCA4QAJVVQISE64UEU7GXJNRQC
UVN5HOW3H2OVESJ62UFNQNS6VXHI5TRV22HQ44EOYU42453SDD2QC
TKJMPTB73W5TLLTNUOC4FGRQ7DHE5GRBTL7C5QBXRCNQPMJKGEQAC
QIVGMR6INKBVT5IFCO2UJJQEO4PVW3ZM7IDTCTCOKOX7VKG7UPFAC
DDNESFMD4LGNTDGTCW27FQPCOR5Y5MJWN2TWCSIRNBK6QGBITBRQC
JCK77YLTPL6VV5ZOTMGUJSFZ5DNMS3MZGP5UQLXYW6WSMEK6GAQAC
KVSF5AFJEIMIYAC264OUAA56SGP5MFAUG3AECKOTUPIXPGULVFQQC
QCVDPRXH57UFLU4YV3P2I5RQZGCBUKROYUZHDWOCHL43KDYN7MPAC
R2A2OD7J5ATBRR7W4QPTZDVH6GGHQ7XBX2JE5R3GKVS5WIQ43O3AC
M3WK5HF3XU5MN2JKJMRJVDMC74F4MZAQ23FTGJRW4CGU5ZL72MHAC
45HZPHVHYJ2BXATQ4MKCMPMMPZFBNJECY7JV3WX63BSA4CSQN6HAC
M5WHUJXX7KNK425RXFM2U6WT3M4PARJFO7T7MLN2IBDEYDKPXTMAC
GNUONJQNZG2MNEE55RW3XOQP3UICEBOKLVLKV42H3K64IBGLUGIQC
A5NVU6DZYOG6GRXJU6DZLCUQTKA7LPUWOGQ4NCJSY6FJB6GSP3OAC
B5T3HFKSQF3T4T6Z2KZOA5ZDMI6TVON7BS7KKB3EAGGYAS7RJPBAC
EIPHTX56WTRFBMBNA4VYMKK4L7GWHA4Q2AQ3BCG2ZGZB3DBRENBQC
6PTHERE26NXSC3ZXSI5UYKWTMPYZD4WMYFBJKZE3MRRTKC7MIU6QC
QMVJ2ZKX5ALAMMIL7E47ZQS67QFZJ7Z3HAY5G7X72KPEQEHWRGCQC
\usepackage[ngerman,english]{babel}
%
% Acronyms
\PassOptionsToPackage{acronym}{glossaries}
\usepackage{glossaries}
\PassOptionsToPackage{ngerman,english}{babel}
\RequirePackage{babel}
% TeX root = ../main.tex
\begin{tikzpicture}[program]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=of l0] {$l_1$};
\node[state] (l2) [below left =of l1] {$l_2$};
\node[state] (l3) [below=of l2] {$l_3$};
\node[state] (l4) [below right=of l3] {$l_4$};
\node[state] (l5) [right=of l4] {$l_5$};
\node[state] (l6) [above right=of l5] {$l_6$};
\node[state] (l7) [above=of l6] {$l_7$};
\node[state] (l8) [above left=of l7] {$l_8$};
\node[state] (l9) [below right=of l2] {$l_9$};
\node[state] (l10) [right=of l9] {$l_{10}$};
% \path[->] (l0) edge node[swap] {$\textit{true}$} (l1)
% (l1) edge node[swap] {$x > 0$} (l2)
% edge node[swap] {$x \leq 0$} (l4)
% (l2) edge node[swap] {$y > 0$} (l3)
% edge node[swap] {$\begin{aligned}
% y &\leq 0 \\
% x &:= x-1 \\
% z &:= z+2 \\
% \end{aligned}$} (l1)
% (l3) edge node[swap] { \begin{aligned}%
% x &:= x+y \\
% y &:= y-1
% \end{aligned}} (l3)
;
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[scale=0.5]
\begin{axis}[
axis lines = center,
axis on top,
xmin=-3.3,
xmax=3.3,
ymin=0,
ymax=5.3,
y=1cm,
x=1cm,
ytick={0,...,5},
xtick={-3,...,3},
xlabel = x,
ylabel = y,
font=\tiny
]
\addplot [ name path = A, thick ] {x + 4};
\addplot [ name path = B, thick ] {x + 1};
\addplot [ name path = C, thick ] {-x + 4};
\addplot [ name path = D, thick ] {-x + 1};
\addplot[name path=top,draw=none] {5.3};
\addplot[name path=bottom,draw=none] {0};
\addplot[pattern=north west lines,pattern color=blue!30] fill
between[of=A and top];
\addplot[pattern=north west lines,pattern color=blue!30] fill
between[of=C and top];
\addplot[pattern=north west lines,pattern color=blue!30] fill
between[of=B and bottom];
\addplot[pattern=north west lines,pattern color=blue!30] fill
between[of=D and bottom];
\addplot[only marks,mark=o] coordinates {
(0, 4)
(-1, 3) (0, 3) (1, 3)
(-1, 2) (0, 2) (1, 2)
(0, 1)
};
\addplot[fill=red,fill opacity=0.20] fill between [of=B and bottom,soft
clip={domain=0:1.5}];
\addplot[fill=red,fill opacity=0.20] fill between [of=D and bottom,soft
clip={domain=-1.5:0}];
\end{axis}
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[scale=0.5]
\begin{axis}[
axis lines = center,
axis on top,
xmin=-3.3,
xmax=3.3,
ymin=-3.3,
ymax=3.3,
y=1cm,
x=1cm,
ytick={-3,...,3},
xtick={-3,...,3},
xlabel = x,
ylabel = y,
font=\tiny
]
\addplot [
% domain=-3:3,
name path = A,
thick,
% pattern=north east lines,
] {0.5*x + 1};
\addplot[name path=C,draw=none] {3.3};
\addplot [
% domain={-1.65:3},
name path = B,
thick,
% pattern=north east lines,
] {0.5*x*x-2};
\addplot[name path=D,draw=none] {-3.3};
\addplot[pattern=north west lines,pattern color=blue!30] fill between[of=A and C];
\addplot[pattern=north east lines,pattern color=red!30] fill between[of=B and D];
\addplot[only marks,mark=o] coordinates {
(2, 2)
(0, 1) (1, 1) (2, 1)
(-1, 0) (0, 0) (1, 0)
(-1, -1) (0, -1) (1, -1)
};
\end{axis}
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[scale=0.5]
\begin{axis}[
axis lines = center,
axis on top,
xmin=-3.3,
xmax=3.3,
ymin=-3.3,
ymax=3.3,
y=1cm,
x=1cm,
ytick={-3,...,3},
xtick={-3,...,3},
xlabel = x,
ylabel = y,
font=\tiny
]
\addplot [
% domain={-1.65:3},
name path = B,
thick,
% pattern=north east lines,
] {0.5*x*x-2};
\addplot[name path=D,draw=none] {-3.3};
\addplot[pattern=north east lines,pattern color=red!30] fill between[of=B and D];
\addplot[only marks,mark=o] coordinates {
(-3, 3) (-2, 3) (-1, 3) (0, 3) (1, 3) (2, 3) (3, 3)
(-2, 2) (-1, 2) (0, 2) (1, 2) (2, 2)
(-2, 1) (-1, 1) (0, 1) (1, 1) (2, 1)
(-1, 0) (0, 0) (1, 0)
(-1, -1) (0, -1) (1, -1)
};
\end{axis}
\end{tikzpicture}
% TeX root = ../main.tex
\begin{tikzpicture}[scale=0.5]
\begin{axis}[
axis lines = center,
axis on top,
xmin=-3.3,
xmax=3.3,
ymin=-3.3,
ymax=3.3,
y=1cm,
x=1cm,
ytick={-3,...,3},
xtick={-3,...,3},
xlabel = x,
ylabel = y,
font=\tiny
]
\addplot [
% domain=-3:3,
name path = A,
thick,
% pattern=north east lines,
] {0.5*x + 1};
\addplot[name path=C,draw=none] {3.3};
\addplot[pattern=north west lines,pattern color=blue!30] fill between[of=A and C];
\addplot[only marks,mark=o] coordinates {
(2, 2) (3, 2)
(0, 1) (1, 1) (2, 1) (3, 1)
(-2, 0) (-1, 0) (0, 0) (1, 0) (2, 0) (3, 0)
(-3,-1) (-2, -1) (-1, -1) (0, -1) (1, -1) (2, -1) (3, -1)
(-3,-2) (-2, -2) (-1, -2) (0, -2) (1, -2) (2, -2) (3, -2)
(-3,-3) (-2, -3) (-1, -3) (0, -3) (1, -3) (2, -3) (3, -3)
};
\addplot[only marks,mark=o,red!80] coordinates { (0, -2) };
\end{axis}
\end{tikzpicture}
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [right=3cm of l0] {$l_1$};
\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [right=4cm of l0] {$\ell_1$};
\node[state] (l2) [right=4cm of l1] {$\ell_2$};
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [right=3cm of l0] {$l_1$};
\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [right=4cm of l0] {$\ell_1$};
\node[state] (l2) [right=4cm of l1] {$\ell_2$};
edge[in=35,out=80,looseness=15,draw=rot-75] node[above right] {$t_3:\begin{aligned}
p&=0.5 \\x &> 0 \\ x&:=
x+2\end{aligned}$} (l1)
edge[loop below] node {$t_4:\begin{aligned} x &> 0 \\ x&:=
x-1\end{aligned}$} ()
edge[in=35,out=80,looseness=15,draw=rot-75] node[above
right] {$t_3:\begin{aligned} p&=0.5 \\\tau &= x > 0 \\
\eta(x)&= x+2\end{aligned}$} (l1)
edge[loop below] node {$t_4:\begin{aligned} \tau&= x > 0
\\ \eta(x)&= x-1\end{aligned}$} ()
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [right=3cm of l0] {$l_1$};
\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [right=4cm of l0] {$\ell_1$};
\node[state] (l2) [right=4cm of l1] {$\ell_2$};
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [right=3cm of l0] {$l_1$};
\node[state] (l2) [right=3cm of l1] {$l_2$};
\node[state,initial] (l0) {$\ell_0$};
\node[state] (l1) [right=4cm of l0] {$\ell_1$};
\node[state] (l2) [right=4cm of l1] {$\ell_2$};
\path[->] (l0) edge node {$t_1:\textit{true}$} (l1)
(l1) edge node {$t_4:x \leq 0$} (l2)
edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
x+1\end{aligned}$} ()
edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
x-1\end{aligned}$} ()
\path[->] (l0) edge node {$t_1:\tau = \texttt{true}$} (l1)
(l1) edge node {$t_4:\tau = x \leq 0$} (l2)
edge[loop above] node {$t_2:\begin{aligned} \tau&= x > 1 \\
\eta(x)&= x+1\end{aligned}$} ()
edge[loop below] node {$t_3:\begin{aligned} \tau&= x > 0 \\
\eta(x)&= x-1\end{aligned}$} ()
% TeX root = ../main.tex
{
\RestyleAlgo{plain}
\SetAlgoNoLine
\LinesNotNumbered
\begin{algorithm}[H]
$x = u$\;
\lIf{$1 \leq x \leq 3$}{ $x = x+1 \oplus_\frac{1}{2} x = x$}
\lWhile{$1 \leq x \leq 3$}{ $x = x+1 \oplus_\frac{1}{2} x = x$ }
\lWhile{$y \geq 0$}{ $y = y-1 \oplus_\frac{1}{2} y = y$ }
\end{algorithm}
}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=2cm of l0] {$l_1$};
\node[state] (l1a) at ($(l1) - (3,1.5)$) {$l_{1a}$};
\node[state] (l2) [below=3cm of l1] {$l_2$};
\path[->]
(l0) edge node { $t_0: \eta(x) = u $ } (l1)
(l1) edge [out=140,in=120,looseness=2,draw=rot-75]
node[below=0.3cm,pos=0.35] {$t_{1a}:\begin{aligned} \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1) edge [out=130,in=130,looseness=2,draw=rot-75] node[above,pos=0.4] {$t_{1b}:\begin{aligned} \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1a) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}
t_{1b}:& \\ \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1a) edge [bend right=15] node[swap] {$t_{2b}: \begin{aligned}\tau &= y > 0 \\
&\Land w = 1\end{aligned}$} (l2)
(l1) edge[bend left =20] node {$\begin{aligned}t_{2a}:& \\ \tau =& y > 0 \\
\Land& w = 1\end{aligned}$} (l2)
(l2) edge[bend left=20] node {$\begin{aligned}&t_3:\\ &\eta(y) = y - 1\end{aligned}$} (l1)
% (l1) edge node {$t_4:x \leq 0$} (l2)
% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
% x+1\end{aligned}$} ()
% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
% x-1\end{aligned}$} ()
;
\end{tikzpicture}
}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=2cm of l0] {$l_1$};
\node[state] (l2) [below=3cm of l1] {$l_2$};
\path[->]
(l0) edge node { $t_0: \eta(x) = u $ } (l1)
(l1) edge [out=110,in=150,looseness=10,draw=rot-75]
node[swap]{$t_{1a}:\begin{aligned} p&=0.5 \\ \tau&= 1 \leq x \leq 3
\Land w = 0\end{aligned}$} (l1)
(l1) edge [out=160,in=200,looseness=10,draw=rot-75]
node[swap]{$ t_{1b}: \begin{aligned}p&=0.5 \\ \tau&= 1 \leq x \leq 3
\Land w = 0 \\ \eta(x)&= x + 1\end{aligned}$} (l1)
(l1) edge[bend left =30] node {$t_{2}:\begin{aligned}\tau =& y > 0
\Land w = 1\end{aligned}$} (l2)
(l2) edge[bend left=30] node {$t_{3}:\begin{aligned}
p&=0.5 \\ \eta(y) &= y - \textsc{Bern}(0.5)
\end{aligned}$} (l1)
% (l2) edge[bend left=30,draw=rwth-75] node[swap] {$t_{3a}: p=0.5$} (l1)
% (l1) edge node {$t_4:x \leq 0$} (l2)
% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
% x+1\end{aligned}$} ()
% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
% x-1\end{aligned}$} ()
;
\end{tikzpicture}
}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=2cm of l0] {$l_1$};
\node[state] (l1a) at ($(l1) - (3,1.5)$) {$l_{1a}$};
\node[state] (l2) [below=3cm of l1] {$l_2$};
\path[->]
(l0) edge node { $t_0: \eta(x) = u $ } (l1)
(l1) edge [bend right=15] node[swap] {$t_{1a}:\begin{aligned} \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1a) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}
t_{1b}:& \\ \tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1a)
(l1a) edge [bend right=15] node[swap] {$t_{2b}: \begin{aligned}\tau &= y > 0 \\
&\Land w = 1\end{aligned}$} (l2)
(l1) edge[bend left =20] node {$\begin{aligned}t_{2a}:& \\ \tau =& y > 0 \\
\Land& w = 1\end{aligned}$} (l2)
(l2) edge[bend left=20] node {$\begin{aligned}&t_3:\\ &\eta(y) = y - 1\end{aligned}$} (l1)
% (l1) edge node {$t_4:x \leq 0$} (l2)
% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
% x+1\end{aligned}$} ()
% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
% x-1\end{aligned}$} ()
;
\end{tikzpicture}
}
% TeX root = ../main.tex
\begin{tikzpicture}[program,initial above,font=\scriptsize]
\node[state,initial] (l0) {$l_0$};
\node[state] (l1) [below=2cm of l0] {$l_1$};
\node[state] (l2) [below=3cm of l1] {$l_2$};
\path[->]
(l0) edge node { $t_0: \eta(x) = u $ } (l1)
(l1) edge [out=150,in=210,looseness=10] node[swap]{$\begin{aligned}
t_1:& \\\tau = &1 \leq x \leq 3
\\ &\Land w = 0 \\ \eta(x) &= x + 1\end{aligned}$} (l1)
(l1) edge[bend left =30] node {$\begin{aligned}t_{2}:& \\ \tau =& y > 0 \\
\Land& w = 1\end{aligned}$} (l2)
(l2) edge[bend left=30] node {$t_3: \eta(y) = y - 1$} (l1)
% (l1) edge node {$t_4:x \leq 0$} (l2)
% edge[loop above] node {$t_2:\begin{aligned} x &> 1 \\ x&:=
% x+1\end{aligned}$} ()
% edge[loop below] node {$t_3:\begin{aligned} x &> 0 \\ x&:=
% x-1\end{aligned}$} ()
;
\end{tikzpicture}
\begin{apdxlemma}{\ref{lem:entailment}}\label{apdx:lem:entailment}
$\varphi_1$ entails $\varphi_2$ if and only if $\varphi_1 \Land
\neg\varphi_2$ is unsatisfiable.
\begin{proof}
\enquote{$\Rightarrow$} Let $\varphi_1$ entail $\varphi_2$, and assume
an assignment $\beta: \V \rightarrow \Z$ exists such that $\beta \models
\varphi_1 \Land \neg \varphi_2$.
\begin{align*}
\Rightarrow &\beta \models \varphi_1 \\
\Rightarrow & \beta \in \llbracket \varphi_1 \rrbracket \\
\Rightarrow & \beta \in \llbracket \varphi_2 \rrbracket \\
\Rightarrow & \beta \models \varphi_2 \\
\Rightarrow & \beta \not\models \neg\varphi_2 \\
\Rightarrow & \beta \not\models \varphi_1 \Land \neg\varphi_2 \lightning
\end{align*}
By contradiction $\varphi_1 \Land \neg\varphi_2$ is unsatisfiable.
\enquote{$\Leftarrow$} Let $\varphi_1 \Land \neg\varphi_2$ be
unsatisfiable and assume $\llbracket \varphi_1 \rrbracket \not\subseteq
\llbracket \varphi_2 \rrbracket$. There exists an assignment
\begin{align*}
& \beta \in \llbracket \varphi_1\rrbracket \\
\Rightarrow&\beta \models \varphi_1 \\
\end{align*}
and
\begin{align*}
&\beta \not\in\llbracket\varphi_2\rrbracket \\
\Rightarrow& \beta \not\models \varphi_2 \\
\Rightarrow& \beta \models \neg\varphi_2
\end{align*}
Combined $\beta \models \varphi_1 \Land\neg \varphi_2 \lightning$.
By contradiction $\varphi_1$ entails $\varphi_2$.
\end{proof}
\end{apdxlemma}
\begin{apdxlemma}{\ref{lem:nonprop_update}}\label{apdx:lem:nonprop_update}
Given an assignment $s\in\Sigma$, a formula $\varphi$ and an update $u: A
\rightarrow \Z[\V]$. Without loss of generality $A' = \set{x'}{x \in A}$ is
a set of fresh variables with $A' \cap \V = \emptyset$. We define
$u(\varphi) := ((\varphi \Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/x]_{x\in
A}$. For any partial assignment $s' = u(s)|_A$ and if $s \models \varphi$,
then $s' \models u(\varphi)$.
\begin{proof}
\begin{align}
s' &\models ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/x]_{x\in A} \nonumber\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/x]_{x\in A}[x/s'(x)]_{x\in A} \nonumber\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/s'(x)]_{x\in A}\label{eq:chaining_subst}\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x)))[x'/s'(x)]_{x\in A} \label{eq:dropping_projection}\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x)))[x'/\bigl(\rho_u(s)\bigr)(x)]_{x\in A}\nonumber\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x)))[x'/\bigl(u(x)\bigr)(s)]_{x\in A}\nonumber\\
\Leftrightarrow & \models
(\varphi[x'/\bigl(u(x)\bigr)(s)]_{x\in A}) \Land (\LAnd_{x\in A}
x'[x'/\bigl(u(x)\bigr)(s)]_{x\in A} = u(x)[x'/\bigl(u(x)\bigr)(s)]_{x\in
A}) \nonumber\\
\Leftrightarrow & \models
\varphi \Land \LAnd_{x\in A} x'[x'/\bigl(u(x)\bigr)(s)] =
u(x) \label{eq:dropping_substitutions}\\
\Leftrightarrow & \models
\varphi \Land \LAnd_{x\in A} \bigl(u(x)\bigr)(s) = u(x)\nonumber\\
\Leftrightarrow & \models
\varphi[x/s(x)]\Land \LAnd_{x\in A}
u(x)[x/s(x)] =
u(x)\label{eq:definition_of_update}\\
\Leftarrow & \models
\varphi[x/s(x)]\Land \LAnd_{x\in A}
u(x)[x/s(x)][x/s(x)] =
u(x)[x/s(x)]\label{eq:adding_model}\\
\Leftrightarrow & \models
(\varphi\Land \LAnd_{x\in A} u(x) = u(x))[x/s(x)]
\label{eq:dedup_substitution}\\
\Leftrightarrow & \models
\varphi[x/s(x)]\nonumber\\
\Leftrightarrow s & \models \varphi\label{eq:assumption}
\end{align}
Equation \ref{eq:chaining_subst} holds, because the substitutions can be
chained. Equation \ref{eq:dropping_projection} holds, because the
projection removed no variables contained in the substitution. Hence they
are just implicitly existentially quantified and not affected otherwise.
In equation \ref{eq:dropping_substitutions} the substitutions can be
removed, because $\varphi$ and $u(x)$ do not contain any affected
variables, as they were freshly generated. In Equation
\ref{eq:definition_of_update} the update is just replaced by its
definition. Equation \ref{eq:adding_model} holds because if $s$ is a
model for the formula, the formula is satisfiable. The converse is not
necessarily true. Applying the substitution $[x/s(x)]$ a second
time does nothing in Equation \ref{eq:dedup_substitution}, because
$s(x) \in \Z$ and specifically does not contain any variables $x\in A$.
Afterwards, the substitution is moved to the outside. Equation
\ref{eq:assumption} holds by assumption.
\end{proof}
\end{apdxlemma}
projections of constraints and handling polyhedra we resorted to \gls{apron} and
the \gls{ppl}\cite{bagnara2008socp}, which can be used as an abstract domain in
apron.
projections of constraints and handling polyhedra we resorted to
\gls{apron}\cite{apron} and the \gls{ppl}\cite{bagnara2008socp}, which can be
used as an abstract domain in apron.
to performance reasons, but made available through a new sub command
\texttt{cfr}. It might be used by other tools in the future, that want to profit
from partial evaluation on probabilistic programs.
to performance reasons, but made available through a new sub command. It might
be used by other tools in the future, that want to profit from partial
evaluation on probabilistic programs.
\subsection{Loops}
\subsection{Sub-\gls{scc} evaluation}
% \begin{comment}
% Base Algorithm
% fun find_loops_in(P)
% loop_heads = {}
% // Dijkstra
% on loop_back from l' -> l_h:
% loop_heads := loop_heads + l_h
% return loop_heads
% end
% fun heuristic(L: Loop)
% // same as original paper
% end
% fun partial_evaluation(Program P = (PV, L, GT, l_0))
% let lh := loop_heads(P)
% let properties := heuristic(P)
% let initial_state := (l_0, [])
% let P' := ({l_0}, l_0, {locations: P.l_0, {}) // todo
% for gt in GT:
% let gt' = {}
% for t in gt:
% (s', l_t') := evaluate(P, s, t)
% if l_t \in lh:
% else :
% gt' := gt' +
% end
% end
% Claims:
% Every path in P has an equivalent path in the evaluation graph EG
% Every path in EG has an equivalent path in P'
% SCC Algorithm:
% similar to above but stops at SCC boundaries.
% \end{comment}
% \section{Modules}
% \section{Command-Line Interface}
% The techniques described above were implemented natively in Koat2. They are
% available as part of the normal analysis via the flags \texttt{--cfr-native}.
% The old option for control-flow refinement \texttt{--cfr} was renamed to
% \texttt{--cfr-ir}, and can probably be removed in the future.
% During normal analysis the control-flow refinement is done on every \gls{scc}
% and the bounds of the resulting scc is compared to the previous bounds.
% A full program can be refined using the native control-flow refinement with a
% separate command \texttt{cfr}. For the new control-flow refinement the option is
% added to switch between the various method to select location for abstraction
% and the abstraction method itself (see \Sref{sec:abstraction}). An summary of
% the compatible options is given in \tref{tab:options}.
\subsection{Selecting the Abstract domain}
In the implementation, the projection is computed using an abstract domain.
Thanks to the \gls{apron}\cite{apron} library, the new implementation is generic
over the specific abstract domain used. We decided to use the abstract domain of
polyhedra (see \Sref{ssec:polyhedra}), since they are exact on linear
constraints. In practice only small components are partially evaluated, making
the performance disadvantage acceptable. \gls{apron} uses the
\gls{ppl}\cite{bagnara2008socp} under the hood for polyhedra. Other abstract
domains supported by \gls{apron} might be used in the future. In addition to the
projection operation, the library supports updates as well, which is used when
possible. The \gls{ppl} ignores all non-linear constraints, over-approximating
the set of constraints in the process. In turn the projection is
over-approximated as well. One could decide to over-approximate non-linear
constraints manually, in order to achieve tighter projections. For example
quadratic polynomials have an easy to compute minimum and maximum, which could
be transformed into a single linear constraint.
% As discussed in \Sref{sec:updateinvariants} the polyhedra computed during
% evaluation are tighter than the invariants computed during preprocessing. In
% order to update the invariants after the partial evaluation a new option
% \texttt{--update-invariants} is added and activated by default.
\subsection{Loops}
The abstraction relies on the fact that every loop contains at least one
location marked for abstraction. The loop detection is implemented using the
algorithm described by \citeauthor{johnson1975}\cite{johnson1975}. By default
the first location of the loop is selected, which by design of
\citeauthor{johnson1975}'s algorithm is the smallest location with regard to an
arbitrary order. In our implementation the locations were ordered topologically
from the start of the program.
% \begin{table}
% \center
% \begin{tabular}[ccc]
% \hline
% \begin{comment}
% analysis --cfr=native,update-invariants, --o
% --cfr=irankfinder,
% --cfr=off
Optionally, the \gls{fvs} can be computed on the detected loops to minimize the
number of locations marked for abstraction.
% prob-analysis --cfr=native
\subsection{Property-based abstraction}
We implemented the property based abstraction (recall
\Sref{sec:property_based_abstraction}) proposed by
\citeauthor{domenech2019arxiv}. The heuristic uses backwards-propagation as one
source of properties. Our implementation applies the backwards-propagation for
every loop that contains a location marked for abstraction, possibly aggregating
properties from multiple loops.
% cfr --native
% --irankfinder
% \end{comment}
The properties are projected onto the program variables, in order to reduce
their number, and because values for temporary variables do not propagate
throughout the program. The entailment checks of properties are done using
Z3 and Lemma \ref{lem:entailment}. Since a set of constraints is checked
against many properties, an incremental solver is used in order to avoid
recomputing the model for the set of constraints every time.
Once a set of constraints is abstracted, it is replaced by the set of entailed
properties.
% \hline
% \end{tabular}
% \caption{}
% \end{table}
Recall Algorithm \ref{alg:evaluate_abstr}. When adding a new transition to the
partial evaluation graph, we check whether the version is already present in the
graph. Instead of checking for equivalence on every version, we use structural
equality, which might not detect equivalent versions on non-abstracted
constraints, but works for the abstracted versions with properties, since the
properties are reused during abstraction.
\gls{koat}'s analysis goal is to find tight upper bounds for \gls{pip}. The
analysis techniques used such as \acrfull{mprf}, have their limits and cannot
always find tight bounds for every program, which motivates control-flow
refinement in order to find an equivalent program to the original one, that
opens the possibility for the analysis to find tighter runtime-complexity
bounds.
\gls{koat}'s analysis goal is to find tight upper bounds for \gls{pip} and
possibly non-probabilistic and non-deterministic integer programs that are a
specialization of the first. The analysis techniques used such as
\gls{mprf},\todo{more} have their limits and cannot always find tight bounds for
every program, which motivates control-flow refinement in order to find an
equivalent program to the original one, that opens the possibility for the
analysis to find tighter bounds.
Recall \ref{def:runtime_complexity}; the newly constructed refined program shall
have the same worst-case expected runtime complexity as the original
program which implies that runtime-complexity bounds found for the refined
program are also runtime-complexity bounds for the original one. Naturally the
control-flow refinement will add and replace transitions and locations, hence
the number of runtime-complexity bounds and size-bounds of each transition won't
be preserved. However, we will show that each transition in the refined program
is similar to a transition in the original program and hence every run in the
new program has a run of equal runtime-complexity and probability in the
original program and vice-versa.
Recall \ref{def:overallcomplexity}; the newly constructed refined program shall
have the same overall runtime complexity as the original program,
which implies that complexity bounds found for the refined program are also
complexity bounds for the original one.
Naturally the control-flow refinement will add and replace transitions and
locations, hence the runtime and size complexity per each transition won't be
preserved. However, we will show that each transition in the refined program is
a copy of a transition in the original program, and that complexity of an
original transition is equal to the sum of its copies.
evaluation presented by \citeauthor{domenech2019arxiv}\cite{domenech2019arxiv}
for \acrlong{pip}. The partial evaluation will be constructed in a similar
way but explicitly taking probability and general transitions into account.
evaluation technique presented by
\citeauthor{domenech2019arxiv}\cite{domenech2019arxiv} for \acrlong{pip}. The
partial evaluation will be constructed in a similar way but explicitly taking
probability and general transitions into account.
possibly infinite evaluation tree is constructed that will finally be collapsed
into a finite evaluation graph by an abstraction layer.
possibly infinite evaluation graph is constructed that will finally be collapsed
into a finite evaluation graph by the use of an abstraction layer.
Afterwards we will show that the partial evaluation is sound for \gls{pip} and
prove that the expected runtime-complexity is preserved and thus also the
worst-case runtime-complexity of non-probabilistic programs.
\begin{example}
\begin{figure}
\centering
\input{figures/ch1_prob}
\caption{Representation of $\Prog_3$, a variation of $\Prog_2$ from
\fref{fig:prob_c}, as a transition graph, with non-deterministic
branching.\label{fig:ex_prob}}
\end{figure}
The examples in this chapter will follow through using a variation of the
probabilistic program $\Prog_2$ from the introduction (\fref{fig:prob_c}),
which we call $\Prog_3$. $\Prog_3$ is visually represented as a transition
graph in \fref{fig:ex_prob}. The program uses a set of variables $\V =
\{x,y,u,w\}$ and has the program variables $\PV_{\Prog_3} = \{x,y\}$. $u,w$
are temporary variables whose values are reassigned by the scheduler on every
transition and whose values do not propagate throughout the program.
Afterwards we will show that the partial evaluation is sound for probabilistic
integer programs and prove that the overall expected runtime complexity
(\cref{def:past}) is equally preserved as the overall (worst-case) runtime
complexity (\cref{def:runtimebounds}).
In transition $t_0$ the value of $x$ is set by non-deterministic sampling
via the temporary variable $u$. At location $\ell_1$ the scheduler decides
between one of the general transitions $g_1 = \{t_{1a}, t_{1b}\}$ or $g_2 =
\{t_2\}$, and assigns the value for $w$ accordingly.
The program can alternate between the loops non-deterministically after every
iteration. Since both loops are independent of one another, the
non-determinism doesn't change anything about the expected
runtime-complexity of the program.
On the general transition $g_1 = \{t_{1a}, t_{1b}\}$ at location $\ell_1$
the program branches probabilistically, with a probability of $p=0.5$. On
the transition $t_3$, the value with which $y$ is decremented is sampled
probabilistically from a Bernoulli distribution, where both possible
outcomes have the same probability $p=0.5$.
\end{example}
During a normal execution a program visits one location after another and
changes state with every transition. The configuration at a given point of the
execution can be described by tuple of location and assignment. Simulating
every possible run of a program from every possible starting state is
impossible: First, the number of starting states is infinite; second the
program is not guaranteed to terminate and hence the simulation of a specific
run might not terminate either.
For non-deterministic and probabilistic programs, the problem gets even more
complex, since the transition taken does not only depend on the current state
but also on the answer of the scheduler and some random event.
Instead of simulating every single possible run, it would be nice to treat
\enquote{similar} states as one. Instead of simulating a single assignment,
the set of all possible assignments after a step is described by a polyhedron.
The resulting graph is called a \textit{partial evaluation graph}. It was
formalized for \gls{chc} by \citeauthor{gallagher2019eptcs}. In this thesis the
definitions are adapted to better fit the algorithm and the formalisms of
\Glspl{pip}.
During a normal execution a program visits locations one after another and
changes its state with every transition. The configuration at a given point of
the execution can be described by tuples of locations and assignments.
Simulating every possible run of a program from every possible starting state is
impossible: First, the number of starting states is infinite; second the program
is not guaranteed to terminate and hence the simulation of a specific run might
not terminate either.
For non-deterministic and probabilistic programs, the problem gets even more
complex, since the transition taken does not only depend on the current state but
also on the answer of the scheduler and some random event. Instead of simulating
every single possible run, it would be nice to treat \emph{similar} states as
one. Instead of simulating a single assignment, the set of all possible
assignments after a step is described by a set of constraints. The resulting
graph is called a \emph{partial evaluation graph}. It was formalized for
\glspl{chc} by \citeauthor{gallagher2019eptcs}\cite{gallagher2019eptcs}. In this
thesis the definitions are adapted to better fit the algorithm and the formalism
of \Glspl{pip}.
In order to guarantee termination, as will be elaborated further later on, a
finite set of abstracted values (not to be confused with abstract domains, such
as polyhedra etc.) is required. For now the set of abstracted values shall be a
finite set $\A \subset \C$.
% In order to guarantee termination of the refinement, as will be elaborated further later on, a
% finite set of abstracted values (not to be confused with abstract domains, such
% as polyhedra etc.) is required. For now the set of abstracted values shall be a
% finite set $\A \subset \C$.
Given a \gls{pip} $\Prog$ with locations $\Loc$, transitions $\T$ and general
transitions $\GT$, the versions are the vertices of the partial evaluation
graph. A version is composed of location from $\Loc$ and either an abstracted
value or a non-abstracted constraint.
% Given a \gls{pip} $\Prog$ with locations $\Loc$, transitions $\T$ and general
% transitions $\GT$, the versions are the vertices of the partial evaluation
% graph. A version is composed of location from $\Loc$ and either an abstracted
% value or a non-abstracted constraint.
The set of \textit{constraint versions} is the set of tuples over the
locations $L$ from $P$ and constraints $\C$. The set of \textit{abstract
versions} is the set of tuples over the locations $L$ from $P$ and some
finite abstraction space $A\subseteq\C$. Since the set of locations and the
abstraction space are both finite, so are the abstract versions. The set of
versions is the union of constraint and abstract versions.
The set of \textit{versions} is the set of tuples over the
locations $\Loc_\Prog$ and constraints $\C$:
For clarity's sake, versions will be denoted with angles $\langle \cdot
\rangle$. Since elements of the abstraction space are constraints as well,
the set notation with curly braces is used for explicitly abstracted
constraints like so $\langle l, \{\psi_1, \dots\}\rangle$, and the formula
notation without brackets is used for non-abstracted values like so $\langle
l, \varphi\rangle$. An abstracted version can be lifted to a non-abstracted
version trivially.
% For clarity sake, versions will be denoted with angles $\langle \cdot
% \rangle$. Since elements of the abstraction space are constraints as well.
% The set notation with curly braces is used for explicitly abstracted
% constraints like so $\langle l, \{\psi_1, \dots\}\rangle$ and the formula
% notation without brackets is used for non-abstracted values like so $\langle
% l, \varphi\rangle$. An abstracted version can be lifted to a non-abstracted
% version trivially.
Every version describes a subset of configurations of the program $P$. We write
$\llbracket \langle l, \varphi \rangle \rrbracket$ for the set of
configurations $\set{(l,\_,s)}{s \models \varphi}$.
% Every version describes a subset of configurations of the program $P$. We write
% $\llbracket \langle l, \varphi \rangle \rrbracket$ for the set of
% configurations $\set{(l,\_,s)}{s \models \varphi}$.
The edges of the partial evaluation graph are transitions $\T_{PE}$ analogously
defined to Definition \ref{def:pip}, except that they connect versions and not
locations.
The versions are the edges/locations of the partial evaluation graph. The edges
of the partial evaluation graph are transitions $\T_{PE}$ analogously defined to
Definition \ref{def:pip}, except that they connect versions and not locations.
A \Gls{pip} $P$ can be lifted to a trivial partial evaluation graph by replacing
every location $l \in L$ by the trivial version $\langle l,
\textit{true}\rangle$. Obviously the lift of a program is not useful for the
analysis, as it doesn't capture any interesting properties of the program,
A \Gls{pip} $P$ can be lifted to a trivial partial evaluation graph by
replacing every location $\ell \in \Loc_\Prog$ by the trivial version $\langle
\ell, \textit{true}\rangle$. Obviously the lift of a program is not useful for
the analysis, as it doesn't capture any interesting properties of the program,
\langle l', \varphi'\rangle$ is computed by unfolding a transition $t \in \T$
from the original \gls{pip} that starts in the same location $l \in \Loc$ as the
version $v = \langle l, \varphi\rangle$. The resulting constraint $\phi'$
contains all states $s' \in \Sigma$ with $s' \models \varphi'$ reachable via the
transition from a state in the source version, i.e. $\prSs((l, s, \_)
\rightarrow (l',s',t)) > 0$.
\langle l', \varphi'\rangle \in \vers_\Prog$ is computed by unfolding a
transition $t \in \T_\Prog$ from the original \gls{pip} that starts in the same
location $\ell \in \Loc$ as the source version $v = \langle \ell, \varphi\rangle
\in \vers_\Prog$. The resulting constraint $\varphi'$ contains all states $s'
\in \Sigma$ with $s' \models \varphi'$ which are reachable via the transition $t$
from a state $s \models \varphi$ in the source version, i.e., $\prSs((\ell, s, \_)
\rightarrow (\ell',s',t)) > 0$.
An unfolding operation $\unfold : \C \times (\PV \rightarrow
\Z[\V\union\D]) \rightarrow \C$ of a formula $\varphi \in \C$ with an
update $\eta: \PV\rightarrow\Z[\V \union \D]$ is defined as
An unfolding operation $\unfold : \Phi_\PV \times (\PV \rightarrow
\Z[\V\union\D]) \rightarrow \Phi_\PV$ of a formula $\varphi \in \Phi_\PV$
with an update $\eta: \PV\rightarrow\Z[\V \union \D]$ is defined as
\unfold(\varphi, \eta) = \begin{cases}
\tilde{\eta}(\varphi \Land \lfloor\eta\rceil)|_{\PV} & \text{if }
\tilde{\eta}(\varphi \Land \lfloor\eta\rceil) \text{ is
SAT}\\
\textit{false} & \text{otherwise}
\end{cases}
\unfold(\varphi, \eta) = \tilde{\eta}(\varphi \Land \lfloor\eta\rceil)
For any $\varphi\in2^\C$, assignments $s, s' \in \Sigma$ and probabilistic
update $\eta : \PV \rightarrow \Z[\V\union\D]$, if $\models\varphi$ then
For any $\varphi\in\Phi_\PV$, probabilistic update $\eta : \PV \rightarrow
\Z[\V\union\D]$, if there exists an $s\in \Sigma$ with $s\models\varphi$
then
\item \label{itm:unfold1} $\models\unfold(\varphi, \eta)$.
\item \label{itm:unfold2} If additionally $s \models \varphi$ and $s' \in
\support_s(\eta)$,
then $s' \models \unfold(\varphi, \eta)$.
\item \label{itm:unfold1} $\unfold(\varphi, \eta)$ is satisfiable.
\item \label{itm:unfold2} If $s' \in \support_s(\eta)$ for some $s' \in
\Sigma$, then $s' \models \unfold(\varphi, \eta)$.
$\models\unfold(\varphi, \eta)$ follows directly from the existence of
$s \models \varphi$ due to satisfiability and $s' \in \support_s(\eta)$
since the support cannot be empty.
The satisfiability of $\unfold(\varphi, \eta)$ follows directly from the
existence of $s \models \varphi$ due to satisfiability and $s' \in
\support_s(\eta)$ since the support cannot be empty.
\begin{example}
Consider transition $t_3$ of $\Prog_3$ from \fref{fig:ex_prob}. $t_3$
has a probabilistic update $\eta(y) = y - \textsc{Bern}(0.5)$ and
implicitly $\eta(x) = x$.
Imagine a constraint $y > 0$, that is unfolded with the probabilistic update
$\eta$. The update is approximated by $(\tilde{\eta},\lfloor\eta\rceil)$
with
\begin{align*}
\tilde{\eta}(x) &= x' \\
\tilde{\eta}(y) &= y' \\
\lfloor\eta\rceil &= (x' = x \Land y' = y - d \Land 0 \leq d \leq 1)
\end{align*}
The unfolding computes the following new constraint.
\begin{align*}
\tilde{\eta}(\varphi \Land \lfloor\eta\rceil) &= \tilde{\eta}(y > 0 \Land
x' = x \Land y' = y - d \Land 0 \leq d \leq 1) \\
&= (y > 0 \Land x' = x \Land y' = y - d \Land 0 \leq d \leq 1 \Land x'' =
x' \Land y'' = y')|_{\PV''}[x''/x]_{x\in\PV}\\
&= (y'' \geq 0)[x''/x]_{x\in\PV'}\\
&= (y \geq 0)
\end{align*}
Any satisfying assignment for $\varphi$ must assign $y$ a value larger than
zero and after the update, the value of the assignment must be larger or
equal to zero. Since $x$ is neither constrained before the update nor
updated in any way, it remains unconstrained after the unfolding.
\end{example}
Using the unfolding, one can derive an unfolding operation $\unfold^*: \vers
\times \T \rightarrow 2^{\T_{PE}}$ that constructs a new transition between the
versions copying the guard $\tau$, update $\eta$, and probability $p$ from the
original transition. Since unsatisfiable versions cannot be reached they are
ignored and no transition is created.
Using the unfolding, one can derive an unfolding operation $\unfold:
\vers_\Prog \times \T \rightarrow 2^{\T_{PE}}$ that constructs a new transition
between the versions copying the guard $\tau$, update $\eta$, and probability
$p$ from the original transition.
% Since unsatisfiable versions cannot be
% reached they are ignored and no transition is created.
% \begin{align*}
% \unfold^*(\langle l,\varphi\rangle, t) =
% \begin{cases}
% \{(\langle \ell, \varphi\rangle, p, \tau\Land\varphi, \eta,\langle \ell',\varphi'\rangle
% \} & \text{if } \begin{aligned}[t]
% &t = (\ell, p, \tau, \eta, \ell')\text {, and} \\
% &\varphi' = \unfold(\varphi \land \tau, \eta) \text{ is SAT}
% \end{aligned} \\
% \emptyset & \text{ otherwise}
% \end{cases}
% \end{align*}
\begin{align*}
\unfold(\langle l,\varphi\rangle, t) =
\{(\langle \ell, \varphi\rangle, p, \tau\Land\varphi, \eta,\langle \ell',\varphi'\rangle)
\} \\ \text{ for } t = (\ell, p, \tau, \eta, \ell')\text { and }
\varphi' = \unfold(\varphi \land \tau, \eta)
\end{align*}
\unfold^*(\langle l,\varphi\rangle, t) =
\begin{cases}
\{(\langle l, \varphi\rangle, p, \tau, \eta,\langle l',\varphi'\rangle)
\} & \text{if } \begin{aligned}[t]
&t = (l, p, \tau, \eta, l'), p \in [0,1], \tau \in 2^\C,\\
&\eta: \PV \rightarrow \Z[\V\union\D]\text {, and} \\
&\varphi' = \unfold(\varphi \land \tau, \eta) \text{ is SAT}
\end{aligned} \\
\{\} & \text{ otherwise}
\end{cases}
\unfold
\begin{lemma} [Existence, Similarity, Reachability of unfolded transitions]
\label{lem:existance_and_similarity}
Let $l \in \Loc_\Prog$, $t' \in \T_\Prog$, $s \in \Sigma$, and $\varphi$ be
an \gls{fo}-formula over variables $\PV$. Moreover let $t = (l, p, \tau,
\eta, l')$ with locations $l' \in \Loc_\Prog$, a probability $p \in
[0,1]$ and an update $\eta: \PV \rightarrow \Z[\V\union\D]$.
\begin{lemma} [Existence, Similarity, Reachability of unfolded transitions]
\label{lem:existence_and_similarity}
Let $\ell \in \Loc_\Prog$, $t' \in \T_\Prog$, $s \in \Sigma$, and $\varphi$
be an \gls{fo}-formula over variables $\PV$. Moreover, let $t = (\ell, p,
\tau, \eta, \ell')$ with locations $\ell' \in \Loc_\Prog$, a probability $p
\in [0,1]$ and an update $\eta: \PV \rightarrow \Z[\V\union\D]$.
For the sub-SCC level evaluation, Algorithm \ref{alg:abstr_eval} is adapted to
stop the evaluation when leaving the component and instead of starting only from
the initial location of the program, the evaluation starts from every
entry transition to the program. The exact algorithm is displayed in Algorithm
\ref{alg:eval_component}.
\begin{algorithm}
\caption{Evaluate the component $S \subseteq \T_\Prog$ of a Program $\Prog$
with abstraction $\alpha$.\label{alg:eval_component}}
\KwData{ A \gls{pip} $\Prog$, component $S$, abstraction function $\alpha$,
and abstraction oracle $S_\alpha$}
\KwResult{A Graph $G$, and a set of pairwise disjoint general transitions
$\GTG \subset \GT_\text{PE}$}
$\ET \leftarrow \Tout(S)$\;
\SetKwFunction{FEvaluate}{evaluate}
\SetKwProg{Fn}{Function}{:}{end}
$\GTG \leftarrow \emptyset$\;
\Fn{\FEvaluate{$G$, $v$}}{
$\langle l, \varphi\rangle \leftarrow v$\;
\For{$g \in \GTout(l)$} {
$g' \leftarrow \unfold^*(v, g)$\label{alg:abstr_line_unfold_g}\;
\If {$g' \neq \emptyset$ \label{alg:abstr_line_filteremptyg}}{
$g^* \leftarrow \emptyset$\;
\For{$t = (v, p, \tau, \eta, \langle l', \varphi\rangle) \in g'$}{
\uIf {$g \in \ET$} {
$t \leftarrow (v, p, \tau, \eta, \langle l',
\texttt{true}\rangle)$\label{alg:abstr_line_exit}\;
}
\ElseIf {$S_\alpha$} {
$t \leftarrow (v, p, \tau, \eta, \langle l',
\alpha(\varphi)\rangle)$\label{alg:abstr_line_abstraction}\;
}
$g^* \leftarrow g^* + t$\label{alg:abstr_line_addtog}\;
\uIf {$v' \not\in V(G)$}{
$G \leftarrow G + t$\;
$G \leftarrow \FEvaluate{G, v'}$\;
}
\Else {
$G \leftarrow G + t$\label{alg:abstr_line_addt}\;
}
}
$\GTG \leftarrow \GTG + g^*$\;
}
}
\Return {$G$}
}
$G \leftarrow \text{ graph of }\Prog \text{ lifted to a partial evaluation
graph}$\;
$G \leftarrow G - S$\;
\For{$t = (\ell, p, \tau, \eta, \ell')\in \Tin(S)$} {
$G \leftarrow \FEvaluate{$G, \langle\ell',\texttt{true}\rangle $}$ \;
}
\Return $G, \GTG$
\end{algorithm}
The partial evaluation starts at every entry transition $\Tin(S)$ to the
component. When unfolding an exit transition $t = (\ell, \_, \_, \_, \ell') \in
\Tout(S)$ from a version $\langle \ell, \varphi\rangle$, to $\unfold^*(\langle
\ell, \varphi\rangle, t) = \{(\langle\ell,\varphi\rangle, \_, \_, \_,
\langle\ell', \varphi'\rangle)\}$ the target version
$\langle\ell',\varphi'\rangle$ is replaced by the trivial overapproximation
$\langle\ell',\texttt{true}\rangle$. This target is always already contained in
the program by construction, and the algorithm always backtracks.
Let us consider an admissible finite prefix $f \in \fpath_\Prog$ with
\begin{align*}
f &= c_0c_1\dots{}c_k\dots{}c_m\dots{}c_n \\
&=
(\ell_0,t_\in,s_0)(\ell_1,t_1,s_1)\dots\underbrace{(\ell_k,t_k,s_k)\dots(\ell_m,t_m,s_m)}_{t_k,\dots,t_m
\in S}\dots(\ell_n,t_n,s_n)
\end{align*}
with an infix $c_k\dots{}c_m$ going through the component.
By construction, up to the component the partial evaluation $\Prog'$ contains a
similar prefix
$(\langle\ell_0,\texttt{true}\rangle,t_\in,s_0)(\langle\ell_1,\texttt{true}\rangle,t_1,s_1)\dots(\langle\ell_{k-1},\texttt{true}\rangle,t_{k-1},s_{k-1})
\in \fpath_{\Prog'}$. The component is entered via the transition
$t_k\in\Tin(S)$ which was used as a starting point for the evaluation with
the trivial version $\langle\ell_{k-1}, \texttt{true}\rangle$ in Algorithm
\ref{alg:eval_component}. By the same argument as in Theorem
\ref{thm:correctness}, a similar prefix
$(\langle\ell_0,\texttt{true}\rangle,t_\in,s_0)(\langle\ell_1,\texttt{true}\rangle,t_1,s_1)\dots(\langle\ell_{k-1},\texttt{true}\rangle,t_{k-1},s_{k-1})(\langle\ell_k,\varphi_k\rangle,t_k,s_k)\dots(\langle\ell_{m-1},\varphi_{m-1}\rangle,t_{m-1},s_{m-1})
\in \fpath_{\Prog'}$ through the component exists in $\Prog'$. The exit
transition $t_m$ must have been unfolded before backtracking and afterwards the
similar prefix trivially follows the lifted versions again, yielding a similar
finite prefix
$(\langle\ell_0,\texttt{true}\rangle,t_\in,s_0)(\langle\ell_1,\texttt{true}\rangle,t_1,s_1)\dots(\langle\ell_{k-1},\texttt{true}\rangle,t_{k-1},s_{k-1})(\langle\ell_k,\varphi_k\rangle,t_k,s_k)\dots(\langle\ell_{m-1},\varphi_{m-1}\rangle,t_{m-1},s_{m-1})(\langle\ell_m,\texttt{true}\rangle,t_m,s_m)\dots(\langle\ell_n,\texttt{true}\rangle,t_n,s_n)
\in \fpath_{\Prog'}$.
\begin{definition}[Solutions\cite{Domenech19}]\label{def:solutions}
Let $\varphi \in \Phi$ be an \gls{fo}-formula. $\llbracket \varphi
\rrbracket$ is defined as the set of all satisfying assignments of the
formula $\varphi$.
\end{definition}
Using the $\models$ relation and $\equiv$ relation from \gls{fo} logic, the
notion of satisfiability is defined.\cite{barwise1977} Constraints are a subset
of formulas in \gls{fo} logic, thus, their semantics shall be defined
accordingly. In this thesis we will only consider integer arithmetic. Let $\Zs
:= (\Z, +, \cdot, 0, 1, \leq, <, =)$ be the structure of standard integer
arithmetic. For an assignment $\beta : \V \rightarrow \Z$ and a formula
$\varphi \in \Phi$ we write $\beta \models \varphi$ instead of $\Zs, \beta
\models \varphi$.
The set of quantifier-free formulas of first-order logic over a variable set
$A\subset\V$ is denoted with $\Phi_A$, or simply $\Phi$, when $A=\V$. For
foundations on FO-logic we'll refer to the literature \cite{barwise1977}.
Constraints are a subset of formulas in \gls{fo} logic, thus, their semantics
shall be defined accordingly using the $\models$ and $\equiv$ relation from
\gls{fo} logic.\cite{barwise1977}
In this thesis we will only consider integer arithmetic. Let $\Zs := (\Z, +,
\cdot, 0, 1, \leq, <, =)$ be the structure of standard integer arithmetic. For an
assignment $\beta : \V \rightarrow \Z$ and a formula $\varphi \in \Phi$ we write
$\beta \models \varphi$ instead of $\Zs, \beta \models \varphi$.
\begin{proof}
\enquote{$\Rightarrow$} Let $\varphi_1$ entail $\varphi_2$, and assume
an assignment $\beta: \V \rightarrow \Z$ exists such that $\beta \models
\varphi_1 \Land \neg \varphi_2$.
\begin{align*}
\Rightarrow &\beta \models \varphi_1 \\
\Rightarrow & \beta \in \llbracket \varphi_1 \rrbracket \\
\Rightarrow & \beta \in \llbracket \varphi_2 \rrbracket \\
\Rightarrow & \beta \models \varphi_2 \\
\Rightarrow & \beta \not\models \neg\varphi_2 \\
\Rightarrow & \beta \not\models \varphi_1 \Land \neg\varphi_2 \lightning
\end{align*}
By contradiction $\varphi_1 \Land \neg\varphi_2$ is unsatisfiable.
\enquote{$\Leftarrow$} Let $\varphi_1 \Land \neg\varphi_2$ be
unsatisfiable and assume $\llbracket \varphi_1 \rrbracket \not\subseteq
\llbracket \varphi_2 \rrbracket$. There exists an assignment
\begin{align*}
& \beta \in \llbracket \varphi_1\rrbracket \\
\Rightarrow&\beta \models \varphi_1 \\
\end{align*}
and
\begin{align*}
&\beta \not\in\llbracket\varphi_2\rrbracket \\
\Rightarrow& \beta \not\models \varphi_2 \\
\Rightarrow& \beta \models \neg\varphi_2
\end{align*}
Combined $\beta \models \varphi_1 \Land\neg \varphi_2 \lightning$
By contradiction $\varphi_1$ entails $\varphi_2$.
\end{proof}
\proof{see Appendix on page \pageref{apdx:lem:entailment}.}
Let $\Z[\V]$ be the set of integer polynomials over variables $\V$ and $\Sigma =
\{s : \V \rightarrow \Z\}$ be the set of full integer
assignments.
For an integer polynomial $f\in\Z[\V]$ and a full assignment $s \in \Sigma$ we
write $f(s)$ for the value that is found by fully evaluating the polynomial
$f[x_i/s(x_i)]$. \todo{restrict to full assignments?}
Let $\Sigma = \{s : \V \rightarrow \Z\}$ be the set of full integer
assignments, that are assignments which assign a value to every variable
$v\in\V$. In contrast, partial assignments over a variable set $A \subseteq \V$
assign values only to a subset of variables, the set of partial assignments over
the variables $A\subseteq\V$ is denoted by $\Sigma_A = \{s: A \rightarrow \Z\}$.
For an integer polynomial $f\in\Z[A]$ over a variable set $A \subseteq \V$ and
an assignment $s \in \Sigma_A$ we write $\varf(s)$ for the value that is found by
evaluating the polynomial $\varf[x_i/s(x_i)]_{x\in A}$.
An update is described by an update function $u : A \rightarrow \Z[\V]$ that
maps every updated variable to a polynomial expression over $\V$. The value
of a variable $x\in A$ \textit{after} the update is determined by $u(x)(s)$.
Note, that $u(x)$ is a polynomial and $u(x)(s)$ is the evaluation of that
polynomial.
Let $A,B \subseteq \V$. An update is described by an update function $u : A
\rightarrow \Z[B]$ that maps every updated variable to a polynomial
expression over $B$. The value of a variable $x\in A$ \textit{after} the
update for an assignment $s\in\Sigma_B$ is determined by
$\bigl(u(x)\bigr)(s)$. Note, that $u(x) \in \Z[\V]$ is a polynomial and
$\bigl(u(x)\bigr)(s) \in \Z$ is the evaluation of that polynomial.
full assignment $s : \V \rightarrow \Z$, we write $u(s) : A \rightarrow \Z$ for
the partial assignment found by assigning every updated variable to its value
after the update.
assignment $s \in \Sigma$, we define a function $\rho_u: \Sigma \rightarrow
\Sigma_A$. It computes the assignment after an update $u$ given an assignment
$s \in \Sigma$ before the update:
\begin{lemma}\label{lem:nonprop_update}
Given a full assignment $s\in\Sigma$, a
formula $\varphi$ and an update $u: A \rightarrow \Z[\V]$. Without loss of
generality $A' = \set{x'}{x \in A}$ is a set of fresh temporary
variables. We define $u(\varphi) := ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/x]$. For any partial assignment $s' = u(s)|_A$
and if $s \models \varphi$, then $s' \models u(\varphi)$.
\begin{example}
Consider the update $u : \{x\} \rightarrow \Z[\{x,y\}]$ with $u(x) = x\cdot
y+1$. Let $s\in\Sigma_{\{x,y\}}$ assign $s(x) = 2$ and $s(y)=3$
\textit{before} the update. \textit{After} the update
$\bigl(\rho_u(s)\bigr)(x) = 2 \cdot 3 + 1 = 7$
\end{example}
\begin{proof}
\begin{align}
s' &\models ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/x] \nonumber\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/x][x/s'(x)] \nonumber\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x))|_{A'})[x'/s'(x)] \label{eq:chaining_subst}\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x)))[x'/s'(x)]_{x\in A} \label{eq:dropping_projection}\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x)))[x'/u(s)(x)]_{x\in A}\nonumber\\
\Leftrightarrow & \models ((\varphi \Land \LAnd_{x\in A} x' =
u(x)))[x'/u(x)(s)]_{x\in A}\nonumber\\
\Leftrightarrow & \models
(\varphi[x'/u(x)(s)]_{x\in A}) \Land (\LAnd_{x\in A}
x'[x'/u(x)(s)]_{x\in A} = [x'/u(x)(s)]_{x\in
A}) \nonumber\\
\Leftrightarrow & \models
\varphi \Land \LAnd_{x\in A} x'[x'/u(x)(s)] =
u(x) \label{eq:dropping_substitutions}\\
\Leftrightarrow & \models
\varphi \Land \LAnd_{x\in A} u(x)(s) = u(x)\nonumber\\
\Leftrightarrow & \models
\varphi[x/s(x)]\Land \LAnd_{x\in A}
u(x)[x/s(x)] =
u(x)\label{eq:definition_of_update}\\
\Leftarrow & \models
\varphi[x/s(x)]\Land \LAnd_{x\in A}
u(x)[x/s(x)][x/s(x)] =
u(x)[x/s(x)]\label{eq:adding_model}\\
\Leftrightarrow & \models
(\varphi\Land \LAnd_{x\in A} u(x) = u(x))[x/s(x)]
\label{eq:dedup_substitution}\\
\Leftrightarrow & \models
\varphi[x/s(x)]\nonumber\\
\Leftrightarrow s & \models \varphi\label{eq:assumption}
\end{align}
\begin{lemma}\label{lem:nonprop_update}
Let $A, B \subseteq \V$. Given an assignment $s\in\Sigma$, a formula
$\varphi \in \Phi$ and an update $u: A \rightarrow \Z[B]$. Without loss of
generality $A' = \set{x'}{x \in A} \subset \V$ is a set of fresh variables
with $A' \cap (A \union B) = \emptyset$. We define $u(\varphi) := ((\varphi
\Land \LAnd_{x\in A} x' = u(x))|_{A'})[x'/x]_{x\in A}$. For any partial
assignment $s' = \rho_u(s)|_A$ and if $s \models \varphi$, then $s' \models
u(\varphi)$.
Equation \ref{eq:chaining_subst} holds, because the substitutions can be
chained. Equation \ref{eq:dropping_projection} holds, because the
projection removed no variables contained in the substitution. Hence they
are just implicitly existentially quantified and not affected otherwise.
In equation \ref{eq:dropping_substitutions} the substitutions can be
removed, because $\varphi$ and $u(x)$ do not contain any affected
variables, as they were freshly generated. In Equation
\ref{eq:definition_of_update} the update is just replaced by its
definition. Equation \ref{eq:adding_model} holds because if $s$ is a
model for the formula, the formula is satisfiable. The inverse is not
necessarily true. Applying the substitution $[x/s(x)]$ a second
time, does nothing in Equation \ref{eq:dedup_substitution} because
$s(x) \in \Z$ and specifically does not contain any variables $x\in A$.
Afterwards, the substitution is moved to the outside. Equation
\ref{eq:assumption} holds by assumption.
\end{proof}
\proof{see Appendix on page \pref{apdx:lem:nonprop_update}.}
The orthogonal projection computes the projection of an \gls{fo} formula.
Geometrically, the projection can be seen as set of solutions projected (casting
a shadow) onto the remaining dimensions.
The projection operation computes the projection of an \gls{fo} formula.
Geometrically, the projection is the set of assignments found by orthogonally
projecting every satisfying assignment onto the remaining dimensions.
Taking the definitions from \cite{bagnara2005convexpolyhedron} a set of linear
constraints exactly defines a polyhedron. Every linear non-strict inequality
constraint $\psi \in \Lin^{\leq}$ describes a closed affine half-space and every
linear strict inequality constraint $\psi \in \Lin^{<}$ describes an open
affine half-space in the euclidean space $\R^n$.
Every linear non-strict inequality constraint describes a closed affine
half-space and every linear strict inequality constraint describes an open
affine half-space in the euclidean space $\R^n$. As discussed in
\Sref{sec:constraints} strictness of linear constraints and in turn openness of
half-spaces are not relevant to the integer domain.
\begin{definition}[Not necessarily closed
Polyhedra\cite{bagnara2005convexpolyhedron}\label{def:polyhedra}]
$\mathcal{P} \subseteq \R^n$ is a not necessarily closed (convex) polyhedron
if and only if it can be expressed by an intersection of a finite number of
not necessarily closed affine half-spaces.
\begin{definition}[Convex Polyhedra\cite{bagnara2005convexpolyhedron}\label{def:polyhedra}]
$\mathcal{H} \subseteq \R^n$ is a \emph{convex polyhedron} if and only if it
can be expressed by an intersection of a finite number of affine
half-spaces.
As discussed in \Sref{sec:constraints} strictness of linear constraints and in
turn openness of half-spaces are not relevant to the integer domain. Hence when
talking about integer arithmetic one refers only to polyhedra.
Polyhedra can exactly only represent linear constraints. Non-linear polynomial
Polyhedra can represent linear constraints exactly. Non-linear polynomial
Polyhedra will be used for the partial evaluation algorithm described in chapter
\ref{ch:theory}, since in practice the partial evaluation is called only on
small sub-programs and precision is more desirable than performance.
Polyhedra will be used to compute the projection of \gls{fo}-formulas, possibly
over-approximating the constraints in the process.
\subsection{Intervals}\label{ssec:box}
For completeness' sake, let's introduce a third abstract domain: the interval
domain. A closed interval $[a,b]$ with $a, b \in \R$ for a variable $v \in \V$
can be expressed as a pair of linear constraints of the form $a \leq v$ and $v
\leq b$.
\subsection{Boxes}\label{ssec:box}
For completeness' sake, let's introduce a third abstract domain of Boxes. A
closed interval $[a,b]$ with $a, b \in \R$ for a variable $v \in \V$ can be
expressed as a pair of linear constraints of the form $a \leq v$ and $v \leq b$.
$\mathcal{P} \subseteq \R^n$ is a box if it can be expressed by a set of
intervals.
$\mathcal{B} \subseteq \R^n$ is a Box if it can be expressed by a cartesian
product $I_1 \times \dots \times I_n$ of intervals $I_1,\dots,I_n$
polyhedron $P$ one can find a box $P'$ that contains the polyhedron $P$. Boxes
are computationally very efficient, although rather imprecise for most
applications.
polyhedron $\mathcal{H}$ one can find a box $\mathcal{B}$ that contains the
polyhedron $\mathcal{H}$. Boxes are computationally very efficient, although
rather imprecise for most applications.
\section{Probability Theory}
% Since we are talking about probabilistic programs, we will need tools to
% represent probabilities throughout the program. For example a variable in a
% probabilistic program can take a random value. That means that the value
% assigned to a variable is not deterministic but chosen at random from a given
% distribution.
\section{Probability Theory}\label{sec:probability}
Imagine a probabilistic event with some possible outcomes $\Omega$. For example
a coin-toss can have arbitrary many outcomes when taking the position on the
table into account, the fact that the coin might get lost in the process etc. A
probability measure describes the probabilities of sets of random outcomes. For
example the coin-toss on the table has a sub-set of outcomes where the coin's
head is above. The probability measure would assign a probability close to 50\%
for a somewhat normal coin-toss.
Imagine a probabilistic experiment with some possible outcomes $\Omega$. For
example, a coin-toss can have arbitrary many outcomes when taking the position on
the table into account, the fact that the coin might get lost in the process
etc. A probability measure describes the probabilities of sets of random
outcomes. For example, the coin-toss on the table has a sub-set of outcomes
where the coin's head is above. The probability measure would assign a
probability close to 50\% for a somewhat normal coin-toss.
the intuitive property of a random event: if you can measure two subsets of
outcomes (e.g. $A = \set{ \omega \in \Omega} {\text{ coin shows head}}$, and $B
= \set{\omega \in \Omega} {\text{coin shows tail}}$) so you can for both subsets
together ($A \union B = \set{\omega \in \Omega} {\text{coin shows head or
the intuitive property of a random event: if you can measure two subsets of
outcomes (e.g. $A = \set{ \omega \in \Omega} {\text{ coin shows head}}$ and $B
= \set{\omega \in \Omega} {\text{coin shows tail}}$) so you can for the union of
both subsets ($A \union B = \set{\omega \in \Omega} {\text{coin shows head or
Let $X$ be a discrete random variable. The distribution is described by the
probability measure $\mu: \Z \rightarrow \R$ with
\begin{align}
\mu(A) = P(\set{\omega \in \Omega}{X(\omega) \in A})
Let $X$ be a discrete random variable. The probability of a single value is
well defined by the distribution $\mu:F \rightarrow [0,1]$ as follows:
\begin{align}
P(X=r) = \mu(\{r\}) = P(\set{\omega \in \Omega}{X(\omega) = r})
When considering a random variable one is usually interested in the value to
expect in general. For example, most possible outcomes of a binomial
distribution are very improbable. This is described by the expected value.
\begin{definition}[Expected Value\cite{billingsley2011, meyer20arxiv}]
The expected value of a discrete random variable $X$ on a probability space
$(\Omega, \F, P)$ is the weighted sum of all values X with respect to the
measure $P$:
\begin{equation*}
\E(X) = \sum_{r \in \bar{\N}} r \cdot P(X = r)
\end{equation*}
If $\E(X)$ does not exist, then $\E(X) = \infty$.
\end{definition}
When considering a random variable one is usually interested in the value to
expect in general. For example, most possible outcomes of a binomial
distribution are very improbable. This is described by the expected value.
\begin{definition}[Expected Value\cite{billingsley2011}]
The expected value of a random variable $X$ on a probability space $(\Omega,
\F, P)$ is the integral of X with respect to the measure $P$:
\begin{equation*}
\E(X) = \int X dP = \int_\Omega X(\omega) P(d\omega)
\end{equation*}
This gets simplified for discrete random variables where the integral can
be replaced by a sum. (see Lemma 48 in \cite{meyer20arxiv})
\begin{equation*}
\E(X) = \sum_{r \in \bar{\R}_{\geq0}} r \cdot P(X = r)
\end{equation*}
\end{definition}
$\Z[\V\union \D]$, is the set of integer polynomials containing variables $\V$
or probability distributions $\D$. They behave exactly like normal polynomials
except that the values of the distributions are sampled probabilistically
instead of being given as argument. As such we can define a function $[\cdot]:
\Z[\V\union \D] \rightarrow \Sigma \rightarrow \Z \rightarrow [0,1]$ that returns
the probability that the polynomial $f \in \Z[\V \union D]$ with a state $s \in
\Sigma$ evaluates to a value $k\in\Z$ with a probability $[f](s)(k)$.
Let $\D$ be the set of (valid) probability distributions. $\Z[\V\union \D]$, is
the set of integer polynomials containing variables $\V$ or probability
distributions $\D$. They behave exactly like normal polynomials except that the
values of the distributions are sampled probabilistically instead of being given
as argument. As such we can define a function $[\cdot]: \Z[\V\union \D]
\rightarrow \Sigma \rightarrow \Z \rightarrow [0,1]$ that returns the
probability that the polynomial $f \in \Z[\V \union D]$ with a state $s \in
\Sigma$ evaluates to a value $k\in\Z$ with a probability $[\varf{}](s)(k)$.
we write $[x_i\backslash s(x_i)]$.
The resulting polynomial $f[x_i\backslash s(x_i)]$ describes a random variable
$[f](s)$ that takes a value at random depending only on the distributions $D_1,
\dots, D_m$ within. The random variable has the following probability
distribution:
we write $[x_i\backslash s(x_i)]$. The resulting polynomial
$\varf{}[x_i\backslash s(x_i)]$ describes a random variable $[\varf{}](s)$ that
takes a value at random depending only on the distributions $D_1, \dots, D_m$
within. The random variable has the following probability distribution:
The value of a random variable can be approximated by a constraint for the bounds
of the support of the probability distribution. For any probability distribution
$D \in \D$ and a variable $d \in \V$, we define
\begin{equation*}
\lfloor D \rceil_d = \begin{cases}
\min (\support(D)) \leq d \leq \max (\support(D)) & \text{if }
\min(\support(D)) \text{ and } \max (\support(D)) \text{ exist}, \\
\min (\support(D)) \leq d & \text{if } \min(\support(D))
\text{ exists}, \\
d \leq \max(\support(D)) & \text{if } \max(\support(D)) \text{ exists},\\
\texttt{true} & \text{otherwise}.
\end{cases}
\end{equation*}
Let $f \in \Z[\V\union\D]$ be a probabilistic polynomial with probability
distributions $D_1,\dots,D_n$ used within. Then the non-probabilistic
over-approximation of a probabilistic polynomial is defined as the tuple
$(\lfloor f\rceil,\tilde{f})$ with
\begin{align*}
\lfloor f \rceil = \LAnd_{i=1,\dots,n} \lfloor D_i \rceil_{d_i} \\
\tilde{f} = f[D_i/d_i] \text{ and }
Let $A\subset\V$ and $f \in \Z[A\union\D]$ be a probabilistic polynomial
with probability distributions $D_1,\dots,D_n$ used within. Then the
non-probabilistic over-approximation of a probabilistic polynomial is
defined as the tuple
$(\lfloor f\rceil,\tilde{\varf})$ with
\begin{align*}
\lfloor \varf \rceil = \LAnd_{i=1,\dots,n} \lfloor D_i \rceil_{d_i} \text{
and }
\\ \tilde{f} = \varf{}[D_i/d_i]
\begin{example}
Consider the same probabilistic polynomial as in Example
\ref{ex:prob_polynomial}: $f = x^2 + \textsc{Unif}(3,5)$. The distribution
support of the uniform distribution is bounded in both directions.
\begin{align*}
\min(\support(\textsc{Unif}(3,5))) &= 3 \\
\max(\support(\textsc{Unif}(3,5))) &= 5 \\
\end{align*}
Hence a variable $d$ assigned from the distribution satisfies the constraint
$\lfloor \varf\rceil = \lfloor \textsc{Unif}(3,5)\rceil_d = 3 \leq d \leq
5$. The distribution in the probabilistic polynomial is replaced by the
variable $d$. We get the new approximated polynomial $\tilde{\varf} = x^2 +
d$.
\end{example}
contains a condition, called \textit{guard}, that must be true for the current
state to be taken. Whenever a transition is taken it can modify the state with
some \textit{update}. An update assigns a new value to a program variable
depending on the previous state. Every integer program starts at a unique
location usually called $l_0$. Whenever no transition guard is satisfied the
program terminates.
contains a condition $\tau$, called \textit{guard}, that must be true for the
current state to be taken. Whenever a transition is taken it can modify the
state with some \textit{update} $\eta$. An update assigns a new value to each
program variable depending on the previous state. Every integer program starts
at a unique location usually called $\ell_0$. Whenever no transition guard is
satisfied the program terminates. The arguments to the program are given by some
starting state $s_0$, assigning a value for every program variable.
We use a graphical representation of programs, as shown in
\fref{fig:ex_pip_nd}. Updates assign a value to every program variable,
however for better readability, the updates that preserve the value of a program
variable are omitted.
or probabilistic, which is called non-deterministic or probabilistic
sampling. Non-deterministic sampling occurs when on a transition a variable is
updated to an expression that contains unbound variables (variables that are not
assigned a value in the previous state). The unbound variables are sampled
arbitrarily from all the integers, but can be constrained by the guard, in which
case the transition is only taken if the non-deterministic sampling fulfills the
guard. If the sampled value fulfills no transition's guard it means the program
or probabilistic, which is called non-deterministic or probabilistic sampling.
Non-deterministic sampling occurs when on a transition a variable is updated to
an expression that contains unbound variables (variables that are not part of
the set of program variables). The unbound variables are sampled arbitrarily
from all the integers, but can be constrained by the guard, in which case the
transition is only taken if the non-deterministic sampling fulfills the guard.
If the sampled value fulfills no transition's guard it means the program
\Sref{distributions}. Updates are restricted to integer polynomials with
variables and additionally distributions when probabilistic sampling is allowed.
The value of the indeterminate in the polynomial is equal to the (possibly
temporary) variable or sampled from the distribution with the corresponding
probability.
\Sref{sec:probability}. Updates are expanded to integer polynomials over the
variables and distributions. The value of the indeterminate in the polynomial is
equal to the (possibly temporary) variable or sampled from the distribution with
the corresponding probability.
\item $\Loc_\Prog$ is a finite non-empty set of locations,
\item $\GT_\Prog$ is a finite set of general transitions, where a
general transition $g \in \GT_\Prog$ is a finite non-empty set of
transitions $t = (l, p, \tau, \eta, \kappa, l') \in \T_\Prog \subseteq
\GT_\Prog$ with:
\item $\Loc_\Prog \subseteq \Loc$ is a finite non-empty set of locations,
\item $\GT_\Prog \subseteq \GT$ is a finite set of general transitions,
where a general transition $g \in \GT_\Prog$ is a finite non-empty
set of transitions $t = (\ell, p, \tau, \eta, \ell') \in \T_\Prog
\subseteq \GT_\Prog$ with:
transitions share a common start location which we call $l_g$.
\item $l_0 \in \Loc$ is the start location of the program.
transitions share a common start location which we call $\ell_g$.
\item $\ell_0 \in \Loc$ is the start location of the program.
Before defining the semantics of a \gls{pip} a new special location $l_\bot$ is
introduced that will be used to indicate the termination of a program as well as
a virtual transition $t_\bot$ and general transition $g_\bot = \{t_\bot\}$ with
the same purpose. Instead of just \enquote{terminating} a program takes the
transition $g_\bot$ to the location $l_\bot$ if and only if no other general
Before defining the semantics of a \gls{pip} a new special location $\ell_\bot$
is introduced that will be used to indicate the termination of a program as well
as a virtual transition $t_\bot$ and general transition $g_\bot = \{t_\bot\}$
with the same purpose. Instead of just \enquote{terminating} a program takes the
transition $g_\bot$ to the location $\ell_\bot$ if and only if no other general
$l_\bot$ the run loops indefinitely on the location $l_\bot$. In addition a new
transition $t_\text{in}$ is added for the start of the program.
$\ell_\bot$ the run loops indefinitely on the location $\ell_\bot$. In addition a
new transition $t_\text{in}$ is added for the start of the program.
defined as $\confs_\Prog = (\Loc_\Prog \union \{l_\bot\}) \times (\T_\Prog \union
\{t_{\text{in}}, t_\bot\}) \times \Sigma$.
Note that additionally to the location and state, the last transition taken to
the current location is remembered as well. This won't be relevant for the partial
evaluation but is important to define the probabilistic properties of a
\gls{pip}.
defined as $\confs_\Prog = (\Loc_\Prog \union \{\ell_\bot\}) \times (\T_\Prog
\union \{t_{\text{in}}, t_\bot\}) \times \Sigma$. Note that additionally to the
location and state, the last transition taken to the current location is
remembered as well. This won't be relevant for the partial evaluation but is
important to define the probabilistic properties of a \gls{pip}.
set of all runs is defined as $\runs_\Prog = \confs_\Prog^\omega$. In contrast a finite
prefix is a finite sequence of configurations. The set of all finite prefixes is
defined as $\fpath_\Prog = \confs_\Prog^*$.
set of all runs is defined as $\runs_\Prog = \confs_\Prog^\omega$. In contrast a
finite prefix is a finite sequence of configurations. The set of all finite
prefixes is defined as $\fpath_\Prog = \confs_\Prog^*$.
\begin{lemma}[Proposition 7.1.1 in \cite{puterman1994markov}]
For a random variable $X$ describing the total reward of a Markov
decision process, if the expected reward $\ESs(X)$ is finite for all
schedulers $\scheduler \in \HDS$ and initial states $s_0$, the following
holds:
\begin{equation*}
\sup_{\scheduler \in \HDS}{\ESs(X)} = \sup_{\scheduler \in
\MDS}{\ESs(X)}
\end{equation*}
\end{lemma}
Let $\scheduler \in \MDS^\Prog$ be a scheduler and $s_0$ be an initial state.
First, the probability for a program to start at a configuration $c$ is 1 only
for the configuration that has the initial state $s_0$ at the start location
$l_0$ coming from the initial transition $t_\text{in}$. All other configurations
are invalid starting configurations and get assigned a zero probability by the
probability measure $\prSs : \confs_\Prog \rightarrow [0,1]$
Let $\scheduler \in \MDS_\Prog$ be a scheduler and $s_0 \in \Sigma$ be an
initial state. First, the probability for a program to start at a configuration
$c$ is 1 only for the configuration that has the initial state $s_0$ at the
start location $\ell_0$ coming from the initial transition $t_\text{in}$. All
other configurations are invalid starting configurations and get assigned a zero
probability by the probability measure $\prSs : \confs_\Prog \rightarrow [0,1]$
the configuration $c$ with $c' = (l', t', s')$ and $t' = (l, p, \tau, \eta, \_,
l') \in g$ depends on three things: First the transition $t \in g$ is taken with
the probability $p$, second the probability that each program variable
$x\in\PV$ is updated to the value $s'(x)$ by the probabilistic update - recall
definition \ref{def:prob_update}, third that the temporary variables are
the configuration $c$ with $c' = (\ell', t', s')$ and $t' = (\ell, p, \tau,
\eta, \_, \ell') \in g$ depends on three things: First the transition $t \in g$
is taken with the probability $p$, second the probability that each program
variable $x\in\PV$ is updated to the value $s'(x)$ by the probabilistic update -
recall definition \ref{def:prob_update}, third that the temporary variables are
$\prSs(f) > 0$. A run $c_0c_1\dots \in \runs_\Prog$ is \textit{admissible} if and
only if every finite prefix $c_0\dots c_n \in\fpath_\Prog$ is admissible.
$\prSs(f) > 0$. A run $c_0c_1\dots \in \runs_\Prog$ is \textit{admissible}
if and only if every finite prefix $c_0\dots c_n \in\fpath_\Prog$ is
admissible.
program is in the terminal location $l_\bot$ and the following configuration
must again be in the terminal location $l_\bot$. A terminating run must end with
program is in the terminal location $\ell_\bot$ and the following configuration
must again be in the terminal location $\ell_\bot$. A terminating run must end with
For a scheduler $\scheduler$ and a starting state $s_0 \in \Sigma$ the
expected runtime complexity $\Rt_{\scheduler,s_0}(\Prog)$ of a program
$\Prog$ is defined as the expected value of the runtime complexity.
For a scheduler $\scheduler \in \MDS_\Prog$ and a starting state $s_0 \in
\Sigma$ the expected runtime complexity $\Rt_{\scheduler,s_0}(\Prog)$ of a
\gls{pip} $\Prog$ is defined as the expected value of the runtime
complexity.
for a given starting state $s_0$ and scheduler $\scheduler \in \MDS$, the expected
runtime depends on the initial variable $x$. The probability for decrementing
$x$ once follows a geometric distribution. The runtime for decrementing $x$
until zero ($x$ times) is
for a given starting state $s_0 \in \Sigma$ and scheduler $\scheduler \in \MDS$, the
expected runtime depends on the initial variable $x$. The probability for
decrementing $x$ once follows a geometric distribution. The runtime for
decrementing $x$ until zero ($x$ times) is
For $s_0\in\Sigma$ and a \gls{pip} $\Prog$, $\RB \in \B$ is an expected runtime
bound for $s_0$, if
For $s_0\in\Sigma$ and a \gls{pip} $\Prog$, $\RB \in \B$ is an expected
runtime bound if
case for a scheduler $\scheduler$ and a starting state $s_0$ there is only a
single run $c_0c_1\dots$ with probability $\PrSs(c_0c_1\dots) = 1$ and the
expected runtime defines the runtime of the program.
case for a scheduler $\scheduler$ and an initial state $s_0 \in \Sigma$ there is
only a single run $c_0c_1\dots \in \runs_\Prog$ with probability
$\PrSs(c_0c_1\dots) = 1$ and the expected runtime defines the runtime of the
program.
\subsection{Graph Operations}
The operations $\Tin : \Loc \rightarrow 2^\T$, $\Tout : \Loc \rightarrow 2^\T$,
$\GTin : \Loc \rightarrow 2^\GT$, and $\GTout: \Loc \rightarrow 2^\GT$ return
the set of (general) transitions starting or ending in the given location. A
\gls{pip} $\Prog$ describes a graph $G_\Prog$ with vertices $\Loc_\Prog$ and
edges $\T_\Prog$. Let $\G$ be the set of graphs over vertices $\Loc$ and
transitions $\T$. Adding a transition and possibly new vertices (locations)
therein to a graph is denoted by the plus operator $+: \G \times \T \rightarrow
\G$. Removing a transitions from a graph, while preserving the vertices therein,
is denoted by the minus operator $- : \G \times \T \rightarrow \G$.
\begin{definition}[\acrfull{scc}]
A set of transitions $\S \subseteq \T_\Prog$ is called a \emph{component} of
$\Prog$ with locations $V(\S)= \set{\ell, \ell'}{(\ell, \_, \_, \_,\ell') \in
\S}$. A component $\S \subseteq \T_\Prog$ is \emph{strongly connected} when
for every two locations $\ell, \ell' \in V(\S)$ there exists a path between
the two using only the transitions from $\S$.
\end{definition}
% -----
Adding a transition and possibly new locations therein to a graph is denoted by
the plus operator $+: \G \times \T \rightarrow \G$
\subsection{Strongly connected components}
A given \gls{pip} induces two relations on locations: the first relates two
locations if a transition exists from one to the other, and the second relates
them if a path exists from one to the other.
% \begin{figure}
% \centering
% \begin{subcaptionblock}[t]{0.4\textwidth}
% \centering
% \input{figures/ch1_classic}
% \caption{Classic integer program $\Prog_1$\label{fig:classic}}
% \end{subcaptionblock}
% \begin{subcaptionblock}[t]{0.5\textwidth}
% \centering
% \input{figures/ch1_classic_pe}
% \caption{(UNFINISHED) Partial evaluation of
% $\Prog_1$\label{fig:classic_pe}}
% \end{subcaptionblock}
% \caption{Program with hard to find runtime-complexity bounds using only
% \gls{mprf}.\cite{giesl2022arxiv}}
% \end{figure}
Consider the program $\Prog_1$ displayed in \fref{fig:classic}. It contains two
program variables $x, y$ and one temporary variable $u$ which is used to
Consider the program $\Prog_1$ displayed in \fref{fig:classic_c}. It contains
two program variables $x, y$ and one temporary variable $u$ which is used to
clearly terminates. The first loop increments $x$ up to 3 if $x$ is
positive to begin with. The second loop decrements $y$ while $y$ is positive.
However, the classical analysis using only \gls{mprf} fails to find a finite
clearly terminates. The first loop increments $x$ up to 3 if $x$ is positive to
begin with. The second loop decrements $y$ while $y$ is positive. However, the
classical analysis using only \gls{mprf} fails to find a finite
of \gls{cfr} on sub-\gls{scc} level, the program (see \fref{fig:classic_pe}) can
be transformed into an equivalent program where the analysis with \gls{mprf}
succeeds at finding a finite runtime-complexity bound.
of \gls{cfr} on sub-\gls{scc} level, the program (see \fref{fig:classic_pe_c})
can be transformed into an equivalent program where the analysis with
\gls{mprf} succeeds at finding a finite runtime-complexity bound.
\fref{fig:prob}. The variables $x$ and $y$ are now only in/-decremented with
50\% probability in every iteration. The program will probably always terminate,
since a longer runtime gets increasingly unlikely. The current version of
\gls{koat} fails to find finite runtime and expected complexity bounds for this
program. It would be nice to transform $\Prog_2$ into an equivalent program like
the one shown in \fref{fig:prob_pe} similarly to the \gls{cfr} technique
presented by \citeauthor{giesl2022lncs}\cite{giesl2022lncs} and
\citeauthor{Domenech19}\cite{Domenech19}.
\fref{fig:prob_c}. The variables $x$ and $y$ are now only in/-decremented with
50\% probability in every iteration. The program will probably always
terminate, since a longer runtime gets increasingly unlikely. The current
version of \gls{koat} fails to find finite runtime and expected complexity
bounds for this program. It would be nice to transform $\Prog_2$ into an
equivalent program like the one shown in \fref{fig:prob_pe_c} similarly to the
\gls{cfr} technique presented by \citeauthor{giesl2022lncs}\cite{giesl2022lncs}
and \citeauthor{Domenech19}\cite{Domenech19}.
RWTH University and aim at finding lower and respectively upper
runtime-complexity and size bounds on transition systems. At its core, KoAT
computes bounds for subprograms using \gls{mprf} or \gls{twn}, and then putting
the subprograms together generate upper runtime-complexity and size bounds for
the whole program. Optionally to improve the analysis, \gls{koat} can resort to
partial evaluation as a control-flow refinement technique
on a sub-\gls{scc} level\cite{giesl2022lncs}.
RWTH University and aim at finding lower and respectively upper
runtime-complexity bounds on transition systems. At its core, KoAT computes
bounds for subprograms using \gls{mprf} or \gls{twn}, and then putting the
subprograms together in order to generate upper runtime-complexity and size
bounds for the whole program. Optionally to improve the analysis, \gls{koat} can
resort to partial evaluation as a control-flow refinement technique on a
sub-\gls{scc} level\cite{giesl2022lncs}.
\gls{apron} is the overall tool, taking part in competitions. It works on
various input formats and transforms them accordingly to be used with
specialized tools. \gls{koat} and \gls{loat} are used under the hood of \gls{apron} for
the overall analysis.
independent consecutive loops. iRankFinder successfully implemented the
control-flow refinement technique by partial evaluation proposed by
independent consecutive loops. iRankFinder implemented the control-flow
refinement technique by partial evaluation proposed by
\Sref{sec:preliminaries}, focusing especially on constraints, probability, and
(probabilistic) integer programs. In \Sref{sec:theory} the \gls{cfr} techniques
from iRankFinder are adapted to \Gls{pip}, proving the theoretical
correctness of the construction. \Sref{sec:implementation} will focus on the
changes made to \gls{koat} and the challenges met during implementation of the
technique in OCaml. Finally in \Cref{sec:conclusion} the new version of
\gls{koat} will be compared to its predecessor with regard to the found
runtime-complexity and size bounds as well as the overall computational
performance of the tool using the problems from the \gls{tpdb}.
\Cref{ch:preliminaries}, focusing especially on constraints, probability
theory, and (probabilistic) integer programs.
In \Cref{ch:theory} the \gls{cfr} techniques from iRankFinder are adapted to
\Glspl{pip}. We will prove the theoretical correctness of the construction.
Furthermore, the technique of sub-\gls{scc} level control-flow-refinement is
presented.
\Cref{ch:implementation} will focus on the changes made to \gls{koat}, and the
performance improvements made with the new implementation. Finally
in \Cref{ch:conclusion} we will recapitulate the results of this thesis and give
an outlook on future research.
\newacronym{fvs}{FVS}{Feedback Vertex Set}