\documentclass[conference,10pt]{IEEEtran}
\newfont{\fsc}{eusm10}
\newfont{\fscs}{eusm10 scaled 700}
\DeclareMathAlphabet{\mathbbm}{U}{bbm}{m}{n} % blackboard bold
\usepackage{amsmath}
\usepackage{hyperref}
\usepackage{theorem}\theorembodyfont{\rmfamily}
\usepackage{url}
\usepackage{amsmath} % \boldsymbol
\usepackage{dsfont}
\usepackage{epsfig}
\usepackage{amssymb}
\usepackage{color}
\usepackage{tikz}
\renewcommand{\|}{\mathbin{\scalebox{1}[0.885]{\raisebox{0.28pt}{$|$}}\hspace{-1.7mm}\upharpoonright}}
\newcommand{\R}{\mbox{\fsc R}}
\newcommand{\Rs}{{\mbox{\fscs R}}}
\newcommand{\pow}{\mbox{\fsc P}}
\newcommand{\cterms}{{\rm T}(\Sigma)}
\newcommand{\termset}{\mathbb{T}(\Sigma)}
\newcommand{\rhs}{{\it rhs}}
\newcommand{\lhs}{{\it lhs}}
\newcommand{\diam}[1]{\langle#1\rangle}
\newcommand{\trans}[1]{\stackrel{#1}{\longrightarrow}}
\newcommand{\ntrans}[1]{\hspace{4pt}\not\hspace{-4pt}\stackrel{#1\ }{\longrightarrow}}
\newcommand{\var}{{\it var}}
\newcommand{\ar}{{\it ar}}
\renewcommand{\phi}{\varphi}
\newtheorem{definition}{Definition}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}
\newtheorem{lemma}{Lemma}
\newtheorem{proposition}{Proposition}
\newtheorem{example}{Example}
\newtheorem{remark}{Remark}
\newtheorem{observation}{Observation}
\newenvironment{proof}[1][Proof]{\begin{trivlist}
\item[\hskip \labelsep {\bfseries #1}]}{\end{trivlist}}
\newcommand{\qed}{\nobreak \ifvmode \relax \else
\ifdim\lastskip<1.5em \hskip-\lastskip
\hskip1.5em plus0em minus0.5em \fi \nobreak
\vrule height0.75em width0.5em depth0.25em\fi}
\begin{document}
\pagestyle{plain}
\title{Precongruence Formats with Lookahead\\ through Modal Decomposition}
\author{
\IEEEauthorblockN{Wan Fokkink \hspace{2.7in} Rob van Glabbeek}
\IEEEauthorblockA{Vrije Universiteit Amsterdam, The Netherlands \hspace{1in}
% {w.j.fokkink@vu.nl}
Data61, CSIRO, Sydney, Australia\hspace{.5in}\mbox{}\\
\hspace{3.6in} University of New South Wales, Sydney, Australia
%{rvg@cs.stanford.edu}
}}
\date{}
\maketitle
\begin{abstract}
\textsc{Bloom, Fokkink \& van Glabbeek} (2004) presented a method to decompose formulas from Hennessy-Milner logic with regard to a structural operational semantics specification. A term in the corresponding process algebra satisfies a Hennessy-Milner formula if and only if its subterms satisfy certain formulas, obtained by decomposing the original formula. They used this decomposition method to derive congruence formats in the realm of structural operational semantics. In this paper it is shown how this framework can be extended to specifications that include bounded lookahead in their premises. This extension is used in the derivation of a congruence format for the partial trace preorder.
\end{abstract}
\section{Introduction}
Structural operational semantics \cite{Plo04} provides specification languages with an interpretation.
A transition system specification (TSS), consisting of an algebraic signature and a set of transition rules
of the form $\frac{\rm premises}{\rm conclusion}$, generates a labelled transition system consisting of
transitions between the closed terms over the signature. Transition rules may contain lookahead, meaning that
the right-hand side of a premise may occur in the left-hand side of a premise.
Lookahead appears for example in the structural operational semantics of a process algebra for process creation \cite{BaVa92}, an axiomatisation of the process algebra ACP$_\tau$ \cite{vGl11}, timed LOTOS \cite{LL98}, the stochastic timed process algebra EMPA \cite{Ber97}, a probabilistic bisimulation tester \cite{DL12}, and the synchronous programming language Esterel \cite{Mou09}. It also plays an important role in parsing algorithms for e.g.\ Java \cite{DosReis12}. The usefulness of lookahead in formal semantics in the context of agent systems is advocated in \cite{HR07}.
Hennessy-Milner logic (HML) \cite{HM85} is a dynamic logic to specify properties of a labelled transition system.
Larsen \cite{Lar86} showed how to decompose formulas from HML with respect to a TSS in the De Simone format
\cite{Sim85}. In \cite{FvGdW06} this decomposition method was extended to the tyft/tyxt format \cite{GV92}, which allows lookahead.
As a step towards this end they used a transformation from \cite{FvG96}
to derive so-called ``$P$-ruloids'' from a TSS $P$: transition rules in which the left-hand sides of premises are single variables.
A rule has bounded lookahead if all chains of forward dependencies between its premises are finite.
In \cite{FvGdW06} each ruloid with unbounded lookahead is replaced by an equivalent ruloid
with bounded lookahead, by endowing each infinite forward chain with
an ordinal count-down. This step is needed because HML, even with infinite conjunctions, cannot capture unbounded lookahead.
An equivalence is a congruence for a given process algebra if it is preserved by all functions in the signature.
This is an important property, notably to fit a semantics into an axiomatic framework. Different syntactic formats have been
developed for TSSs, to guarantee this property for specific semantics,
i.e.\ for specific behavioural equivalences or preorders.
Most of these congruence formats, notably the De Simone format, GSOS \cite{BIM95} and the ready simulation format \cite{vGl93}, disallow lookahead.
In \cite{BFvG04} the decomposition method for HML is exploited to derive congruence formats, in the context of structural operational semantics, for a wide range of semantics.
It takes the ready simulation format as starting point, so the obtained
congruence formats are limited to transition rules that have no
lookahead. With regard to their congruence format for partial trace semantics,
the open question was posed whether the method can be extended to allow some form of lookahead.
Doing this is the aim of the current paper.
The key idea in the decomposition method from \cite{BFvG04} is that a congruence format $\mathfrak{F}$ for a semantics must ensure that the formulas
in a modal characterisation of this semantics are always decomposed into formulas that are again in this modal characterisation.
To obtain such a property one needs that if all rules of a TSS $P$ are in $\mathfrak{F}$-format,
then so are all $P$-ruloids that are needed in the decomposition methods from \cite{FvGdW06}.
However, the ruloids produced by the transformation from unbounded to bounded lookahead in \cite{FvGdW06} violate most congruence
formats, including the partial trace format from \cite{BFvG04}. (This is no surprise, as a partial trace format cannot allow unbounded
lookahead; see Ex.\ \ref{exa:unbounded}.)
Lookahead is intrinsically difficult because it establishes an indirect, transitive relation between variables in (proofs using) transition rules.
We introduce the notion of a ``general'' $P$-ruloid, which intuitively means that different
occurrences of the same variable in the rule are linked to each other through its proof; i.e., after renaming
some but not all occurrences of a variable $x$ to another variable $y$, the resulting rule is no
longer provable (by means of that proof). We show that the decomposition method from \cite{FvGdW06} can be restricted to
general ruloids. Next we show that general
ruloids preserve bounded lookahead. This opens the door to deriving congruence formats that allow bounded lookahead.
As a concrete example we extend the partial trace format from \cite{BFvG04} with bounded lookahead. We prove that the resulting partial trace format is preserved by general ruloids, which implies that it is a congruence format for the partial trace preorder. This answers the open question from \cite{BFvG04}.
Concluding, this paper develops machinery to cope with bounded lookahead in the context of
structural operational semantics and modal logic, and applies it to obtain a congruence
format for the partial trace preorder. We conjecture that the same machinery also makes it
possible to develop congruence formats that allow bounded lookahead for the decorated trace and
ready simulation equivalences, which would provide a positive answer to another open question in \cite{BFvG04}.
\section{Preliminaries}
\label{sec:preliminaries}
This section presents the basic notions of partial trace semantics, HML and structural operational semantics, as well as the decomposition method from \cite{FvGdW06}.
\subsection{Hennessy-Milner logic}
A \emph{labelled transition system (LTS)} is a pair $(\mathds{P},\rightarrow)$ with $\mathds{P}$ a set of \emph{processes} and $\rightarrow\;\subseteq\mathds{P}\times A\times \mathds{P}$ a \emph{transition relation}, for a set $A$ of \emph{actions}. We call each $(p,a,q)\in\;\rightarrow$ a \emph{transition}, and write it as $p \trans{a} q$.
A sequence $a_1\cdots a_n\in A^\ast$ is a {\em (partial) trace} of a $p\in\mathds{P}$ if $p\trans{a_1}\cdots\trans{a_n}p'$ for some $p'\in\mathds{P}$. We write $p\sqsubseteq_T q$ if the set of partial traces of $p$ is included in that of $q$.
Properties of processes can be formulated in modal logic.
Hennessy-Milner logic (HML) \cite{HM85}
characterises the bisimulation equivalence relation on processes.
The set $\mathds{O}$ of \emph{HML formulas} is defined by the BNF grammar
$\phi ::= \bigwedge_{i\in I}\phi_i\mid\diam{a}\phi\mid\neg\phi$ where $a$ ranges over $A$ and $I$ is any index set.
The satisfaction relation $\models\;\subseteq \mathds{P}\times \mathds{O}$ is defined as usual. In particular,
$p\models\diam{a}\phi$ iff $p\trans{a}p'$ and $p'\models\phi$ for some $p'\in\mathds{P}$.
We use $\phi_1\wedge\phi_2$ and $\top$ as abbreviations of $\bigwedge_{i\in\{1,2\}}\phi_i$ and $\bigwedge_{i\in\emptyset}\phi_i$, respectively.
We write $\phi\mathbin\equiv\phi'$ if $p\mathbin{\models}\phi\Leftrightarrow p\mathbin{\models}\phi'$ for each process $p$ in each LTS.
The set $\mathds{O}_T$ of \emph{partial trace observations} is defined by the BNF grammar $\phi ::= \top\mid\diam{a}\phi$
where $a$ ranges over $A$. Given an LTS $(\mathds{P},\rightarrow)$, let ${\cal O}_T(p)$ denote $\{\phi\in\mathds{O}_T\mid p\models \phi\}$. The class $\mathds{O}_T^\equiv$ denotes the closure of $\mathds{O}_T$ under $\equiv$.
\begin{proposition}[\cite{vGl01}]\label{prop:O_characterizes}
$p \sqsubseteq_T q \Leftrightarrow {\cal O}_T(p)\subseteq {\cal O}_T(q)$.
%% \Leftrightarrow {\cal O}_T^\equiv(p)\subseteq {\cal O}_T^\equiv(q)$.
\end{proposition}
\subsection{Transition system specifications}
$V$ is an infinite set of variables with $|V|\geq|A|$.
Let $\var(S)$ denote the set of variables that occur in a syntactic object $S$.
We say that $S$ is \emph{closed} if $\var(S)=\emptyset$.
A \emph{signature} is a set $\Sigma$ of function symbols $f\not\in V$, with $|\Sigma|\leq|V|$, equipped with an arity function $\ar:\Sigma\rightarrow\mathbbm{N}$. The set $\termset$ of \emph{terms} over a signature $\Sigma$ is defined recursively by:
$V\subseteq\termset$, and if $f\in\Sigma$ and $t_1,\ldots,t_{\ar(f)}\in\termset$ then $f(t_1,\ldots,t_{\ar(f)})\in\termset$.
Let $\cterms$ denote the set of closed terms over $\Sigma$.
A $\Sigma$-\emph{substitution} $\sigma$ is a function from $V$ to $\termset$.
Let $\sigma(S)$ denote the syntactic object obtained from $S$ by replacing
each occurrence of all $x\mathbin\in V$ in $S$ by $\sigma(x)$.
A $\Sigma$-substitution $\sigma$ is {\em closed} if $\sigma(x)\in\cterms$ for all $x\in V$.
A \emph{$\Sigma$-literal} (or \emph{transition}) is an expression $t \trans{a} t'$ with $t,t'\in \termset$ and $a \in A$.
A \emph{transition rule}
is
of the form $\frac{H}{\alpha}$ with $H$ a set of $\Sigma$-literals (the \emph{premises} of the rule) and $\alpha$ a $\Sigma$-literal (the \emph{conclusion}).
The left- and right-hand side of $\alpha$ are called the \emph{source} and
\emph{target} of the rule.
A \emph{transition system specification (TSS)} is a pair $(\Sigma,R)$ with $R$ a collection of transition rules over $\Sigma$.
The purpose of a TSS $(\Sigma,R)$ is to specify an LTS
$(\cterms,\rightarrow)$ with as processes the closed terms over
$\Sigma$ and as transition relation a set of closed literals
$\rightarrow\;\subseteq\cterms\times A\times\cterms$.
Let $P\mathbin=(\Sigma,R)$ be a TSS. An {\em irredundant proof} from $P$ of a transition rule $\frac{H}{\alpha}$ is a well-founded, upwardly branching tree whose nodes are labelled by $\Sigma$-literals, and some of the leaves are marked ``hypothesis'', such that:
the root is labelled by $\alpha$,
$H$ is the set of labels of the hypotheses, and
if $\beta$ is the label of a node $q$ which is not a hypothesis and $K$ is the set of labels of the nodes directly above $q$, then $\frac{K}{\beta}$ is a substitution instance of a transition rule in $R$.
Note that if a leaf in a proof from $P$ is not marked as hypothesis, then it is a substitution instance of a rule without premises in $R$.
A {\em proof} from $P$ of $\frac{K}{\alpha}$ is an irredundant proof from $P$ of $\frac{H}{\alpha}$ with $H\subseteq K$.
The proof of $\frac{H}{\alpha}$ is called irredundant because $H$ must equal (instead of include) the set of labels of the hypotheses. Rules that are derived with an irredundant proof may inherit certain syntactic structure from the transition rules in the TSS from which they are derived; in standard proofs this syntactic structure is usually lost, because arbitrary literals can be added as premises of derived rules. Irredundancy of proofs is essential in applications of our decomposition method to derive congruence formats for TSSs \cite{BFvG04}.
\subsection{Syntactic restrictions on transition rules}
We present some definitions for transition rules; the majority stems from \cite{GV92}.
A rule is \emph{univariate} if its source does not contain multiple occurrences of the same variable.
A \emph{tytt rule} is a transition rule in which the right-hand sides of premises are distinct variables that
do not occur in the source. % reworded in csl17
A univariate tytt rule is \emph{tyxt} if its source is a variable, and \emph{tyft} if its source contains exactly one function symbol.
A tytt rule is \emph{xytt} if the left-hand sides of its premises are variables. An \emph{xyft rule} is a tyft rule that is also an xytt rule.
The \emph{dependency graph} of a rule with premises $\{t_i\trans{a_i}t_i'\mid\linebreak[3] i\mathbin\in I\}$
is a directed graph with as edges $\{\langle x,y\rangle\mid x\mathbin\in\var(t_i)\mbox{ and }y\mathbin\in\var(t_i')\mbox{ for some }i\mathbin\in I\}$.
A rule is \emph{well-founded} if each backward chain of edges in its dependency graph is finite. A
rule has \emph{lookahead} if there is a variable in the right-hand side of a premise that also occurs in
the left-hand side of a premise; the lookahead is \emph{bounded} if each forward chain of edges in
the dependency graph is finite.
A variable in a rule is \emph{free} if it occurs in neither the source nor the right-hand sides of premises of this rule.
A rule is \emph{pure} if it is well-founded and does not contain free variables.
Each combination of syntactic restrictions on transition rules induces a corresponding syntactic format of the same name for TSSs. For example, a TSS is in \emph{pure tyft/tyxt format} iff it contains only pure tyft and tyxt rules.
\subsection{Partial trace format}
A preorder is a \emph{precongruence} if, for all functions $f$ in the signature, $p_i\sqsubseteq q_i$ for all $i=1,\ldots,\ar(f)$
implies $f(p_1,\ldots,p_{\ar(f)})\sqsubseteq f(q_1,\ldots,q_{\ar(f)})$.
Likewise, an equivalence is a \emph{congruence} if it is preserved by all functions in the signature.
Here we extend the precongruence format for the partial trace preorder from \cite{BFvG04} with bounded lookahead,
answering an open question from \cite{BFvG04}.
Let $\Lambda$ be a predicate on $\{(f,i)\mid
f \in \Sigma,\,1 \leq i \leq \ar(f)\}$; intuitively it marks those arguments
of function symbols that may contain processes that have started running. For example, the first
argument of the sequential composition operator from process algebra is typically marked by $\Lambda$, but
the second argument of this operator generally is not (cf.\ the process algebra APC in Sect.~\ref{sec:apc}).
If $\Lambda(f,i)$, then the argument $i$ of $f$ is called {\em $\Lambda$-liquid};
else it is {\em $\Lambda$-frozen}.
An occurrence of a variable $x$ in a term $t \in \termset$ is
{\em at a $\Lambda$-liquid position} if either $t=x$, or $t=f(t_1,\ldots,t_{\ar(f)})$
and the occurrence of $x$ is $\Lambda$-liquid in $t_i$ for some $\Lambda$-liquid argument $i$ of $f$.
A variable in a tytt rule over $\Sigma$ is {\em $\Lambda$-floating}
if either it occurs as the right-hand side of a premise,
or it occurs exactly once in the source, at a $\Lambda$-liquid position.
\begin{definition}\rm
%[partial trace format]
\label{def:format}
Let $\Lambda$ be a predicate on the arguments of function symbols.
A tytt rule is {\em $\Lambda$-partial trace safe} if:
\begin{itemize}
\item it has bounded lookahead, and
\item each $\Lambda$-floating variable has at most one occurrence
in total in the left-hand sides of the premises and in the target;
this occurrence must be at a $\Lambda$-liquid position.
\end{itemize}
A TSS is in {\em partial trace format} if it is in tyft/tyxt format and its rules
are $\Lambda$-partial trace safe with respect to some $\Lambda$.
\end{definition}
This format extends the partial trace format from \cite{BFvG04} in allowing bounded lookahead.
By abuse of terminology we reuse the format name from \cite{BFvG04} for our more liberal format.
\begin{remark}
If a TSS is in partial trace format, then there is a smallest predicate $\Lambda$
for which all its rules are $\Lambda$-partial trace safe; it is \emph{generated} by the second condition above.
\end{remark}
\noindent
This paper develops the machinery to prove that the partial trace preorder induced by
a TSS in partial trace format is a precongruence (see Cor.\ \ref{cor:main}).
The next example shows that the partial trace format cannot allow unbounded lookahead.
\begin{example}\label{exa:unbounded}
Let $p\trans{a}r_i$, $q\trans{a}r_i$, $r_{i+1}\trans{a}r_i$ for all $i\in{\mathbb Z}_{\geq 0}$, and $q\trans{a} q$.
Clearly $q\sqsubseteq_T p$, as the partial traces of both processes are $a^i$ for all $i\in{\mathbb Z}_{\geq 0}$.\vspace{-2pt}
Let the unary function symbol $f$ be defined by the xyft rule
$\frac{\{x_i\trans{a}x_{i+1}\mid i\in{\mathbb Z}_{\geq 0}\}}{f(x_0)\trans{b}{\bf 0}}$,\vspace{-4pt}
where ${\bf 0}$ is a constant.
Then $f(q)\not\sqsubseteq_T f(p)$, because $f(q)\trans{b}{\bf 0}$ while $f(p)$ cannot perform a $b$-transition.
\end{example}
\noindent
The next example shows that the partial trace format cannot allow multiple occurrences of a $\Lambda$-floating variable
in left-hand sides of premises.
\begin{example}
Let $p\trans{a}p'$, $p\trans{a}p''$, $p'\trans{b}{\bf 0}$, and $p''\trans{c}{\bf 0}$.\vspace{-2pt}
Moreover, let $q\trans{a}q'$, $q'\trans{b}{\bf 0}$, and $q'\trans{c}{\bf 0}$.
Clearly $q\sqsubseteq_T p$, as the completed traces of both processes are $ab$ and $ac$.\vspace{-2pt}
Let the unary function symbol $f$ be defined by the xyft rule
$\frac{x\trans{a}y~~y\trans{b}z~~y\trans{c}z'}{f(x)\trans{d}{\bf 0}}$.\vspace{-3pt}
Then $f(q)\not\sqsubseteq_T f(p)$, because $f(q)\trans{d}{\bf 0}$ while $f(p)$ cannot perform a $d$-transition.
\end{example}
\noindent
Since $p\sqsubseteq_T q$ allows that $p\ntrans{a}$ (i.e., $p$ cannot perform any $a$-transitions)
while $q\trans{a}q'$, clearly the partial trace format cannot contain so-called negative premises
$t\ntrans{a}$; cf.\ \cite[Ex.\ 13---aliased 11.2]{BFvG04}. (For partial trace
\emph{equivalence}, $\Lambda$-frozen variables can be allowed to occur in negative premises; see
\cite[Thm.\ 9---aliased 11.3]{BFvG04}.) Moreover, negative premises do not combine well with
lookahead; cf.\ \cite[Ex.\ 7---aliased 3.15]{FvGdW06}. For these reasons the current paper
focuses on so-called positive TSSs that do not contain negative premises.
\subsection{Application: Algebra for process creation}
\label{sec:apc}
{\sc Baeten \& Vaandrager} \cite{BaVa92} defined a process algebra APC for process creation. Its structural
operational semantics contains one transition rule with lookahead. From our congruence format it follows immediately
that the partial trace preorder is a precongruence for APC. For simplicity only part of its syntax and semantics is presented here;
some auxiliary operators needed for the axiomatisation and the so-called encapsulation operator are omitted.
APC contains the following constants: actions $a$ from a set $A$, successful termination $\varepsilon$, and deadlock $\delta$.
The rules are:\vspace*{-3mm}
\[
\frac{~}{a\trans{a}\varepsilon} \hspace{2cm} \frac{~}{\varepsilon\trans{\surd}\delta}
\]
\noindent
Let $e$ range over $A\cup\{\surd\}$. The two rules for the binary alternative composition operator $+$ are:\vspace*{-2mm}
\[
\frac{x_1\trans{e}y_1}{x_1+x_2\trans{e}y_1} \hspace{2cm} \frac{x_2\trans{e}y_2}{x_1+x_2\trans{e}y_2}
\]
\noindent
The asymmetric binary parallel composition
%& operator
$\|$ assumes a partially defined communication function $|\mathop{:}A\times A\rightarrow A$; it
passes through a termination signal $\surd$ from the right side only.\vspace*{-5mm}
\[
\begin{array}{c}
{\displaystyle \frac{x_1\trans{a}y_1}{x_1\|x_2\trans{a}y_1\|x_2} \hspace{2cm} \frac{x_2\trans{e}y_2}{x_1\|x_2\trans{e}x_1\|y_2}} \vspace{3mm} \\
{\displaystyle \frac{x_1\trans{a}y_1~~~~x_2\trans{b}y_2~~~~~~a|b=c}{x_1\|x_2\trans{c}y_1\|y_2}}
\end{array}
\]
The unary operator {\bf new} creates a process that is put in parallel with an existing process.\vspace*{-7mm}
\[
\frac{~}{{\bf new}(x)\trans{\surd}x\cdot\delta} \hspace{1.5cm} \frac{x\trans{a}y}{{\bf new}(x)\trans{a}{\bf new}(y)}
\]
\noindent
The second and third rule for the binary sequential composition operator $\cdot$ below
are unusual: they spawn a parallel component. The last rule contains lookahead.\vspace*{-5mm}
\[
\begin{array}{c}
{\displaystyle \frac{x_1\trans{a}y_1}{x_1\cdot x_2\trans{a}y_1\cdot x_2} \hspace{1.5cm} \frac{x_1\trans{\surd}y_1~~~~x_2\trans{e}y_2}{x_1\cdot x_2\trans{e}y_1\|y_2}} \vspace{3mm} \\
{\displaystyle \frac{x_1\trans{\surd}y_1~~~~y_1\trans{a}z_1~~~~x_2\trans{b}y_2~~~~~~a|b=c}{x_1\cdot x_2\trans{c}z_1\|y_2}}
\end{array}
\]
\noindent
These are all tyft rules, because in each rule the source contains a single function symbol and the variables in the source and in the right-hand sides of the premises are all distinct.
Furthermore, these rules clearly all have bounded lookahead.
We take the arguments of $\|$ and {\bf new} and the first argument of $\cdot$ to be $\Lambda$-liquid,
and the arguments of $+$ and the second argument of $\cdot$ to be $\Lambda$-frozen.
Each $\Lambda$-floating variable has at most one occurrence in total in the left-hand sides
of the premises and in the target; this occurrence is in all cases at a $\Lambda$-liquid position.
Hence the TSS is in the partial trace format. The transition rules for the omitted operators are also
in this format. So the partial trace preorder is a precongruence with regard to APC.
\subsection{Decomposition of HML formulas}\label{sec:decomposition_hml_formulas}
In \cite{FvGdW06} it was shown how to decompose HML formulas with respect to process terms, given a TSS in tyft/tyxt format. The decomposition method uses a collection of pure xytt rules extracted from this TSS, called {\em ruloids}.
We require that there is a proof of a transition \mbox{$p \trans{a}q$}, with $p$ a closed substitution instance of a term $t$, iff there exists a proof that uses at the root a ruloid with source $t$.
\begin{definition}\rm\label{def:ruloids}
A collection $\R$ of pure xytt rules is called \emph{a suitable set of ruloids} for a TSS $P=(\Sigma,R)$ if
for each $t\in\termset$, $p\in\cterms$ and closed substitution $\sigma$, the transition
\raisebox{0pt}[0pt][0pt]{$\sigma(t)\trans a p$} is provable from $P$ iff there are a ruloid $\frac{H}{t\trans a u}\in\R$ and a closed substitution $\sigma'$ where $\sigma'(\alpha)$ is provable from $P$ for all $\alpha \in H$, $\sigma'(t)=\sigma(t)$ and $\sigma'(u)=p$.
\end{definition}
\noindent
Let $\R$ be a collection of ruloids with bounded lookahead that is suitable for a TSS $P$.
The following definition from \cite{FvGdW06} assigns to each term $t\in\termset$ and each
observation $\phi\mathbin\in\mathds{O}$ a collection $t^{-1}_\Rs(\phi)$ of decomposition mappings
$\psi:V\mathbin{\rightarrow}\mathds{O}$. A closed substitution instance
$\sigma(t)$ satisfies $\phi$ iff for some $\psi \in t^{-1}_\Rs(\phi)$, $\sigma(x)$ satisfies the formula $\psi(x)$ for all $x\in\var(t)$.
\begin{definition}\rm
%[decomposition of HML]
\label{def:inverse}
Let
$\R$ be a collection of ruloids with bounded lookahead, suitable for a TSS $P=(\Sigma,R)$.
Then $\cdot_\Rs^{-1}:\termset\rightarrow(\mathds{O}\rightarrow\pow(V\rightarrow\mathds{O}))$ is defined by:
\begin{itemize}
\item $\psi\in t_\Rs^{-1}(\diam{a}\phi)$ iff there is a ruloid $\frac{H}{t\trans{a}u} \in\R$
and a $\chi\in u^{-1}_\Rs(\phi)$ such that $\psi:V\rightarrow\mathds{O}$ is given by
\[\psi(x)=\left\{
\begin{array}{@{}l@{~~~~}l}
\displaystyle{\bigwedge_{(x\trans{b}y)\in H}\!\!\!\!\!\diam{b}\psi(y)} ~\land~ \chi(x) & \mbox{if $x\in\var(u)$}\\[4ex]
\displaystyle{\bigwedge_{(x\trans{b}y)\in H}\!\!\!\!\!\diam{b}\psi(y)} & \mbox{if $x\not\in\var(u)$.}
\end{array}
\right.\]
\item $\psi\in t_\Rs^{-1}({\displaystyle\bigwedge_{i\in I}}\phi_i)$ iff $\psi(x)= \displaystyle{\bigwedge_{i\in I}\psi_i(x)}$ where $\psi_i\in t_\Rs^{-1}(\phi_i)$ for all $i\in I$.\vspace{1.5mm}
\item $\psi\in t_\Rs^{-1}(\neg\phi)$ iff there is a function $h:t_\Rs^{-1}(\phi)\rightarrow\var(t)$ such that $\psi:V\rightarrow\mathds{O}$ is given by
$\psi(x)=\hspace*{-2mm}\displaystyle{\bigwedge_{\chi\in h^{-1}(x)}\!\!\!\neg\chi(x)}$.
\end{itemize}
\end{definition}
\noindent
This recursive definition is well-founded because the ruloid employed in the
case $t^{-1}_\Rs(\diam{a}\phi)$ has bounded lookahead.
We note that, in contrast to the setting of \cite{BFvG04} without lookahead, $x\notin\var(t)$ here does not imply $\psi(x)\equiv\top$.
\begin{example}
Consider the TSS of xyft rules
$\frac{x\trans{a}y}{f(x)\trans{b}g(y)}$ and $\frac{x\trans{c}y~~~~y\trans{d}z}{g(x)\trans{e}{\bf 0}}$.
Let the suitable collection of ruloids $\R$ contain $\frac{y\trans{a}x}{f(y)\trans{b}g(x)}$ and
$\frac{x\trans{c}y~~y\trans{d}z}{g(x)\trans{e}{\bf 0}}$.
We calculate a $\psi\in f(y)_\Rs^{-1}(\diam{b}\diam{e}\top)$. For a start, the ruloid $\frac{y\trans{a}x}{f(y)\trans{b}g(x)}$
yields $\psi(y)=\diam{a}\psi(x)$ and $\psi(x)=\chi(x)$ for some $\chi\in g(x)_\Rs^{-1}(\diam{e}\top)$. For the calculation of
$\chi$ we use the
ruloid $\frac{x\trans{c}y~~y\trans{d}z}{g(x)\trans{e}{\bf 0}}$. This yields $\chi(x)=\diam{c}\chi(y)$
and $\chi(y)=\diam{d}\chi(z)$ and $\chi(z)=\top$. Concluding, $\psi(y)=\diam{a}\diam{c}\diam{d}\top$.
The syntactic overloading of $y$, i.e.\ its occurrence in both the first and second ruloid,
underlines the importance of the separate case in the definition of $\psi\in t_\Rs^{-1}(\diam{a}\phi)$
in Def.\ \ref{def:inverse}, for $x\notin\var(u)$. Else we would get $\psi(y)=\diam{a}\psi(x)\land\chi(y)$, yielding
a spurious conjunct $\diam{d}\top$ for $\psi(y)$.
\end{example}
\noindent
We reformulate the decomposition result from \cite{FvGdW06}.
\begin{theorem}\label{thm:decomposition}
\hspace{-1pt}
Let
$\R$ be a collection of ruloids with bounded lookahead, suitable for a TSS $P=(\Sigma,R)$.
Then for each $t\in\termset$, $\sigma:V\rightarrow\cterms$ and $\phi\in\mathds{O}$:
\[
\sigma(t)\models\phi~~\Leftrightarrow~~
\exists\psi\in t^{-1}_\Rs(\phi)\,\forall x\in\var(t):\sigma(x)\models\psi(x)
\]
\end{theorem}
In \cite{FvGdW06} $P$ was required to be in tyft/tyxt format, and a specific collection $\R$ was constructed.
However, the proof only uses that $\R$ has the property of Def.~\ref{def:ruloids}. Moreover, the
requirement that $P$ be in tyft/tyxt format was needed merely to ensure that such an $\R$ can be found.
\subsection{Construction of ruloids}
We briefly sketch the extraction of ruloids from a TSS $P$ in tyft/tyxt format, as employed in \cite{FvGdW06}.
First, employing a conversion from \cite{GV92},
if the source of a rule is of the form $x$ then this variable is replaced by a term
$f(x_1,\ldots,x_{{\it ar}(f)})$ for each $f\in\Sigma$. This yields an intermediate TSS
$P^\dagger$ in tyft format, all of whose rules are provable from $P$. Next, using a construction
from \cite{FvG96}, the left-hand sides of premises are reduced to variables. Roughly the idea is, given
a premise $f(t_1,\ldots,t_{{\it ar}(f)})\trans{a}y$ in a rule $r$, and a rule
$\frac{H}{f(x_1,\ldots,x_{{\it ar}(f)})\trans{a}t}$, to transform $r$ by replacing the
aforementioned premise by $H$, $y$ by $t$, and the $x_i$ by the $t_i$; this is repeated
(transfinitely) until all premises with a non-variable left-hand side have disappeared.
Each infinite sequence of such substitutions converges to an infinite sequence of variable
replacements; these variables are unified.
The result is a TSS $P^\ddagger$ in xyft format, all of whose rules are provable from
$P^\dagger$ \cite{FvG96}. Next, the premises for which there is no backward chain in the dependency
graph to a variable in the source are eliminated, by substituting closed terms for the variables in
such premises. The resulting TSS in pure xyft format is denoted by $P^+$;
its rules are provable from $P^\ddagger$ \cite{FvG96}.
By \cite[Prop.~3---aliased~3.4]{FvGdW06} the pure xytt rules irredundantly
provable from $P^+$ constitute a suitable collection of ruloids for $P$.
\begin{example}\label{exa-closure}
Consider the process algebra APC from Sect.\ \ref{sec:apc}, with
$A=\{a,b,c,d,e\}$ and a communication function that includes $a|b=c$ and $c|d=e$.
The following ruloid can be derived using the third rule for parallel composition and the third rule for sequential composition.
\[
\frac{x_1\trans{\surd}y_1~~~~y_1\trans{a}z_1~~~~x_2\trans{b}y_2~~~~x_3\trans{d}y_3}{(x_1\cdot x_2)\|x_3\trans{e}(z_1\|y_2)\|y_3}
\]
\end{example}
Using \cite[Lem.~2.10]{FvG96}, it follows that these ruloids are provable from $P$.
Hence another suitable collection of ruloids is given by \emph{all} pure xytt rules provable from
$P$, or all pure xytt rules \emph{irredundantly} provable from $P$.
In Sect.~\ref{sec:general} we will define \emph{$P$-general ruloids} such that each
pure xytt rule irredundantly provable from $P$ is a substitution instance of a
$P$-general ruloid with the same source.
This implies that the collection of $P$-general ruloids is suitable.
In \cite{FvGdW06} an additional step in the construction of ruloids was made to ensure that they all
have bounded lookahead. Each ruloid with unbounded lookahead was replaced by an equivalent ruloid
with bounded lookahead, by endowing each infinite forward chain with
an ordinal count-down.
However, the ruloids produced by this step violate most congruence formats. (A notable exception is the full
tyft/tyxt format, as a congruence format for bisimulation semantics; see \cite[Cor.\ 1---aliased 4.1]{FvGdW06}.)
In particular, starting with a TSS in partial trace format, this step produces ruloids in which
$\Lambda$-floating variables may have multiple occurrences in left-hand sides of premises. This
is no surprise, as in Ex.\ \ref{exa:unbounded} it was shown that the partial trace
format must exclude unbounded lookahead.
%
In this paper we avoid this additional step by considering only TSSs in tyft/tyxt format with bounded lookahead.
We will prove in Sect.\ \ref{sec:bounded} that for such TSSs $P$, each $P$-general ruloid has bounded lookahead.
\section{Structured proofs and general rules}
\label{sec:general}
The following example shows that the second condition of the partial trace format is not always
preserved by irredundant proofs of pure xytt rules.
\begin{example}\label{exa:violate}
Consider the TSS with bounded lookahead consisting of the xyft rules\vspace*{-1mm}
\[
\frac{\{y_{i+1}\trans{a}y_i\mid i\in{\mathbb Z}_{\geq 0}\}}{f(x)\trans{b}g(x,y_0)} \hspace{.5cm} \frac{~}{x\trans{a}x} \hspace{.5cm} \frac{x\trans{c}y}{f(x)\trans{d}f(y)}\vspace*{-.5mm}
\]
In view of the third rule, the argument of $f$ is $\Lambda$-liquid. So by the first rule, the arguments of $g$ are $\Lambda$-liquid as well.
Clearly the TSS is in partial trace format.
Substituting $z$ for $x$ and for all $y_i$ in the first rule as well as for $x$ in the second rule, we can derive the rule \raisebox{.5mm}{$\frac{}{f(z)\trans{b}g(z,z)}$}. The two occurrences of the $\Lambda$-floating variable $z$ in the target violate the partial trace format.
\end{example}
\noindent
This counter-example is spurious: the derived rule is a substitution instance of the rule \raisebox{.5mm}{$\frac{}{f(z)\trans{b}g(z,z')}$} with $z\neq z'$, which does adhere to the partial trace format.
The latter rule can be derived in a similar fashion, by substituting $z'$ (instead of $z$) for all $y_i$ in the first rule as well as for $x$ in the second rule.
The irredundant proof of \raisebox{.5mm}{$\frac{}{f(z)\trans{b}g(z,z)}$} is not ``general'': there is no need to replace the two arguments of $g$ in the target by the same variable.
On the other hand, the same variable must be substituted for all the $y_i$, so that the premises of the first rule can be derived by the second rule in the TSS.
We will show that a suitable collection of ruloids
is formed by the so-called ``general'' pure xytt rules,
which are derived by an irredundant proof in which terms $\sigma(x)$ and $\sigma(y)$ with $x\neq y$ only have variables in common
if this is imposed by the proof. \vspace*{-.25mm}For example, let the TSSs $P_1$ and $P_2$ both contain
the rule $\frac{x\trans a y}{f(x)\trans{b}y}$,
while $P_1$ \vspace*{.75mm}contains \raisebox{.5mm}{$\frac{}{g(x)\trans{a}x}$} and $P_2$ contains \raisebox{.5mm}{$\frac{}{g(x)\trans{a}y}$}.
The rule \raisebox{.5mm}{$\frac{}{f(g(z))\trans{b}z}$} is \vspace*{.5mm}irredundantly provable from both $P_1$
and $P_2$; however, this rule is $P_1$-general but not $P_2$-general.
In contrast, the rule \raisebox{.5mm}{$\frac{}{f(g(z))\trans{b}z'}$} is $P_2$-general, \vspace*{-.75mm}but not provable from $P_1$.
\newcommand{\plat}[1]{\raisebox{0pt}[0pt][0pt]{$#1$}} % no vertical space
\newcommand{\co}[1]{{\it concl}(#1)} % conclusion of a proof EXPAND?
\newcommand{\tp}{{\it top}} % premises in a proof
\newcommand{\s}{s_\pi}
In Sect.\ \ref{sec:bounded} and \ref{sec:guarantee} it will be shown that general rules do preserve the partial trace format.
To formally define the notion of a general rule, we first provide an
alternative characterisation of (irredundant) provability, roughly following \cite{FvG96}.\vspace*{-2mm}
\begin{definition}\rm
%[proof structure]
\label{def:proof structure}\rm
Let $\pi=(B,\alpha,\phi)$ where:
\begin{itemize}
\item
$B$ is a set of transition rules which do not have any variables in common,
\item
$\alpha$ is a literal of the form $\s \trans{a} w$ with $\s \in \termset$ and $w\mathbin\in V$ such
that $\var(\alpha)\cap\var(B)\mathbin=\emptyset$ and $w\mathbin{\notin}\var(\s)$,
and
\item
$\phi$ is an injective mapping from $B$ to $\{\alpha \}\cup \{\beta\mid\beta$ a premise of a rule in $B\}$, such that
\begin{itemize}
\item $\co{b}$ and $\phi(b)$ carry the same action for all $b\mathbin\in B$,
and
\item all chains $b_0,b_1,b_2,\ldots$ in $B$ with each $\phi(b_{i+1})$ a premise of $b_i$ are finite.
\end{itemize}
\end{itemize}
In the sequel, the {\em premises} of $\pi$ are $\alpha$ and the
premises of rules in $B$, and $\tp(\pi)$ denotes the collection of
premises of $\pi$ that are outside the image of $\phi$.
A rule $b_0\in B$, or a premise of $b_0$, is {\em above} a premise
$\beta$ if there exists a chain $b_1,\ldots,b_n$ in $B$ with $\phi(b_i)$
a premise of $b_{i+1}$ for all $0 \leq i < n$ and $\phi(b_n)=\beta$.
$\pi$ is a {\em proof structure} if each rule in $B$ is above $\alpha$. It
is a proof structure {\em over} a TSS $P=(\Sigma,R)$ if each
rule in $B$ is in $R$ modulo alpha-conversion (i.e., renaming of variables).
A substitution $\sigma$ {\em matches} $\pi$ if
$\sigma(\s)=\s$ and, for each $b\mathbin\in B$,
$\sigma(\co b)=\sigma(\phi(b))$.
\end{definition}
\begin{proposition}\label{proof-structure}
A rule \plat{\frac{H}{\gamma}} is provable from a TSS $P$ iff there exists a
proof structure $\pi=(B,\alpha,\phi)$ over $P$ and a substitution
$\sigma$ that matches $\pi$, such that $\sigma(\tp(\pi))\subseteq H$ and
$\sigma(\alpha)=\gamma$. It is irredundantly provable if $\sigma(\tp(\pi))=H$.
\end{proposition}
\begin{proof}
Given a proof structure $\pi=(B,\alpha,\phi)$ and a matching
substitution $\sigma$, an irredundant proof of
\plat{\sigma(\frac{\tp(\pi)}{\alpha})} is obtained as the (multi)set of
premises of $\pi$, each premise $\beta$ labelled by $\sigma(\beta)$,
ordered into a tree by the ``above'' relation; $\tp(\pi)$ will be the set of ``hypotheses''.
Conversely, each irredundant proof $\pi$ of a rule $\frac{H}{t\trans a u}$ can be converted into a
proof structure $(B,\alpha,\phi)$ by replacing each non-hypothesis
node in $\pi$ by an incarnation of the transition rule applied in
that node, where, using alpha-conversion, all incarnations are
given disjoint sets of variables. Take $\alpha := (t \trans a w)$
for a fresh variable $w$.
When the rules for each two nodes have disjoint variable sets, the substitutions used in all
nodes can be united into one substitution matching the entire proof structure.
One point of concern in the above construction is whether there are
enough variables to allocate a disjoint set of variables to the
rules for each node in $\pi$. As $V$ is infinite, this constraint
is satisfied if the number of nodes in $\pi$ is not larger than
$|V|$, which is the case if the branching degree of $\pi$, i.e.\ the
number of premises in each rule, is no larger than $|V|$. In
\cite{FvG96} this was achieved by means of a requirement on TSSs,
namely of being ``small''. Here we just make sure that the set of
{\em all} literals is not larger than $|V|$. This is a simple
consequence of our requirements that $|\Sigma| \leq |V|$ and $|A|
\leq |V|$ (cf.\ Lem.\ 6---aliased 6.4---in \cite{BFvG04}).\hspace{-9pt}
\qed
\end{proof}
A different proof of a small variation of this characterisation can be found in \cite{FvG96}.\vspace{1ex}
\begin{example}\label{exa:running}
Consider the TSS in Ex.\ \ref{exa:violate}. As running example in this section
we introduce a proof structure that has the following shape, where downward arrows depict $\phi$.
\iftrue
\begin{tikzpicture}
\node [above] at (-1.5,.5) {$\cdots$};
\node [below] at (-1.5,-0.2) {$\cdots$};
\draw [<-] (0,0) -- (0,.5);
\node [above] at (0,.5) {$x_1\trans{a}x_1$};
\node [below] at (0,0) {$y_2\trans{a}y_1$};
\draw [<-] (2,0) -- (2,.5);
\node [above] at (2,.5) {$x_0\trans{a}x_0$};
\node [below] at (2,0) {$y_1\trans{a}y_0$};
\draw (-2,-.7) -- (3,-.7);
\node [below] at (0.4,-.7) {$f(x)\trans{b}g(x,y_0)$};
\draw [->] (0.2,-1.5) -- (0.2,-2);
\node [below] at (0.0,-2.05) {$f(z)\trans{b}w$};
\end{tikzpicture}
\fi
\noindent
One matching substitution $\sigma_1$ maps $w$ to $g(z,z)$ and all other variables to $z$.
Another matching substitution $\sigma_2$ maps $w$ to $g(z,z')$, $x$ to $z$ and all other variables to $z'$.
\end{example}
\noindent
A variable $x$ in a proof structure $\pi$ is \emph{prime} if there
exists a matching substitution $\sigma$ for $\pi$ with $\sigma(x)$ a variable.
The relation $\sim_\pi$ relates those prime variables that are mapped to the
same term by each matching substitution for $\pi$.
\begin{definition}\rm
%[prime variable]
\label{equivalence}\rm
Let $\pi=(B,\alpha,\phi)$ be a proof structure.
Let $\sim_\pi$ be the least equivalence relation on $\termset$ satisfying:
\begin{itemize}
\item if $b=\frac{H}{t\trans{a}u}\in B$ and $\phi(b)=(t'\trans{a}u')$
then $t \sim_\pi t'$ and $u' \sim_\pi u$, and
\item if $f(t_1,\ldots,t_k) \sim_\pi f(u_1,\ldots,u_k)$ then $t_i \sim_\pi u_i$
for all $i=1,\ldots,k$.
\end{itemize}
A variable $x\in \var(\pi)$ is {\em composite} if $x \sim_\pi t$ with
$t\not\in V$, and {\em prime} otherwise.
\end{definition}
\begin{observation}\label{matches}
A substitution $\sigma$ matches $\pi$ iff $\sigma(\s)=\s$ and
$\sigma(t)=\sigma(u)$ for all terms $t,u\in\termset$ with $t\sim_\pi u$.
\end{observation}
\begin{example}
For the proof structure in Ex.\ \ref{exa:running}, $x_i\sim_\pi y_i$ and
$x_i\sim_\pi y_{i+1}$ for all $i\mathbin\in{\mathbb Z}_{\geq 0}$.
Moreover, $x \mathbin\sim_\pi z$ and $w \mathbin\sim_\pi g(x,y_0)$.
So the two equivalence classes of prime variables modulo $\sim_\pi$ are
$\{x_i,y_i\mid i\mathbin\in{\mathbb Z}_{\geq 0}\}$ and $\{x,z\}$.
\end{example}
\noindent
A substitution is \emph{minimal} for a proof structure if it is matching and provides as little
syntactic structure to (substitution instances of) variables as possible,
and induces as few identifications of variables as possible.
\begin{definition}\rm
%[general rule]
\label{general}\rm
A substitution $\rho$ for a proof structure $\pi$ is \emph{minimal} if:
\begin{itemize}
\item $\rho(x)=x$ for each $x\mathbin\in\var(\s)$ and $\rho(x)\mathbin\in V$ for each prime variable $x\mathbin\in \var(\pi)$,
\item $\rho(x)=\rho(y)$ iff $x\sim_\pi y$, for each pair of prime
variables $x,y \in \var(\pi)$, and
\item $\rho(t)=\rho(u)$ for each two terms $t,u\in\termset$ with $t\sim_\pi u$.
\end{itemize}
A rule $r$ is \emph{$P$-general} if there exists a proof structure
$\pi=(B,\alpha,\phi)$ over $P$ and a substitution $\rho$ that is minimal for $\pi$
such that $r=\rho(\frac{\tp(\pi)}{\alpha})$.
The pair $(\pi,\rho)$ is called a \emph{structured proof} of $r$ from $P$.
\end{definition}
\begin{example}
The first matching substitution $\sigma_1$ for the proof structure in Ex.\ \ref{exa:running}
is not minimal, because it maps the variables in the two equivalence classes modulo $\sim_\pi$
to the same variable $z$.
The second matching substitution $\sigma_2$ for this proof structure \vspace*{-.75mm}is minimal, meaning that
the rule \raisebox{.5mm}{$\frac{}{f(z)\trans{b}g(z,z')}$} is general with regard to the TSS in Ex.\ \ref{exa:violate}.
\end{example}
\noindent
The following proposition is a pivotal result for this paper.
\begin{proposition}\label{prop:instance}
A rule is irredundantly provable from a TSS $P$ iff it is a
substitution instance of a $P$-general rule with the same source.
\end{proposition}
\begin{proof}
\fbox{$\Leftarrow$} Let the rule $r$ be a substitution instance of a rule
$\rho(\frac{\tp(\pi)}{\s\trans a w})$ with the same source,
where $\pi=(B,\s\trans a w,\phi)$
is a proof structure over $P$ and $\rho$ a minimal substitution for $\pi$.
Then $r\mathbin=\sigma(\rho(\frac{\tp(\pi)}{\s\trans a w}))$ for some substitution $\sigma$.
By assumption, $\sigma(\rho(\s))=\rho(\s)=\s$.
By Obs.\ \ref{matches} and the third requirement on minimal substitutions, $\rho$ matches $\pi$.
Therefore, also $\sigma\circ\rho$ matches $\pi$, so by
Prop.\ \ref{proof-structure} $r$ is irredundantly provable from $P$.
\fbox{$\Rightarrow$} Let the rule $r$ be irredundantly provable from $P$.\vspace{-2pt}
By Prop.\ \ref{proof-structure} $r\mathbin=\sigma(\frac{\tp(\pi)}{\s\trans a w})$
\vspace*{-1mm}for some proof structure $\pi=(B,\s\trans a w,\phi)$ and a matching substitution $\sigma$.
We will now construct a substitution $\rho$ that is minimal for $\pi$, and a substitution $\nu$ with
$\sigma=\nu\circ\rho$. This immediately yields the required result.
For each $\sim_\pi$-equivalence class $C$ of prime variables we pick a
$y_C\in C$ and take $\rho(x):=y_C$ for all $x\in C$---if possible we choose $y_C\in\var(\s)$.
This way the first two requirements of a minimal substitution are met.
In particular, if $x,y\mathbin\in\var(\s)$ with $x\sim_\pi y$ then $\sigma(x)\mathbin=x$
and $\sigma(y)\mathbin=y$, which implies that
$x$ and $y$ are prime, and by Obs.\ \ref{matches}
$x\mathbin=\sigma(x)\mathbin=\sigma(y)\mathbin=y$; thus $\rho(x)\mathbin=x$.
Moreover, take $\nu(y_C):=\sigma(y_C)$, so that $\sigma(x)=\sigma(y_C)=\nu(y_C)=\nu(\rho(x))$
for all $x\in C$, using Obs.\ \ref{matches}.
The substitution $\nu$ satisfies $\nu(z)=z$ for all other variables $z$.
With structural induction on $\sigma(x)$ we proceed to
define $\rho(x)$ for composite variables $x\in\var(\pi)$, such that
$\sigma(x)=\nu(\rho(x))$. Simultaneously,
with structural induction on $\sigma(t) (=\sigma(u))$, we establish
$\rho(t)=\rho(u)$ for each pair of terms $t,u$ with $t\sim_\pi u$.
Let $t,u\mathbin{\notin} V$ be terms with $t\sim_\pi u$.
Let $t\mathbin=f(t_1,\dots,t_k)$ and $u\mathbin=g(u_1,\dots,u_m)$.
By Obs.\ \ref{matches} $\sigma(t)=\sigma(u)$, so $f=g$ and $k=m$.
By Def.\ \ref{equivalence} $t_i \sim_\pi u_i$,
so by induction $\rho(t_i)=\rho(u_i)$, for all $i=1,\ldots,k$, and hence $\rho(t)=\rho(u)$.
Now let $x\in\var(\pi)$ be composite; say $x \sim_\pi t$ for some term $t\notin V$.
By Obs.\ \ref{matches} $\sigma(x)=\sigma(t)$, so for each
$y\in\var(t)$ the term $\sigma(y)$ is a proper subterm of $\sigma(x)$.
By induction, $\rho(y)$ has already been defined before we get to
defining $\rho(x)$, and $\sigma(y)=\nu(\rho(y))$. Hence $\rho(t)$
is well-defined, and $\sigma(t)=\nu(\rho(t))$, so we can take $\rho(x):=\rho(t)$,
thereby obtaining $\sigma(x)=\nu(\rho(x))$.
By the argument above, this definition is independent of the choice of $t$.
Finally, if $x\sim_\pi y$ and one of these variables is
composite, then both are composite and $x \sim_\pi t \sim_\pi y$ for some term $t\notin V$.
Now $\rho(x)=\rho(y)$ follows by transitivity.
\qed
\end{proof}
\begin{example}
With regard to the TSS in Ex.\ \ref{exa:violate}, the irredundantly provable rule
\raisebox{.5mm}{$\frac{}{f(z)\trans{b}g(z,z)}$} is a substitution instance of the general rule \raisebox{.5mm}{$\frac{}{f(z)\trans{b}g(z,z')}$}.
\end{example}
\noindent
Now we define a \emph{$P$-general ruloid} as a $P$-general pure xytt rule.
It follows from Prop.\ \ref{prop:instance} that each irredundantly provable pure xytt rule
is a substitution instance of a $P$-general ruloid with the same source,
so that the $P$-general ruloids form a suitable collection of ruloids for $P$.
We now consider TSSs in univariate tytt format; these syntactic restrictions are part of all congruence formats in the literature.
The following definition makes relations between different occurrences of a variable $z$ in a structured proof explicit.
The underlying idea of its first case is that syntactic structure is inherited in an upward fashion at the left-hand side of
each branch of a proof, and in a downward fashion at the right-hand side.
The second case expresses that occurrences of a variable $x$ in a rule in the proof inherit syntactic
structure from the (unique) occurrence of $x$ in the source or in a right-hand side of a premise of this rule.
The third case expresses that if a variable $x$ is free in a rule in the proof, then all occurrences of $x$
are syntactically linked to each other.
\begin{definition}\rm
%[variable occurrence graph]
\label{graph}\rm
Let $(\pi,\rho)$ with $\pi=(B,\alpha,\phi)$ be a structured proof from a TSS in univariate tytt format.
An \emph{occurrence} of a variable $z$ in this proof is represented by a triple $\theta = (b,\iota,\eta)$
with either $b\in B$ or $b=\alpha$, $\iota$ an occurrence (i.e.\ position) of a variable $x$ in $b$,
and $\eta$ an occurrence of $z$ in $\rho(x)$.
Sometimes we address such an occurrence as $\langle b,\iota_t,\eta\rangle$ where $\iota_t$ is
an occurrence of a \emph{term} $t$ in $b$, and $\eta$ an occurrence of $z$ in $\rho(t)$.
The relations $\rightarrow_z$ and $\leftrightsquigarrow_z$ between the occurrences
of $z$ in $(\pi\hspace{-.5pt},\rho\hspace{-.5pt})$ are given by:
\begin{itemize}
\item if $b=\frac{H}{t\trans{a}u}\in B$ and $\phi(b)=(t'\trans{a}u')$,
writing $b'$ for the rule (or $\alpha$) containing the premise $\phi(b)$
and $\iota_t$, $\iota_{t'}$, $\iota_u$ and $\iota_{u'}$ for the indicated
occurrences of $t$ in $b$, $t'$ in $b'$, $u$ in $b$ and $u'$ in $b'$, respectively,
then $\langle b',\iota_{t'},\eta\rangle \rightarrow_z \langle b,\iota_t,\eta\rangle$
for any occurrence $\eta$ of $z$ in $\rho(t')=\rho(t)$, and
$\langle b,\iota_u,\eta^\dagger\rangle \rightarrow_z \langle b',\iota_{u'},\eta^\dagger\rangle$
for any occurrence $\eta^\dagger$ of $z$ in $\rho(u)=\rho(u')$;
\item if $b\in B$ and $\eta$ is an occurrence of $z$ in $\rho(x)$ for some $x\in\var(b)$,
then $(b,\iota,\eta) \rightarrow_z (b,\iota',\eta)$
where $\iota$ is an occurrence of $x$ either in the source of
$b$ or in the right-hand side of a premise of $b$,
and $\iota'$ is an occurrence of $x$ in the left-hand side of a
premise or in the target of $b$;
\item if $\eta$ is an occurrence of $z$ in $\rho(x)$ with $x\mathbin\in\var(b)$, and
either $b\mathbin=\alpha$, or $b\mathbin\in B$ and $x$ occurs neither in the source of $b$
nor in the right-hand sides of its premises, then $(b,\iota,\eta) \leftrightsquigarrow_z (b,\iota',\eta)$
for $\iota$ and $\iota'$ any two different occurrences of $x$.
\end{itemize}
Let $\sim_z$ denote the smallest equivalence relation containing
${\rightarrow_z} \cup {\leftrightsquigarrow_z}$.
\end{definition}
\begin{example}\label{exa:relations}
Consider the structured proof from Ex.\ \ref{exa:running}, after applying the matching
substitution $\sigma_2$ to it.
\vspace{2mm}
\iftrue
\begin{tikzpicture}
\draw [thick, ->] (-0.5,1) arc [radius=1, start angle=120, end angle=60];
\draw [thick, ->] (1.5,1) arc [radius=1, start angle=120, end angle=60];
\node [above] at (-1.5,.5) {$\cdots$};
\node [below] at (-1.5,-0.2) {$\cdots$};
\draw [thick, ->] (-0.5,0) -- (-0.5,.5);
\draw [thick, <-] (0.5,0) -- (0.5,.5);
\draw [thick, ->] (0.8,-0.35) -- (1.25,-0.35);
\draw [thick, ->] (-1.2,-0.35) -- (-0.75,-0.35);
\node [above] at (0,.5) {$z'\trans{a}z'$};
\node [below] at (0,0) {$z'\trans{a}z'$};
\draw [thick, ->] (1.5,0) -- (1.5,.5);
\draw [thick, <-] (2.5,0) -- (2.5,.5);
\draw [thick, ->] (2.5,-.55) -- (1.4,-.9);
\node [above] at (2,.5) {$z'\trans{a}z'$};
\node [below] at (2,0) {$z'\trans{a}z'$};
\draw (-2,-.7) -- (3,-.7);
\node [below] at (0.4,-.7) {$f(z)\trans{b}g(z,z')$};
\draw [thick, ->] (-0.4,-1.4) arc [radius=1.4, start angle=240, end angle=300];
\draw [thick, <-] (-0.4,-1.55) -- (-0.4,-2.05);
\draw [thick, ->] (1,-1.55) -- (1,-2.05);
\draw [thick, ->] (1.4,-1.55) -- (1.4,-2.05);
\node [below] at (0.4,-1.95) {$f(z)\trans{b}g(z,z')$};
\end{tikzpicture}
\fi
\noindent
The relations $\rightarrow_z$ and $\rightarrow_{z'}$ are depicted by arrows. (The arrows depicting
$\phi$ have been omitted here.)
\end{example}
\noindent
We partition the variable occurrences in a rule $r$ into three
types: we speak of an \emph{incoming} occurrence if it occurs in the source of
$r$, or in the right-hand side of a premise; an \emph{upwards outgoing}
occurrence if it occurs in the left-hand side of a premise; and a
\emph{downwards outgoing} occurrence if it occurs in the target of $r$.
This applies to rules $\rho(b)$ associated to a structured proof $(\pi,\rho)$
with $\pi=(B,\alpha,\phi)$ and $b\in B$; it also applies to $\rho(\alpha)$ by
considering this literal to be a premise.
This terminology is motivated by the following observation on the above
constructed graph of occurrences of a variable $z$ in a structured proof $(\pi,\rho)$.
\begin{observation}\rm\label{classification}
If $(b,\iota,\eta) \rightarrow_z (b',\iota',\eta')$ then either
\begin{itemize}
\item $b'\mathbin=b$, $\iota$ is an incoming occurrence and $\iota'$ an (upwards or downwards) outgoing one, or
\item $\phi(b')$ is a premise of $b$, $\iota$ is an upwards outgoing occurrence in $b$, and
$\iota'$ is an incoming occurrence in $b'$, or
\item $\phi(b)$ is a premise of $b'$, $\iota$ is a downwards outgoing occurrence in $b$, and
$\iota'$ is an incoming occurrence in $b'$.
\end{itemize}
\end{observation}
\begin{example}
Consider the arrows in the picture in Ex.\ \ref{exa:relations} that depict the relations
$\rightarrow_z$ and $\rightarrow_{z'}$. The upward arrows are from an upwards outgoing
to an incoming occurrence of $z$ or $z'$, the downward arrows from a downwards outgoing
to an incoming occurrence of $z$ or $z'$, the straight horizontal arrows from an incoming to an
upwards outgoing occurrence of $z'$, and the diagonal and curved horizontal arrows from an incoming
to a downwards outgoing occurrence of $z$ or $z'$.
\end{example}
\begin{observation}\label{unique outgoing}
For each outgoing occurrence $\theta$ of $z$ in $(\pi,\rho)$ there is at most
one incoming occurrence $\theta'$ of $z$ with $\theta \rightarrow_z \theta'$.
There is none iff $\theta$ occurs in $\tp(\pi)$.
The occurrence $\theta'$ is $\Lambda$-liquid (for a given predicate
$\Lambda$ on arguments of function symbols) iff $\theta$ is $\Lambda$-liquid.
\end{observation}
This last statement is trivial, because $\theta$ and $\theta'$ are the same
occurrences in a term $\rho(t)$ occurring in $b$ as well as in $b'$.
The remainder of this section is dedicated to a key proposition that will be needed in the proofs in Sect.\ \ref{sec:bounded} and \ref{sec:guarantee}.
It states that for a structured proof $\pi$, if a variable $z$ occurs in the right-hand side
of a premise in $\tp(\pi)$ or exactly once in $s_\pi$, then it is related to all other
occurrences of $z$ in $\pi$ through $\rightarrow_z$. We start with some lemmas needed for the
proof of Prop.~\ref{chain}.
\begin{lemma}\label{prime occurrence}
Each variable $z$ occurring in a structured proof $(\pi,\rho)$ has the form
$\rho(x)$ for a prime variable $x\in\var(\pi)$.
\end{lemma}
\begin{proof}
With structural induction on $\rho(y)$ for any $y\in\var(\pi)$ we show that each
$z \in \var(\rho(y))$ has the form $z=\rho(x)$ for a prime variable $x\in\var(\pi)$.
\begin{itemize}
\item In case $y$ is prime, $\rho(y)\in V$ by the first clause of Def.\ \ref{general},
so $z=\rho(y)$ and we are done.
\item Suppose that $y$ is composite. So $y \sim_\pi t$ for some $t\notin V$,
and by the third clause of Def.\ \ref{general}, $\rho(y)=\rho(t)$.
Hence $z$ occurs in $\rho(t)$, and therefore in $\rho(y')$ for a variable $y'$
occurring in $t$. Since $\rho(y')$ is a proper subterm of $\rho(y)$, by
induction $z=\rho(x)$ for a prime variable $x\in\var(\pi)$.
\qed
\end{itemize}
\end{proof}
\begin{lemma}\label{inherited}
Let $\theta=\langle b,\iota,\eta\rangle$ and $\theta'=\langle b',\iota',\eta\rangle$ be two
occurrences of a variable $z$ in a structured proof $(\pi,\rho)$ from a TSS $P$ in univariate tytt
format, with $\iota$ an occurrence of a subterm $t$ in $b$, and $\iota'$ an occurrence of a
subterm $u$ in $b'$, where $t\sim_\pi u$. Then $\theta \sim_z \theta'$.
\end{lemma}
\begin{proof}
We apply induction on the derivation of $t \sim_\pi u$.
\begin{itemize}
\item The case that $t \sim_\pi u$ is obtained by the first clause of
Def.\ \ref{equivalence} follows immediately from the definitions, in
particular using the first clause of Def.\ \ref{graph}, but only
when $\iota$ and $\iota'$ are the indicated occurrences $\iota_t$ and $\iota_{t'}$
(or $\iota_u$ and $\iota_{u'}$) in Def.\ \ref{graph}.
We also need to show that if a subterm $t$ occurs multiple times in $\pi$,
the corresponding occurrences of $z$ in the occurrences of $t$ are related by $\sim_z$.
Since $t$ must contain variables, and the sets of variables in different rules $b\in B$
(and $\alpha$) in a proof structure $\pi=(B,\alpha,\phi)$ are pairwise
disjoint, all occurrences of $t$ lie in the same rule $b$.
The second and third clause of Def.\ \ref{graph} together with the fact that $P$
is in univariate tytt format guarantee that all induced occurrences of $z$ are $\sim_z$-related.
\item The case that $t \sim_\pi u$ is obtained by the second clause of
Def.\ \ref{equivalence} is trivial.
\item The case that $t \sim_\pi u$ is obtained by reflexivity, symmetry or
transitivity is trivial too.
\qed
\end{itemize}
\end{proof}
\begin{lemma}\label{related}
For each two occurrences $\theta$ and $\theta'$ of a variable $z$ in a structured proof $(\pi,\rho)$
from a TSS in univariate tytt format, we have $\theta \sim_z \theta'$.
\end{lemma}
\begin{proof}
Let $\pi=(B,\alpha,\phi)$.
By Lem.\ \ref{prime occurrence}, for each variable $z$ occurring in $(\pi,\rho)$
we can choose an occurrence of the form $(b,\iota,\eta)$ with $b\in B\cup\{\alpha \}$,
$\iota$ an occurrence of a prime variable $x$ in $b$, and $\eta$ the occurrence
of $z$ in $\rho(x)=z$.
Let $(b',\iota',\eta')$ be another
occurrence of $z$ in $(\pi,\rho)$, with $\iota'$ an occurrence of a variable
$y'$ in $b'$ and $\eta'$ an occurrence of $z$ in $\rho(y')$.
With structural induction on $\rho(y')$ we show that
$(b,\iota,\eta) \sim_z (b',\iota',\eta')$.
\begin{itemize}
\item Let $y'$ be prime. Then $\rho(y')\in V$, so $\rho(y')=z=\rho(x)$. By the second
clause of Def.\ \ref{general}, $x \sim_\pi y'$.
The result now follows from Lem.\ \ref{inherited}.
\item Let $y'$ be composite. Then $y' \sim_\pi t$ for some $t\notin V$, so
by the third clause of Def.\ \ref{general}, $\rho(y')=\rho(t)$.
Hence the occurrence $\eta'$ of $z$ in $\rho(y')$ appears as an
occurrence $\eta''$ of $z$ in $\rho(y'')$ for a variable $y''$
occurring in $t$. Let $b''$ be the rule containing $t$, $\iota_t$ an occurrence of $t$ in $b''$
and $\iota''$ the appropriate occurrence of $y''$ within $\iota_t$.
Then $(b'',\iota'',\eta'')=\langle b'',\iota_t,\eta'\rangle$ is an occurrence of $z$ in $(\pi,\rho)$.
Since $\rho(y'')$ is a proper subterm of $\rho(y')$, by induction
$(b,\iota,\eta) \sim_z (b'',\iota'',\eta'')$.
Furthermore, Lem.\ \ref{inherited} yields
$\langle b'',\iota_t,\eta'\rangle \sim_z \langle b',\iota',\eta'\rangle = (b',\iota',\eta')$.
\qed
\end{itemize}
\end{proof}
\noindent
In the following observations, which are also needed in the proof of Prop.\ \ref{chain},
$(\pi,\rho)$ is a structured proof from a TSS $P$ in univariate tytt format
with $\pi\mathbin=(B,\alpha,\phi)$.
\begin{observation}\label{unique incoming}
For each incoming occurrence $\theta'$ of $z$ in $(\pi,\rho)$ there is at most
one outgoing occurrence $\theta$ of $z$ with $\theta \rightarrow_z \theta'$.
There is none iff $\theta'$ occurs in $\tp(\pi)$.
\end{observation}
\begin{observation}\label{unique backwards within rule}
For each outgoing occurrence $\theta'=(b',\iota',\eta')$ of $z$ in $(\pi,\rho)$ there is at most
one incoming occurrence $\theta=(b,\iota,\eta)$ of $z$ with $\theta \rightarrow_z \theta'$,
where it must be the case that $b'=b \neq \alpha$.
\end{observation}
\begin{proposition}\label{chain}
Let $\theta=(b, \iota,\eta )$ be an occurrence of a variable $z$ in a structured proof
$(\pi,\rho)$ from a TSS $P$ in univariate tytt format, with $\iota$ either an incoming occurrence in $\tp(\pi)$
or the only occurrence of a variable $x$ in $\s$.
Then $\theta \rightarrow_z^* \theta'$ for any occurrence $\theta'$ of $z$ in $(\pi,\rho)$,
with $^*$ reflexive and transitive closure.
\end{proposition}
\begin{proof}
By Lem.\ \ref{related}, $\theta\sim_z\theta'$.
By the definition of $\sim_z$, $\theta=\theta_0 \sim_z^1 \theta_1 \sim_z^1 \dots \sim_z^1 \theta_k = \theta'$,
where ${\sim_z^1} = {\rightarrow_z} \cup {\leftarrow_z} \cup {\leftrightsquigarrow_z}$.
Without loss of generality we assume that there are no repeated occurrences of
$z$ in this sequence.
By induction on $i$ we show that $\theta_i \rightarrow_z \theta_{i+1}$ for all $i=0,\ldots,k{-}1$.
\begin{itemize}
\item Let $i=0$, and $\theta_0=(b,\iota,\eta)$ with $\iota$
the only occurrence of $x$ in $\s$.
% By the first clause of Def.\ \ref{general}, $x=z$.
By Def.\ \ref{graph} there is no $\theta_1$ with $\theta_1 \rightarrow_z \theta_0$ or ---
using that $\iota$ is the only occurrence of $x$ in $\alpha$ ---
$\theta_0 \leftrightsquigarrow_z \theta_1$.
\item Let $i=0$ and $\theta_0=(b,\iota,\eta)$ with $\iota$ occurring in $\tp(\pi)$. Since $\theta_0$
is an incoming occurrence, by Def.\ \ref{graph} there is no $\theta_1$
with $\theta_0 \leftrightsquigarrow_z \theta_1$. By Obs.\ \ref{unique incoming}
there is no $\theta_1$ with $\theta_1 \rightarrow_z \theta_0$.
\item Let $i>0$ and $\theta_i$ be an incoming occurrence. By
Def.\ \ref{graph} there is no $\theta_{i+1}$ with $\theta_i \leftrightsquigarrow_z \theta_{i+1}$.
By Obs.\ \ref{unique incoming} and our convention that $\theta_{i+1} \neq \theta_{i-1}$,
we cannot have $\theta_{i} \leftarrow_z \theta_{i+1}$.
\item Let $i\mathbin>0$ and $\theta_i$ be an outgoing occurrence. By
Obs.\ \ref{classification} $\theta_{i-1}$ must be an incoming
occurrence, occurring in the same rule. Hence by Def.\ \ref{graph} there is no
$\theta_{i+1}$ with $\theta_{i} \leftrightsquigarrow_z \theta_{i+1}$.
By Obs.\ \ref{unique backwards within rule} and our convention that $\theta_{i+1} \neq \theta_{i-1}$,
we cannot have $\theta_{i} \leftarrow_z \theta_{i+1}$.
\qed
\end{itemize}
\end{proof}
\begin{example}
In Ex.\ \ref{exa:relations}, the occurrence of $z$ in $\s$ is $\rightarrow_z^*$-related to
the three other occurrences of $z$ in the structured proof.
\end{example}
\section{Preservation of bounded lookahead}
\label{sec:bounded}
We show that for any TSS $P$ in tyft/tyxt format with bounded lookahead,
all $P$-general rules have bounded lookahead. Thus congruence formats that allow bounded
lookahead can be derived by means of the decomposition method from \cite{BFvG04}.
\begin{definition}\rm
For a proof structure $\pi=(B,\alpha,\phi)$, let $\prec_\pi$ be the least relation on
$\var(\pi)$ such that:
\begin{itemize}
\item if $x$ occurs in the left-hand side of a premise of $\pi$, and $y$ in its right-hand side, then
$x \prec_\pi y$, and
\item if $b=\frac{H}{t\trans{a}u}\in B$ and $\phi(b)=(t'\trans{a}u')$
with $x \in\var(t') \wedge y\in\var(t)$ or $x \in\var(u) \wedge y\in\var(u')$,
then $x \prec_\pi y$.
\end{itemize}
\end{definition}
\begin{observation}\label{direction}
Let $(b,\iota,\eta) \rightarrow_z (b',\iota',\eta')$ for two occurrences of a variable $z$ in
a proof structure $(\pi,\rho)$, with $\iota$ an occurrence of an $x\in\var(\pi)$ in $b$, and
$\iota'$ of a $y\in\var(\pi)$ in $b'$.
If $b=b'$ then $x=y$, and if $b\neq b'$ then $x \prec_\pi y$.
\end{observation}
\begin{proposition}\label{no infinite}
Let $\pi=(B,\alpha,\phi)$ be a proof structure.
If all rules in $B$ have bounded lookahead, then there is no infinite chain
$x_0 \prec_\pi x_1 \prec_\pi x_2 \prec_\pi \cdots$.
\end{proposition}
\begin{proof}
With structural induction on proof structures $\pi$, seen as well-founded trees.
Given $\pi=(B,\alpha,\phi)$, let $b_0\in B$ be the unique rule with $\phi(b_0)=\alpha$.
For each premise $\beta$ of $b_0$, let $B_\beta \subseteq B$ be the collection of rules that are
above $\beta$, and let $\pi_\beta=(B_\beta, \beta, \phi{\restriction} B_\beta)$.
The structured proofs $\pi_\beta$ are subproofs of $\pi$ and by induction do not contain infinite
chains as above.
Suppose an infinite chain $x_0 \prec_\pi x_1 \prec_\pi \cdots$ occurred in $\pi$.
With the possible exception of $x_0$, which could lie in $\alpha$, this entire chain can be
divided into connected segments, each of which lies entirely in one of the $\pi_\beta$s.
Each segment has at least two variables in it, and two adjacent segments---lying in $\pi_\beta$ and
$\pi_\gamma$, respectively---overlap in exactly one variable, which must occur in the right-hand
side of $\beta$ as well as in the left-hand side of $\gamma$.
Here we use that the sets $\var(b)$ for $b\in B$ are pairwise disjoint.
Using the induction hypothesis, all these segments must be finite.
Hence, there must be infinitely many.
Restricting the sequence $x_0 \prec_\pi x_1 \prec_\pi x_2 \prec_\pi \cdots$ to those variables that
lie in two adjacent segments yields an infinite forward chain of variables in the dependency graph
of $b_0$, contradicting the supposed absence of unbounded lookahead in the rules of $B$.
\qed
\end{proof}
\begin{theorem}\label{thm:bounded}
Let $P$ be a TSS in univariate tytt format
with bounded lookahead. Then all $P$-general rules have bounded lookahead.
\end{theorem}
\begin{proof}
Let $P$ be a TSS with bounded lookahead, and $r$ a $P$-general rule, say with
structured proof $(\pi,\rho)$. If $r$ had unbounded lookahead, then $\tp(\pi)$ would contain premises
\plat{t_i \trans{a_i} u_i} for $i\in{\mathbb Z}_{\geq 0}$ with $\var(\rho(u_i)) \cap \var(\rho(t_{i+1}))\neq\emptyset$
for all $i\in{\mathbb Z}_{\geq 0}$. Thus, for each $i\in{\mathbb Z}_{\geq 0}$, there would be a $y_i\in\var(u_i)$,
an $x_{i+1}\in\var(t_{i+1})$ and some $z\in\var(\rho(y_i)) \cap \var(\rho(x_{i+1}))$.
Let $\theta_i=(b_i,\iota_i,\eta_i)$ be the occurrence of $z$ in $(\pi,\rho)$
where $b_i$ is the topmost rule with premise $t_i \trans{a_i} u_i$, $\iota_i$ is the
occurrence of $y_i$ in $u_i$ in $b_i$, and $\eta_i$ the occurrence of $z$ in $\rho(y_i)$.
Likewise, $\xi_{i+1}=(b_{i+1},\iota'_{i+1},\eta'_{i+1})$ is the occurrence of $z$ in $(\pi,\rho)$
where $\iota'_{i+1}$ is the occurrence of $x_{i+1}$ in $t_{i+1}$ in $b_{i+1}$, and
$\eta'_{i+1}$ the occurrence of $z$ in $\rho(x_{i+1})$.
By Prop.\ \ref{chain} $\theta_i \rightarrow_z^* \xi_{i+1}$.
Now Obs.~\ref{direction} gives $y_i \prec_\pi^* x_{i+1}$.
Since by definition also $x_i \prec_\pi y_i$ for all $i\in{\mathbb Z}_{\geq 0}$, we have found an
infinite chain $x_0 \prec_\pi x_1 \prec_\pi x_2 \prec_\pi \dots$, which with Prop.\ \ref{no infinite}
yields the required contradiction.
\qed
\end{proof}
\noindent
The following example shows that the restriction in Thm.\ \ref{thm:bounded} to $P$-general rules is essential.
\begin{example}
Consider the rule $\frac{x\trans{a}y}{f(x)\trans{b}{\bf 0}}$.\vspace{-1mm} By substituting $z$ for both $x$ and $y$
we derive $\frac{z\trans{a}z}{f(z)\trans{b}{\bf 0}}$,\vspace{-1mm} which contains unbounded lookahead.
\end{example}
\begin{corollary}\label{cor:decomposition}
Thm.\ \ref{thm:decomposition} applies to each TSS $P$ in tyft/tyxt format with bounded lookahead
by choosing for $\R$ the collection of all $P$-general ruloids.
\end{corollary}
\section[Preservation of partial trace safeness]{Preservation of $\Lambda$-partial trace safeness}
\label{sec:guarantee}
We say that a rule is \emph{$\Lambda$-infinitary trace safe} if
each $\Lambda$-floating variable has at most one
occurrence in total in the left-hand sides of the premises and in the target;
this occurrence must be at a $\Lambda$-liquid position.
\begin{observation}\label{unique liquid}
Let $P$ be a TSS in univariate tytt format for which each rule is $\Lambda$-infinitary trace safe,
and $(\pi,\rho)$ a structured proof of a rule $r$ from $P$.
For each $\Lambda$-liquid incoming occurrence $\theta$ of $z$ in $(\pi,\rho)$ there is at most
one outgoing occurrence $\theta'$ of $z$ with $\theta \rightarrow_z \theta'$;
this occurrence must be at a $\Lambda$-liquid position.
\end{observation}
\begin{theorem}\label{thm:partial-trace}
Let $P$ be a TSS in univariate tytt format for which each rule is $\Lambda$-infinitary trace safe.
Then each $P$-general rule is $\Lambda$-infinitary trace safe.
\end{theorem}
\begin{proof}
Let $(\pi,\rho)$ be a structured proof from $P$ of a $P$-general rule $r$, where
$\pi=(B,\alpha,\phi)$ with \plat{\alpha= (\s \trans{a} w)}.
Let $z$ be a $\Lambda$-floating variable of $r$. Then $z$ has a $\Lambda$-liquid occurrence
$\theta=(b,\iota,\eta)$ in $(\pi,\rho)$, with
$\iota$ either an incoming occurrence in $\tp(\pi)$
or the only occurrence of a variable $x$ in $\s$.
Consider any occurrence of $z$ in the left-hand sides of the premises or the
target of $r$. It corresponds with an occurrence $\theta'=(b',\iota',\eta')$ of $z$
in either a left-hand side of a premise in $\tp(\pi)$ or
the right-hand side of $\alpha$ (thus making $\iota'$ the occurrence of $w$).
There is no $\theta''$ with $\theta'\rightarrow_z \theta''$.
This follows from Obs.\ \ref{unique outgoing} if $\iota'$ occurs in a left-hand side
of $\tp(\pi)$, or from Def.~\ref{graph} if it is the right-hand side of $\alpha$.
By Prop.\ \ref{chain},
$\theta=\theta_0 \rightarrow_z \theta_1 \rightarrow_z \dots \rightarrow_z \theta_k = \theta'$.
Obs.\ \ref{classification}, \ref{unique outgoing} and~\ref{unique liquid} together imply that
any occurrence $\theta_i$ of $z$ in this chain is $\Lambda$-liquid, and moreover
that for each such $\theta_i$ the next occurrence $\theta_{i+1}$ (if
it exists) is uniquely determined. Since moreover $\theta_k \not\rightarrow_z$
it follows that $\theta'$ is uniquely determined.
Thus there is at most one occurrence of $z$ in the left-hand sides of the premises of $r$ or in the
target of $r$, and this occurrence is at a $\Lambda$-liquid position.
\qed
\end{proof}
\noindent
A tytt rule is $\Lambda$-partial trace safe iff it is $\Lambda$-infinitary trace safe and has
bounded lookahead. Thus, Thms.\ \ref{thm:bounded} and \ref{thm:partial-trace} together say that if
all rules of a TSS $P$ in univariate tytt format are $\Lambda$-partial trace safe, then so is each $P$-general rule.
\section{Precongruence of partial trace preorder}
To prove that for each TSS in partial trace format the induced partial trace preorder is a precongruence, it suffices to show that each formula in $\mathds{O}_T$ decomposes into formulas in $\mathds{O}_T^\equiv$.
The following lemma is needed in the proof of Prop.~\ref{prop:preservation2}. There it is only used in case
$x\notin\var(t)$, but within the proof of the lemma we also need the case that $x$ has one, $\Lambda$-liquid occurrence in $t$.
\begin{lemma}\label{lem:preservation1}
Let $P$ be a TSS in partial trace format, where its rules are $\Lambda$-partial trace safe.
Let $\R$ denote the set of $P$-general ruloids.
For each term $t$, $\phi\in\mathds{O}_{T}$, $\psi\in t^{-1}_\Rs(\phi)$, and variable $x$ that
occurs at most once in $t$, at a $\Lambda$-liquid position, we have $\psi(x)\in\mathds{O}_T^\equiv$.
\end{lemma}
\begin{proof}
We apply induction on the structure of $\phi\in\mathds{O}_T$. Let $\psi\in t^{-1}_\Rs(\phi)$.
The two possible syntactic forms of $\phi$ in the BNF grammar of $\mathds{O}_T$ are considered. In case $\phi=\top$, i.e.,
$\phi=\bigwedge_{i\in\emptyset}\phi_i$, by the second clause of Def.\ \ref{def:inverse},
$\psi(x)=\top\in\mathds{O}_{T}^{\equiv}$, and we are done. In case $\phi=\diam{a}\phi'$, by the first clause of
Def.\ \ref{def:inverse}, $\psi(x)=\bigwedge_{(x\trans{b}y)\in H}\diam{b}\psi(y)[\land\chi(x)]$,
for some $P$-general ruloid $r=\frac{H}{t\trans{a}u}$ and $\chi\in u^{-1}_\Rs(\phi')$,
%& where the square brackets around the conjunct $\chi(x)$ indicate that it is optional: it is present only if $x\in\var(u)$.
where the conjunct $\chi(x)$ is present only if $x\in\var(u)$.
By Thms.\ \ref{thm:bounded} and
\ref{thm:partial-trace}, $r$ is $\Lambda$-partial trace safe. Since by assumption $x$ is $\Lambda$-floating in
$r$, by Def.\ \ref{def:format}, $x$ has at most one occurrence in total in the left-hand sides of $H$ and in $u$;
this occurrence must be at a $\Lambda$-liquid position. We apply a nested induction on the
lookahead of $x$ in $H$ (formally defined in \cite[page 19---aliased 434]{FvGdW06}). Suppose first that $x$ occurs in the
left-hand side of $H$, say $x\trans{c}z$. Since $r$ is tytt, $z$ does not occur in $\var(t)$.
So by induction on the lookahead of $z$, $\psi(z)\in\mathds{O}_T^\equiv$. Hence $\psi(x)=\diam{c}\psi(z)\in\mathds{O}_T^\equiv$.
Suppose now that $x$ does not occur in the left-hand sides of $H$. Since $x$ occurs at most once in $u$, at
a $\Lambda$-liquid position, by induction on the structure of $\phi'$,
$\chi(x)\in\mathds{O}_T^\equiv$. Hence either $\psi(x)=\chi(x)\in\mathds{O}_T^\equiv$ or $\psi(x)=\top\in\mathds{O}_T^\equiv$.
\qed
\end{proof}
\begin{proposition}\label{prop:preservation2}
Let $P$ be a TSS in partial trace format and $\R$ the set of $P$-general ruloids. For each term $t$, $\phi\in\mathds{O}_{T}$, $\psi\in t^{-1}_\Rs(\phi)$ and variable $x$:\vspace*{-1mm}
\[
\psi(x)\equiv\bigwedge_{i\in I}\psi_i\mbox{ with }\psi_i\in\mathds{O}_T^\equiv\mbox{ for all } i\in I.\vspace*{-1mm}
\]
\end{proposition}
\begin{proof}
All \vspace*{-.5mm}rules in $P$ are $\Lambda$-partial trace safe, for some $\Lambda$. We apply induction on the
structure of $\phi\in\mathds{O}_T$. Let $\psi\in t^{-1}_\Rs(\phi)$. \vspace*{-.5mm}We consider the two possible syntactic
forms of $\phi$ in the BNF grammar of $\mathds{O}_T$. In case $\phi=\top$, by
the second \vspace*{-.25mm}clause of Def.\ \ref{def:inverse}, $\psi(x)=\top$, and we are done. In case $\phi=\diam{a}\phi'$, by
the first clause \vspace*{-.5mm}of Def.\ \ref{def:inverse}, $\psi(x)=\bigwedge_{(x\trans{b}y)\in H}\diam{b}\psi(y)[\land\chi(x)]$,
for some $P$-general ruloid $r = \frac{H}{t\trans{a}u}$ and $\chi\in u^{-1}_\Rs(\phi')$,
where the conjunct $\chi(x)$ is present only if $x\in\var(u)$.
By Thms.\ \ref{thm:bounded} and \ref{thm:partial-trace}, $r$ is $\Lambda$-partial trace safe.
Since $r$ \vspace*{-1mm}is tytt, for each $(x\trans{b}y)\in H$, $y$ does not occur in $\var(t)$.
So by \vspace*{-1mm}Lem.\ \ref{lem:preservation1}, $\psi(y)\in\mathds{O}_T^{\equiv}$ for each $(x\trans{b}y)\in H$. Moreover, by
induction, $\chi(x)\equiv\bigwedge_{i\in I}\chi_i$ with $\chi_i\in\mathds{O}_T^\equiv$ for all $i\in
I$. Thus $\psi(x)$ is also of this required form.
\qed
\end{proof}
\begin{corollary}\label{cor:main}
If a TSS is in partial trace format, then the partial trace preorder it induces is a precongruence.
\end{corollary}
\begin{proof}
Consider a TSS $P$ in partial trace format. Let $t$ be a term and $\sigma,\sigma'$ closed substitutions with $\sigma(x) \sqsubseteq_T \sigma'(x)$ for all $x\in\var(t)$; we need to prove that $\sigma(t) \sqsubseteq_T \sigma'(t)$. Suppose that $\sigma(t)\models\phi \in \mathds{O}_T$. Let $\R$ denote the set of $P$-general ruloids. By Thm.\ \ref{thm:decomposition} in combination with Cor.\ \ref{cor:decomposition} there is a $\psi \in t^{-1}_\Rs(\phi)$ with $\sigma(x) \models \psi(x)$ for all $x\in\var(t)$. By Prop.\ \ref{prop:preservation2}, $\psi(x)\equiv\bigwedge_{i\in I_{x}}\psi_{i,x}$ \vspace*{.5mm}with $\psi_{i,x}\in\mathds{O}_T^\equiv$ for all $x\in\var(t)$ and $i\in I_{x}$. So $\sigma(x)\models\psi_{i,x}$ for all $x\in\var(t)$ and $i\in I_x$. By Prop.\ \ref{prop:O_characterizes}, ${\cal O}_T^\equiv(\sigma(x))\subseteq {\cal O}_T^\equiv(\sigma'(x))$ for all $x \in \var(t)$. This implies $\sigma'(x)\models\psi_{i,x}$ for all $x\in\var(t)$ and $i\in I_x$. So $\sigma'(x)\models\psi(x)$ for all $x \in \var(t)$. Therefore, by Thm.\ \ref{thm:decomposition}, $\sigma'(t)\models\phi$. So ${\cal O}_T(\sigma(t))\subseteq {\cal O}_T(\sigma'(t))$. Hence, by Prop.\ \ref{prop:O_characterizes}, $\sigma(t) \sqsubseteq_T \sigma'(t)$.
\qed
\end{proof}
\section{Conclusion and future work}
We introduced the notion of a general rule, which has a proof with minimal variable unifications.
To this end we used \emph{proof structures} as alternatives for irredundant proofs,
because irredundant proofs abstract away from the variables that are being unified.
We moreover showed that if a TSS has bounded lookahead, then the same holds for its general
rules. This means that the decomposition method of modal formulas from \cite{FvGdW06} applies
directly to TSSs with bounded lookahead, without first having to turn unbounded into bounded
lookahead by means of ordinal count-downs. Both the notion of a general rule and the preservation
of bounded lookahead were crucial in the derivation of a congruence format for the partial trace
preorder, using the decomposition method.
When restricting attention to TSSs whose rules have finitely many premises, the restriction to
bounded lookahead can be dropped from the partial trace format. The reason is that unbounded
lookahead is eliminated when converting such a TSS to pure xyft format. With this extension
included, our format extends the earlier congruence format for partial trace semantics presented
in {\sc Bloom} \cite{Blo94}. The latter can be seen as the restriction of our format to TSSs in tyft
format, allowing only rules with finitely many premises, and requiring $\Lambda$ to hold universally. The binary
Kleene star (see \cite{BFvG04}) is an example of an operator that falls in our format, and in that
of \cite{BFvG04}, but not in that of \cite{Blo94}.
The application to APC in Sect.~\ref{sec:apc} falls outside the formats of both \cite{BFvG04} and \cite{Blo94}.
As future work we want to extend the results to TSSs with negative premises, and develop a
congruence format for partial trace \emph{equivalence} that allows negative premises. Moreover, we conjecture that
the techniques and results introduced in this paper make it possible to develop congruence formats
with lookahead for ready simulation and the decorated trace semantics.
Some applications of lookahead mentioned in the introduction require a richer format,
which may be based on the basic format given here. There is a rich body of work extending existing congruence
formats with features like time, probabilities and binders.
Recently \cite{CGT16} employed the modal decomposition technique to obtain congruence formats for probabilistic
semantics. Our approach lays the groundwork to extend those formats with lookahead.
In \cite{FvGdW12,FvG16}, modal decomposition is used to derive congruence for weak semantics.
Extending this work with lookahead could allow us to develop congruence formats that for example
cover the lookahead in the $\tau$-rules from \cite{BG96}.
In \cite[Sect.\ 8]{vGl11} a congruence format for weak bisimilarity with
$\tau$-rules and lookahead is presented, but using a bisimulation-specific method.
\vspace{2mm}
\noindent
{\bf Acknowledgement} Paulien de Wind observed that the transformation from unbounded to bounded lookahead in \cite{FvGdW06} violates the partial trace format.
\newpage
\begin{thebibliography}{10}
\bibitem{BaVa92}
{\sc J.C.M.~Baeten \& F.W. Vaandrager} (1992):
\newblock {\em An algebra for process creation.}
\newblock {\sl Acta Inform.} 29:303--334.
\bibitem{Ber97}
{\sc M. Bernardo} (1997):
\newblock {\em Enriching EMPA with value passing: A symbolic approach based on lookahead.}
\newblock In {\sl Proc.\ PAPM 1997}, pp.\ 35--49.
\bibitem{Blo94}
{\sc B.~Bloom} (1994):
\newblock {\em When is partial trace equivalence adequate?}
\newblock {\sl Form. Asp. Comput.} 6:317--338.
\bibitem{BIM95}
{\sc B.~Bloom, S. Istrail \& A.R. Meyer} (1995):
\newblock {\em Bisimulation can't be traced.}
\newblock {\sl J. ACM}, 42:232--268.
\bibitem{BFvG04}
{\sc B.~Bloom, W.J. Fokkink \& R.J.~van Glabbeek} (2004):
\newblock {\em Precongruence formats for decorated trace semantics.}
\newblock {\sl ACM T. Comput. Log.} 5:26--78.
\bibitem{BG96}
{\sc R.N. Bol \& J.F.~Groote} (1996):
\newblock {\em The meaning of negative premises in transition system specifications.}
\newblock {\sl J. ACM} 43:863--914.
\bibitem{CGT16}
{\sc V. Castiglioni, D. Gebler \& S. Tini} (2016):
\newblock {\em Modal decomposition on nondeterministic probabilistic processes.}
\newblock In {\sl Proc.\ CONCUR 2016}, LIPIcs 59, Schloss Dagstuhl--Leibniz Center for Informatics, pp.\ 36:1--36:15.
\bibitem{DL12}
{\sc P.R. D'Argenio \& M.D.~Lee} (2012):
\newblock {\em Probabilistic transition system specification: Congruence and full abstraction of bisimulation.}
\newblock In {\sl Proc.\ FOSSACS 2012}, LNCS 7213, Springer, pp.\ 452--466.
\bibitem{DosReis12}
{\sc A.J. Dos Reis} (2012):
\newblock {\em Compiler Construction Using Java, JavaCC, and Yacc}, Wiley-IEEE.
\bibitem{FvG96}
{\sc W.J. Fokkink \& R.J.~van Glabbeek} (1996):
\newblock {\em Ntyft/ntyxt rules reduce to ntree rules.}
\newblock {\sl Inform. Comput.} 126:1--10.
\bibitem{FvG16}
{\sc W.J. Fokkink \& R.J.~van Glabbeek} (2016):
\newblock {\em Divide and congruence II: Delay and weak bisimilarity}.
\newblock In {\sl Proc.\ LICS 2016}, pp.\ 778--787, ACM/IEEE.
\bibitem{FvGdW06}
{\sc W.J. Fokkink, R.J.~van Glabbeek \& P.~de Wind} (2006):
\newblock {\em Compositionality of Hennessy-Milner logic by structural operational semantics.}
\newblock {\sl Theor. Comput. Sci.} 354:421--440.
\bibitem{FvGdW12}
{\sc W.J. Fokkink, R.J.~van Glabbeek \& P.~de Wind} (2012):
\newblock {\em Divide and congruence: From decomposition of modal formulas to preservation of branching and $\eta$-bisimilarity.}
\newblock {\sl Inform. Comput.} 214:59--85.
\bibitem{vGl93}
{\sc R.J.~van Glabbeek} (1993):
\newblock {\em Full abstraction in structural operational semantics (extended abstract).}
\newblock In {\sl Proc.\ AMAST 1993}, pp.\ 77--84, Springer.
\bibitem{vGl01}
{\sc R.J.~van Glabbeek} (2001):
\newblock {\em The linear time -- branching time spectrum {I}: {T}he semantics of concrete, sequential processes.}
\newblock In J.A. Bergstra, A.~Ponse \& S.A. Smolka, editors: {\sl Handbook of Process Algebra},
chapter~1,
Elsevier, pp.\ 3--99.
\bibitem{vGl11}
{\sc R.J.~van Glabbeek} (2011):
\newblock {\em On cool congruence formats for weak bisimulations.}
\newblock {\sl Theor. Comput. Sci.} 412:3283--3302.
\bibitem{GV92}
{\sc J.F. Groote \& F.W.~Vaandrager} (1992):
\newblock {\em Structured operational semantics and bisimulation as a congruence.}
\newblock {\sl Inform. Comput.} 100:202--260.
\bibitem{HM85}
{\sc M. Hennessy \& R.~Milner} (1985):
\newblock {\em Algebraic laws for non-determinism and concurrency.}
\newblock {\sl J. ACM} 32:137--161.
\bibitem{HR07}
{\sc K.V. Hindriks \& M.B.~van Riemsdijk} (2007):
\newblock {\em Satisfying maintenance goals.}
\newblock In {\sl Proc.\ DALT 2007}, LNCS 4987, Springer, pp.\ 86--103.
\bibitem{Lar86}
{\sc K.G. Larsen} (1986):
\newblock {\em Context-Dependent Bisimulation between Processes}.
\newblock PhD thesis, University of Edinburgh.
\bibitem{LL98}
{\sc L. L\'eonard \& G.~Leduc} (1998):
\newblock {\em A formal definition of time in LOTOS.}
\newblock {\sl Form. Asp. Comput.} 10:248--266.
\bibitem{Mou09}
{\sc M.R. Mousavi} (2009):
\newblock {\em Causality in the semantics of Esterel: Revisited.}
\newblock In {\sl Proc.\ SOS 2009}, EPTCS 18, pp.\ 32--45.
\bibitem{Plo04}
{\sc G.D. Plotkin} (2004):
\newblock {\em A structural approach to operational semantics.}
\newblock {\sl J. Log. Algebr. Progr.} 60/61:17--139.
\newblock Originally appeared in 1981.
\bibitem{Sim85}
{\sc R.~de Simone} (1985):
\newblock {\em Higher-level synchronising devices in \textsc{Meije}--{SCCS}.}
\newblock {\sl Theor. Comput. Sci.} 37:245--267.
\end{thebibliography}
\end{document}