\documentclass[11pt]{article}
\usepackage{amssymb,amsmath,amsthm,url}
\usepackage{graphicx}
%uncomment to get hyperlinks
%\usepackage{hyperref}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Some macros (you can ignore everything until "end of macros")
\def\class{0}
\topmargin 0pt \advance \topmargin by -\headheight \advance
\topmargin by -\headsep
\textheight 8.9in
\oddsidemargin 0pt \evensidemargin \oddsidemargin \marginparwidth
0.5in
\textwidth 6.5in
%%%%%%
\newcommand{\getsr}{\gets_{\mbox{\tiny R}}}
\newcommand{\bits}{\{0,1\}}
\newcommand{\Ex}{\mathbb{E}}
\newcommand{\To}{\rightarrow}
\newcommand{\e}{\epsilon}
\newcommand{\R}{\mathbb{R}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\maxpr}{\text{\rm max-pr}}
\newenvironment{summary}{\begin{quote}\textbf{Summary.}}{\end{quote}}
\newtheorem{theorem}{Theorem}
\newtheorem{axiom}{Axiom}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{exercise}{Exercise}
\newtheorem{definition}[theorem]{Definition}
\newcommand{\sstart}{\triangleright}
\newcommand{\send}{\triangleleft}
\newcommand{\cclass}[1]{\mathbf{#1}}
\renewcommand{\P}{\cclass{P}}
\newcommand{\NP}{\cclass{NP}}
\newcommand{\Time}{\cclass{Time}}
\newcommand{\BPP}{\cclass{BPP}}
\newcommand{\Size}{\cclass{Size}}
\newcommand{\Ppoly}{\cclass{P_{/poly}}}
\newcommand{\CSAT}{\ensuremath{\mathsf{CSAT}}}
\newcommand{\SAT}{\ensuremath{\mathsf{3SAT}}}
\newcommand{\IS}{\mathsf{INDSET}}
\newcommand{\poly}{\mathrm{poly}}
\newcommand{\inp}{\mathsf{in}}
\newcommand{\outp}{\mathsf{out}}
\newcommand{\Adv}{\mathsf{Adv}}
\newcommand{\Supp}{\mathsf{Supp}}
\newcommand{\dist}{\Delta}
\newcommand{\indist}{\approx}
\newcommand{\PRG}{\mathsf{G}}
\newcommand{\Gen}{\mathsf{Gen}}
\newcommand{\Enc}{\mathsf{E}}
\newcommand{\Dec}{\mathsf{D}}
\newcommand{\Com}{\mathsf{C}}
\newcommand{\Sign}{\mathsf{Sign}}
\newcommand{\Ver}{\mathsf{Ver}}
\newcommand{\eqdef}{\stackrel{\text{\tiny def}}{=}}
\newcommand{\set}[1]{ \{ #1 \} }
\newcommand{\cF}{\mathcal{F}}
\newcommand{\angles}[1]{\langle #1 \rangle}
\newcommand{\iprod}[1]{\angles{#1}}
\newcommand{\floor}[1]{\lfloor #1 \rfloor}
\newcommand{\view}{\mathsf{view}}
\newcommand{\trans}{\mathsf{trans}}
% end of macros
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{Lecture 18 - Zero Knowledge (cont), identification protocols}
\author{Boaz Barak}
\begin{document}
\maketitle
{ \ifnum\class=1 \fontsize{14pt}{16pt} \selectfont \fi
\begin{description}
\item[Reading] Boneh-Shoup chapter 18.
\item[Review] \textbf{Protocol QR}
\begin{description}
\item[Statement] $x$ is a quadratic residue mod $n$.
\item[Public input] $x$ , $n$
\item[Prover's (Alice) private input.] $w$ such that
$x=w^2 \pmod{n}$.
\item[P $\rightarrow$ V] Alice chooses random $u \getsr
\Z^*_n$ and sends $y=u^2$ to Bob.
\item[P $\leftarrow$ V] Bob chooses $b \getsr \bits$
\item[P $\rightarrow$ V] If $b=0$, Alice sends $u$ to
Bob. If $b=1$, Alice sends $w\cdot u \pmod {n}$.
\item[Verification.] Let $z$ denote the number sent by
Alice. In the case $b=0$, Bob \emph{accepts} the proof if
$z^2 = y \pmod{n}$. In the case $b=1$, Bob
accepts the proof if $z^2 = xy \pmod{n}$.
\end{description}
\item[Simulator] Chooses $z$ at random; for $b=0$ it uses $y=z^2$ just as in the real protocol, while for $b=1$ it uses $y=z^2x^{-1}$.
\item[Proofs of knowledge] A notion we did not talk a lot about in class is that of a \emph{proof of knowledge}. Basically this is a
stronger form of soundness that says that if $P^*$ convinces the verifier with noticeable probability (i.e.,
more than the soundness error), then not only does this mean that the statement $x$ is in $L$, but it actually means
that $P^*$ ``knows'' a witness, in the sense that a witness could be obtained by running some algorithm. This is
often useful for proving security of identification protocols, where simple soundness falls short of what we need
to make the proof work.
We say that $(P,V)$ is a \emph{proof of knowledge} if soundness is replaced by the following stronger requirement:
\item[Knowledge soundness with error $\delta$] For every possibly cheating prover $P^*$, and every $x$, if
$P^*$ satisfies that
\[
\Pr[\outp_V\angles{P^*,V_{x,r}} = \mathsf{accept} ] > \delta + \rho
\] (where this probability is taken over the
random choices $r$ of the verifier)
then there's an algorithm $E$ (called a knowledge extractor) with running time polynomial in $1/\rho$ and the
running time of $P^*$, that on input $x$ outputs a witness $w$ for $x$ (in our example $w$ is a square root of
$x$) with probability at least $1/2$. Note that this indeed implies normal soundness with soundness error $\delta$.
Note that the proof of knowledge condition makes sense even when there is no question of whether the statement is true, as
we'll see below.
\item[Parallel repetition.] We can show that parallel repetition reduces the soundness error to $2^{-k}$ by giving
the even stronger statement: let $\vec{b} \in \bits^k$ denote the queries the verifier makes in $k$ copies
of the parallel repeated version. For every fixed $k$ initial messages $y_1,\ldots,y_k$, and $\vec{b} \neq
\vec{b'}$, one can obtain the square root of $x$ from two accepting transcripts of the form $(y_1\cdots
y_k; \vec{b} ; z_1\cdots z_k)$ and $(y_1\cdots y_k; \vec{b'} ; z'_1\cdots z'_k)$. This allows us to show the proof
of knowledge property using \emph{rewinding}.
A three round protocol that is (perfect) honest verifier zero knowledge and has this knowledge soundness
property is called a $\Sigma$ protocol.
\item[Honest verifier zero knowledge] Parallel repetition does preserve honest verifier zero knowledge.
\item[Schnorr's identification protocol] Schnorr suggested the following proof of knowledge for the discrete
logarithm:
\item[Review] \textbf{Protocol Schnorr}
\begin{description}
\item[Statement] Alice knows discrete log of $h$ w.r.t. $g$, where these are members of some group $G$ of
order $q$, and $g$ is a generator.
\item[Public input] $g$, $h$
\item[Prover's (Alice) private input.] $x$ such that $h = g^x$.
\item[P $\rightarrow$ V] Alice chooses random $r \in \Z_q$, and sends $a = g^r$.
\item[P $\leftarrow$ V] Bob chooses $b \getsr \Z_q$ and sends $b$ to Alice.
\item[P $\rightarrow$ V] Alice sends $c=r + xb \pmod{q}$ to Bob.
\item[Verification.] Bob verifies that $ah^b = g^c$.
\end{description}
\item[Completeness] obvious.
\item[Proof of knowledge] We have the $\Sigma$ condition: given a first message $a$, two distinct challenges $b \neq b'$, and
responses $c,c'$ such that $ah^b = g^c$ and $ah^{b'} = g^{c'}$, we divide the two equations by each other to get
$h^{b-b'} = g^{c-c'}$; since we know $b,b'$ we can raise this to the power $(b-b')^{-1} \pmod{q}$ to get
an equation of the form $h = g^x$.
\item[Honest verifier zero knowledge] The simulator does the following: choose $b,c \getsr \Z_q$, choose $a$ as
$h^{-b}g^c$.
\item[Application for identification w.r.t passive adversaries]
\item[Random oracle] making $\Sigma$ protocols non-interactive.
\item[OR trick] We now show how to transform a $\Sigma$ protocol into an identification protocol that is secure
w.r.t. active adversaries. We suppose we have a $\Sigma$ protocol for proving knowledge of $x$ given
$f(x)$, where $f$ is a one-way function. We show the following \textbf{OR Protocol} to prove, given
$y_1,y_2$ that Alice knows \emph{either} a preimage for $y_1$ \emph{or} a preimage for $y_2$ (or possibly
both). We assume there is a $\Sigma$ protocol $\Pi$ with messages $(a,b,c)$ for proving knowledge of one
preimage.
We first describe the protocol from the verifier's (Bob) point of view, without explaining how Alice implements
her strategy.
\begin{description}
\item[Public input] $y_1,y_2$
\item[Alice (prover's) private input] either $x_1$ such that $y_1=f(x_1)$ or $x_2$ such that $y_2=f(x_2)$.
\item[First message] Alice sends two messages $a_1,a_2$ for two instances of the $\Sigma$ protocol: one to
prove she knows $x_1$ and the other to prove she knows $x_2$. (Note that she doesn't know both.)
\item[Second message] Bob chooses $b$ at random from the challenge space of the $\Sigma$ protocol (say
$\mathrm{GF}(2)^k$ or $\Z_q$) and sends $b$ to Alice.
\item[Third message] Alice chooses $b_1,b_2$ such that $b_1 + b_2 = b$ and continues both interactions as
if Bob sent $b_1$ in the first instance and $b_2$ in the second instance. Bob verifies that both
instances accept.
\end{description}
\item[Completeness] Here is one of the rare examples where even completeness is not trivial---how does Alice
manage to succeed in two instances when she only knows one witness? The idea is the following: suppose
that she knows only $x_2$. She will choose $b_1$ at random herself, and use the \emph{simulator} to compute
her first message $a_1$ in the first instance. Then when Bob sends her $b$, she will just compute $b_2 = b
- b_1$ and continue in both instances, in the first using the simulator and the second using the real
prover strategy.
\item[Soundness and knowledge soundness] This is still a $\Sigma$ protocol: suppose that we have fixed messages
$a_1,a_2$ and $b \neq b'$ such that Alice can find $b_1,b_2 , b'_1,b'_2$ and $c_1,c_2, c'_1,c'_2$ such that
$b=b_1+b_2$, $b'=b'_1+b'_2$ and all the following four transcripts are accepting:
\begin{align*}
(a_1,b_1,c_1) \\
(a_1,b'_1,c'_1) \\
(a_2,b_2,c_2) \\
(a_2,b'_2,c'_2)
\end{align*}
we claim that it must be the case that either $b'_1 \neq b_1$ or $b_2 \neq b'_2$. Indeed, otherwise we'd
have $b'_1 = b_1$ and $b'_2 = b_2$ and hence $b = b_1 + b_2 = b'_1 + b'_2 = b'$, contradicting the assumption that $b \neq b'$.
\item[Honest verifier zero knowledge] This is also honest verifier ZK, but this is actually not the important
property we need from this protocol.
\item[Security against cheating verifiers.] We show the following result:
\begin{lemma} Let Bob$^*$ be any (possibly cheating) polynomial-time verifier, and suppose that $x_1,x_2$ are chosen at
random and we set $y_i=f(x_i)$. Then for $j=1,2$ and $i=1,2$, the probability that Bob$^*$ outputs a preimage of
$y_i$ when interacting with Alice that gets as input $x_j$ is negligible.
\end{lemma}
\begin{proof} Let's prove the lemma for $i=2$ (the proof for $i=1$ is symmetric).
The main point is the following: no matter what Bob does, the interaction with Alice that gets as input $x_1$
and the interaction with Alice that gets as input $x_2$ is \emph{identical}. In both cases, Bob gets exactly
the same distribution as if Alice would have run the honest prover for both instances and chosen at random
$b_1,b_2$ such that $b_1 + b_2 = b$. So it suffices to prove this for $i=2$ and $j=1$. But then neither Bob nor
Alice gets the preimage of $y_2$, and if Bob could obtain such a preimage with non-negligible probability then
we can combine both Bob and Alice to get an inverter for the one-way function $f$.
\end{proof}
\item[Zero knowledge protocol for Hamiltonian cycle]
\end{description}
\end{document}