\documentclass[10pt]{article}
\usepackage{fullpage}
\usepackage[utf8x]{inputenc}
\usepackage{amsmath,amsfonts,amsthm}
\usepackage[english]{babel}
\usepackage[capitalize, noabbrev]{cleveref}
\usepackage{algorithm}
\usepackage{algpseudocode}
\newenvironment{enumerate*}%
{\vspace{-2ex} \begin{enumerate} %
\setlength{\itemsep}{-1ex} \setlength{\parsep}{0pt}}%
{\end{enumerate}}
\newenvironment{itemize*}%
{\vspace{-2ex} \begin{itemize} %
\setlength{\itemsep}{-1ex} \setlength{\parsep}{0pt}}%
{\end{itemize}}
\newenvironment{description*}%
{\vspace{-2ex} \begin{description} %
\setlength{\itemsep}{-1ex} \setlength{\parsep}{0pt}}%
{\end{description}}
\DeclareMathOperator{\E}{\mathbb{E}}
\let\P\relax
\DeclareMathOperator{\P}{\mathbb{P}}
\DeclareMathOperator*{\argmin}{\arg\min}
\newcommand{\ex}[1]{\E\left[#1\right]}
\newcommand{\prob}[1]{\P\left[#1\right]}
\newcommand{\inprod}[1]{\left\langle #1 \right\rangle}
\newcommand{\R}{\mathbf{R}}
\newcommand{\poly}{\text{poly}}
\newcommand{\N}{\mathbf{N}}
\newcommand{\C}{\mathcal{C}}
\newcommand{\eps}{\varepsilon}
\newcommand{\cl}[1]{\text{\textbf{#1}}}
\newcommand{\eqdef}{\mathbin{\stackrel{\rm def}{=}}}
\newcommand{\llbracket}{[\![}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\algrenewcommand\algorithmicensure{\textbf{Output:}}
\algrenewcommand\algorithmicrequire{\textbf{Input:}}
\author{Thibaut Horel}
\title{CS 225 Problem Set 2 -- Solutions}
\begin{document}
\maketitle
Collaborator: Debmalya Mandal
\section*{Problem 2.9}
\paragraph{1.}
Let $\lambda$ be an eigenvalue of $M$ and let $x$ be an associated eigenvector.
Let $i^*\in\{1,\ldots, n\}$ be such that $|x_{i^*}| = \|x\|_{\infty}$. Since $x$ is an
eigenvector we have:
\begin{displaymath}
\sum_{j=1}^n m_{i^*,j} x_j = \lambda x_{i^*}
\end{displaymath}
Since $x$ is non-zero, $x_{i^*}$ is non-zero. Dividing both sides by $x_{i^*}$
and taking absolute values, we obtain:
\begin{displaymath}
|\lambda| =
\left|\sum_{j=1}^n m_{i^*,j} \frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*,j} \left|\frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*, j} = 1
\end{displaymath}
where the first inequality used the triangle inequality and the second
inequality used the definition of $x_{i^*}$.
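As a quick numerical sanity check, the following Python sketch (the example
graph, a $4$-cycle, is ours and not part of the problem) verifies that all
eigenvalues of a random walk matrix indeed lie in $[-1,1]$:
\begin{verbatim}
import numpy as np

# Random walk matrix M = A/d of the 4-cycle (2-regular); illustrative example.
A = np.array([[0, 1, 0, 1],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 0, 1, 0]], dtype=float)
M = A / 2
eig = np.linalg.eigvalsh(M)          # M is symmetric
assert np.all(np.abs(eig) <= 1 + 1e-12)
print(eig)                           # [-1, 0, 0, 1] for the 4-cycle
\end{verbatim}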
\paragraph{2.} Assume $G$ is disconnected; then it has at least two connected
components, and we can write $V = C_1\cup C_2$ with $C_1, C_2\neq\emptyset$,
$C_1\cap C_2 = \emptyset$ and $(C_1\times C_2)\cap E = \emptyset$. Then it is clear that $x^1
= \mathbf{1}_{C_1}$ (the indicator vector of $C_1$) and $x^2
= \mathbf{1}_{C_2}$ are eigenvectors of $M$ for the eigenvalue 1. For example
for $x^1$:
\begin{displaymath}
\sum_{j=1}^n m_{i,j} x^1_j =
\begin{cases}
1&\text{if $i\in C_1$}\\
0&\text{otherwise}
\end{cases}
\end{displaymath}
where the first case follows from the fact that, if $i\in C_1$, all the non-zero
terms $m_{i,j}$ are preserved by the multiplication by $x^1_j$ (since
$m_{i,j}\neq 0\Rightarrow j\in C_1$), together with the fact that $\sum_{j=1}^n m_{i,j}
= 1$. Similarly, if $i\in C_2$, then $m_{i,j} = 0$ whenever $j\in C_1$.
Furthermore $x^1$ and $x^2$ are clearly orthogonal, so they span a vector space
of dimension 2. Hence the multiplicity of $1$ as an eigenvalue is at least 2.
\paragraph{} Let us now assume that $1$ has multiplicity at least $2$. We can
then find an eigenvector $x$ for the eigenvalue $1$ orthogonal to $\mathbf{1}$,
that is, with $\sum_i x_i = 0$. Let us define $C_1 = \{i\in V: x_i > 0\}$ and
$C_2 = \{i\in V: x_i\leq 0\}$.
Because $x\neq 0$ and $\sum_i x_i = 0$, both $C_1$ and $C_2$ are non-empty.
They are clearly disjoint and their union is $V$. We show that $G$ is
disconnected. Let $i^*$ be such that $|x_{i^*}| = \|x\|_{\infty}$. Without
loss of generality we can assume $x_{i^*} > 0$ (otherwise we consider $x'
= -x$). Since $Mx = x$:
\begin{displaymath}
\sum_{j=1}^n m_{i^*,j}x_j = x_{i^*}
\end{displaymath}
Dividing both sides by $x_{i^*}$ (it is non-zero for the same reason as in
\textbf{1.}):
\begin{displaymath}
1 = \left|\sum_{j=1}^n m_{i^*,j}\frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*,j}\left|\frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*,j} = 1
\end{displaymath}
where we again used the triangle inequality, the definition of $x_{i^*}$, and
the fact that each row of $M$ sums to one. Hence, the
chain of inequalities is a chain of equalities. This implies that $x_j=x_{i^*}$
for all $j$ such that $m_{i^*,j} \neq 0$. We can then repeatedly apply the
same argument to all the neighbors of $i^*$, the neighbors of their neighbors,
etc. By induction (over the distance to $i^*$), we see that for all vertices
$j$ in the same connected component as $i^*$, $x_j = x_{i^*}>0$. Hence no
vertex of $C_2$ lies in the connected component of $i^*$; since $C_2$ is
non-empty, this component is a proper subset of $V$ and $G$ is disconnected.
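This equivalence is also easy to check numerically; a minimal Python sketch
(the example graph, two disjoint triangles, is ours): the eigenvalue $1$ of
$M$ appears with multiplicity $2$, one eigenvector per connected component.
\begin{verbatim}
import numpy as np

# Two disjoint triangles: 2-regular and disconnected.
T = np.ones((3, 3)) - np.eye(3)
A = np.block([[T, np.zeros((3, 3))],
              [np.zeros((3, 3)), T]])
M = A / 2
eig = np.linalg.eigvalsh(M)
print(int(np.sum(np.isclose(eig, 1.0))))   # 2: one per connected component
\end{verbatim}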
\paragraph{3.} Assuming that $G$ is connected, it is easy to see that $G$
being bipartite is equivalent to $G^2$ being disconnected. Hence, assuming $G$
connected and using \textbf{2.}:
\begin{align*}
G \text{ bipartite} &\Leftrightarrow G^2 \text{ disconnected}\\
&\Leftrightarrow 1 \text{ is an eigenvalue of } M^2 \text{ of multiplicity at
least } 2\\
&\Leftrightarrow \exists x\perp \mathbf{1},\, x\neq 0,\; M^2x = x\\
&\Leftrightarrow \exists x\perp \mathbf{1},\, x\neq 0,\; Mx = \pm x\\
&\Leftrightarrow \exists x\perp \mathbf{1},\, x\neq 0,\; Mx = -x\\
&\Leftrightarrow -1 \text{ is an eigenvalue of } M.
\end{align*}
The fourth equivalence uses that the eigenvalues of $M^2$ are the squares of
the eigenvalues of $M$, the fifth uses that $x$ cannot be an eigenvector for
the eigenvalue $1$ because of \textbf{2.}, and the second equivalence also
uses \textbf{2.} (applied to $G^2$).
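Again a numerical illustration (a Python sketch; the cycle graphs are our
choice of examples): the even cycle is bipartite and $-1$ is an eigenvalue of
its walk matrix, while the odd cycle is not and $-1$ is not an eigenvalue.
\begin{verbatim}
import numpy as np

def cycle_M(n):
    """Random walk matrix of the n-cycle (2-regular)."""
    A = np.zeros((n, n))
    for i in range(n):
        A[i, (i + 1) % n] = A[i, (i - 1) % n] = 1
    return A / 2

for n in (6, 7):   # the 6-cycle is bipartite, the 7-cycle is not
    eig = np.linalg.eigvalsh(cycle_M(n))
    print(n, bool(np.any(np.isclose(eig, -1.0))))   # True, then False
\end{verbatim}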
\paragraph{4.} First observe that $\max_{\|x\|_2 = 1} \inprod{Mx, x}
= \lambda_{max}(M)$ for any symmetric matrix $M$. Indeed, writing $M = P^TDP$
with $P$ orthogonal and $D = \text{diag}(\lambda_1,\dots,\lambda_n)$ (spectral
theorem), and observing that $\|x\|_2 =1\Leftrightarrow \|Px\|_2 = 1$, we have:
\begin{displaymath}
\max_{\|x\|_2 = 1} \inprod{Mx, x} = \max_{\|y\|_2=1} y^TDy = \max_{\|y\|_2
= 1} \sum_{i=1}^n\lambda_i y_i^2
\end{displaymath}
The right-hand side maximum is attained at $y_i = 1$ for some coordinate $i$
such that $\lambda_i = \lambda_{max}(M)$ and $y_j = 0$ for $j\neq i$; for this
$y$ the value is $\lambda_{max}(M)$.
Now observe that $\max_x\inprod{Mx, x}$, when $x$ ranges over unit vectors
orthogonal to $\mathbf{1}$, is exactly the largest eigenvalue of $M$
restricted to the subspace orthogonal to $\mathbf{1}$ (restricting $M$ to
this subspace is allowed since this subspace is invariant under $M$). But since
the eigenspaces of $M$ are orthogonal, the largest eigenvalue of $M$ restricted
to the orthogonal complement of $\mathbf{1}$ is the second largest eigenvalue
of $M$. Hence:
\begin{displaymath}
\lambda_2(M) = \max_{\substack{\|x\|_2 = 1\\ x\perp\mathbf{1}}} \inprod{Mx, x}
\end{displaymath}
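This variational characterization can be checked numerically by deflating the
known eigenvector $\mathbf{1}$ (a Python sketch; the test graph, an
$8$-cycle, is ours):
\begin{verbatim}
import numpy as np

# lambda_2 as the top eigenvalue of M on the subspace orthogonal to 1:
# subtracting the rank-one projector onto 1 sends the eigenvalue 1 to 0
# and leaves the other eigenvalues unchanged.
n = 8
A = np.zeros((n, n))
for i in range(n):
    A[i, (i + 1) % n] = A[i, (i - 1) % n] = 1
M = A / 2                                  # walk matrix of the 8-cycle
u = np.ones(n) / np.sqrt(n)
lam2_var = np.max(np.linalg.eigvalsh(M - np.outer(u, u)))
lam2 = np.sort(np.linalg.eigvalsh(M))[-2]
assert np.isclose(lam2_var, lam2)
\end{verbatim}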
\paragraph{} Let us now show the second identity given in the problem
statement:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 = \sum_{(i,j)\in E} (x_i^2 + x_j^2)
- 2\sum_{(i,j)\in E} x_ix_j = d\|x\|_2^2 - d\sum_{i,j} m_{i,j}x_i x_j
\end{displaymath}
where the second equality used that each vertex is incident to exactly $d$
edges, so that $\sum_{(i,j)\in E} (x_i^2 + x_j^2) = d\|x\|_2^2$, and that
$m_{i,j} = \frac{1}{d}a_{i,j}$ where $a_{i,j}$ is the $(i,j)$ entry of the
adjacency matrix of $G$ (each unordered edge appears twice in the double sum
$\sum_{i,j}$). For $\|x\|_2 = 1$:
\begin{displaymath}
1- \frac{1}{d}\sum_{(i,j)\in E} (x_i-x_j)^2
= \sum_{i,j}m_{i,j}x_i x_j = \inprod{Mx, x}
\end{displaymath}
Taking the maximum over unit vectors $x$ orthogonal to $\mathbf{1}$ on both
sides gives the second identity.
Let us now consider $x$ such that $\|x\|_2 = 1$ and $\sum_{i} x_i
= 0$, and consider $i^*$ such that $|x_{i^*}| = \|x\|_{\infty}$. Because
$\|x\|_2 =1$, we have $|x_{i^*}| \geq \frac{1}{\sqrt{n}}$. Because $\sum_i x_i
= 0$, there is $k$ such that $x_k$ has the opposite sign of $x_{i^*}$. Since $G$
is connected, there is a path of length $l\leq n$ from $i^*$ to $k$. Let us
index this path $i^* = i_1,\ldots, i_{l+1} = k$. Then:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 \geq \sum_{s=1}^l (x_{i_{s+1}}
- x_{i_s})^2
\end{displaymath}
where we restricted the sum to the path from $i^*$ to $k$. Applying the
Cauchy-Schwarz inequality:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 \geq \frac{1}{l} \left(\sum_{s=1}^l (x_{i_{s+1}}
- x_{i_s})\right)^2= \frac{1}{l} (x_k - x_{i^*})^2\geq \frac{1}{l} x_{i^*}^2
\end{displaymath}
where the last inequality used that $x_{i^*}$ and $x_k$ have opposite signs.
Finally using $x_{i^*}^2 \geq \frac{1}{n}$ and $l\leq n$:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 \geq \frac{1}{n^2}
\end{displaymath}
Taking the minimum over such $x$ and using the variational expression for
$\lambda_2$ (second largest eigenvalue) established at the beginning of the
question, this implies: $\lambda_2\leq 1-\frac{1}{dn^2}$.
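A numerical check of this bound on a family of examples (a sketch; the cycles,
for which $d=2$, and the range of $n$ are our choices):
\begin{verbatim}
import numpy as np

# Check lambda_2 <= 1 - 1/(d n^2) with d = 2 on n-cycles.
def cycle_M(n):
    A = np.zeros((n, n))
    for i in range(n):
        A[i, (i + 1) % n] = A[i, (i - 1) % n] = 1
    return A / 2

for n in range(3, 40):
    lam2 = np.sort(np.linalg.eigvalsh(cycle_M(n)))[-2]
    assert lam2 <= 1 - 1 / (2 * n * n)
\end{verbatim}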
\paragraph{5.} We consider the graph $G^2$. As seen in \textbf{3.}, $G^2$ is
connected since $G$ is connected and nonbipartite. Hence, we can apply question
\textbf{4.} to $G^2$: $\lambda_2 (M^2)\leq 1-\frac{1}{d^2n^2}$ (the degree of
$G^2$ is $d^2$). But the eigenvalues of $M^2$ are the squares of the
eigenvalues of $M$. So all the eigenvalues of $M$ other than 1 have absolute
value at most:
\begin{displaymath}
\sqrt{1-\frac{1}{d^2n^2}}\leq 1 - \frac{1}{2d^2n^2}
\end{displaymath}
where we used $\sqrt{1-x}\leq 1-\frac{x}{2}$ for $x\in[0,1]$; this concludes
the question by definition of $\lambda(G)$.
\paragraph{6.} The analysis we did in \textbf{4.} used that a path from
$i^*$ to $k$ had length $l\leq n$. If we know the diameter $D$, we have $l\leq
D$. The same analysis gives $\lambda_2\leq 1- \frac{1}{dDn}$.
Finally, \textbf{5.} implies that $\gamma(G) \geq \frac{1}{2d^2n^2}$.
\section*{Problem 3.1}
Let us consider a problem $(Y, N)$ in $\cl{prBPP}$, where $Y$ (resp. $N$) is
the set of “yes” (resp. “no”) instances. After standard error reduction, there
is a polynomial time algorithm
$A$ such that for all $x$ of length $n$, $\prob{ A(x) \text{ correct}}\geq 1-\frac{1}{2^n}$.
The algorithm uses $m = \poly(n)$ coin tosses. For input $x$, let us denote by
$A_x$ the set of coin tosses $r$ such that $A(x; r)$ says “yes”.
Using exactly the same analysis as in the proof of Theorem 3.14, we can prove
the following two facts about the predicate $P(x,s,r) = \bigvee_{i=1}^m
\big(A(x;r\oplus s_i) = 1\big)$, where $s = (s_1,\ldots,s_m)$ is an $m$-tuple
of $m$-bit strings and $r$ is an $m$-bit string:
\begin{itemize}
\item $x\in Y \implies \P_s\big[\forall r\in \{0,1\}^m,\; P(x,s,r) = 1\big]
> 1 - \frac{2^m}{2^{nm}}$, where $s$ is uniformly random in $(\{0,1\}^m)^m$.
\item $x\in N \implies \forall s\in(\{0,1\}^m)^m,\; \P_r\big[P(x,s,r)
= 1\big]<\frac{m}{2^{n}}$, where $r$ is uniformly random in $\{0,1\}^m$.
\end{itemize}
Clearly $P$ can be computed in polynomial time.
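A minimal Python sketch of the predicate $P$ (the algorithm $A$ is an
arbitrary stand-in, and encoding bit strings as integers is our choice, not
part of the problem):
\begin{verbatim}
from typing import Callable, Sequence

def P(x: str, s: Sequence[int], r: int,
      A: Callable[[str, int], bool]) -> bool:
    """P(x, s, r) = OR over i of A(x; r XOR s_i), where s = (s_1, ..., s_m)
    is an m-tuple of m-bit strings and r is an m-bit string."""
    return any(A(x, r ^ s_i) for s_i in s)
\end{verbatim}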
If we define:
\begin{itemize}
\item $Y' = \big\{(x,s):\, \forall r\in\{0,1\}^m,\; P(x,s,r) = 1\big\}$
\item $N' = \big\{(x,s):\, \P_r[ P(x,s,r) = 1]<\frac{m}{2^n}\big\}$
\end{itemize}
then $(Y', N')$ is clearly in $\cl{prco-RP}$: for input $(x,s)$, take $r$ at
random and accept iff $P(x,s,r) = 1$. Since we are assuming $\cl{prRP}
= \cl{prP}$, we also have $\cl{prco-RP} = \cl{prco-P} = \cl{prP}$. Hence there
exists $Q$, a polynomial time predicate, such that:
\begin{itemize}
\item $x\in Y \implies \P_s\big[Q(x,s) = 1\big]
> 1- \frac{2^m}{2^{nm}}$, where $s$ is uniformly random in $(\{0,1\}^m)^m$.
\item $x\in N \implies \forall s\in(\{0,1\}^m)^m,\; Q(x,s) = 0$.
\end{itemize}
But now we see that the promise problem $(Y'', N'')$ given by:
\begin{itemize}
\item $Y'' = \{x:\, \P_s[Q(x,s) = 1]>1-\frac{2^m}{2^{nm}}\}$
\item $N'' = \{x:\, \forall s,\; Q(x,s) = 0\}$
\end{itemize}
is in $\cl{prRP} = \cl{prP}$; note that $Y\subseteq Y''$ and $N\subseteq N''$.
Hence there is a polynomial time predicate $R$
such that:
\begin{itemize}
\item $x\in Y\implies R(x) = 1$
\item $x\in N\implies R(x) = 0$
\end{itemize}
which proves that $(Y,N)$ is in $\cl{prP}$. This implies that $\cl{prBPP}
= \cl{prP}$ and thus that $\cl{BPP} = \cl{P}$.
\section*{Problem 3.2}
\paragraph{1.} Let us assume that $S_1,\ldots,S_{i-1}$ have already been
constructed and satisfy the $(l,a)$ design requirements. Then, for $S_i$
a uniformly random subset of $[d]$ of size $l$, we have:
\begin{displaymath}
\E_{S_i}\big[\#\{j<i : |S_i\cap S_j|\geq a\}\big]
= \sum_{j=1}^{i-1}\P_{S_i}[|S_i\cap S_j|\geq a]
\end{displaymath}
For a fixed $j$, we see that:
\begin{displaymath}
\P_{S_i}[|S_i\cap S_j|\geq a]\leq
\frac{{l \choose a}{d-a\choose l-a}}{{d\choose l}}
=\frac{{l\choose a}^2}{{d\choose a}}
\end{displaymath}
The inequality follows from a union bound: for $S_i$ to overlap with $S_j$ in
at least $a$ elements, we can first pick a subset of $a$ elements in $S_j$,
and then the remaining $l-a$ elements of $S_i$ among the $d-a$ remaining
elements. The equality uses the identity ${d\choose l}{l\choose a}
= {d\choose a}{d-a\choose l-a}$.
Summing over $j<i$ and using the assumed bound on $m$, we directly obtain:
\begin{displaymath}
\E_{S_i}\big[\#\{j<i : |S_i\cap S_j|\geq a\}\big] < 1
\end{displaymath}
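Numerically, the bound is easy to evaluate; a Python sketch with illustrative
parameter values (all of which are ours, not from the problem statement):
\begin{verbatim}
from math import comb

# E[#{j < i : |S_i cap S_j| >= a}] <= m * C(l,a)^2 / C(d,a) for i <= m.
l, a, m = 16, 4, 16            # a = log2(m) with gamma = 1
d = 4 * l * l // a             # d = O(l^2 / a); constant chosen for the demo
print(m * comb(l, a)**2 / comb(d, a))   # about 0.30 < 1
\end{verbatim}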
\paragraph{2.} We can rewrite the result obtained in \textbf{1.} as: for all
$S_1,\ldots, S_{i-1}$,
\begin{equation}
\label{eq:foo}
\exists S_i\subseteq[d],\; |S_i| = l,\; \forall j<i,\; |S_i\cap S_j|< a
\end{equation}
otherwise we would have:
\begin{displaymath}
\forall S_i\subseteq[d] \text{ with } |S_i| = l,\quad \#\{j<i : |S_i\cap S_j|\geq a\}\geq 1
\end{displaymath}
and taking the expectation over a uniformly random $S_i$ would contradict
\textbf{1.}.
Equation~\eqref{eq:foo} shows that we will be able to construct an $(l,a)$
design: for $i\in\{1,\ldots,m\}$, we are always guaranteed that there exists
a set $S_i$ of size $l$ which overlaps in fewer than $a$ elements with each of
the previously selected sets. We now need to control the sizes of the
different parameters.
For fixed $(l,m)$ and $a = \gamma\log m$ for some $\gamma > 0$, we need:
\begin{displaymath}
m\leq {d\choose a}/{l \choose a}^2
\end{displaymath}
We use the standard inequalities:
\begin{displaymath}
\left(\frac{n}{k}\right)^k\leq
{n\choose k} \leq \left(\frac{ne}{k}\right)^k
\end{displaymath}
to see that the condition is satisfied as soon as:
\begin{displaymath}
\left(\frac{d}{a}\right)^a\geq m\left(\frac{le}{a}\right)^{2a}
\end{displaymath}
This is equivalent to:
\begin{displaymath}
d\geq e^2m^{1/a} \frac{l^2}{a}
= e^22^{1/\gamma}\frac{l^2}{a} = O\left(\frac{l^2}{a}\right)
\end{displaymath}
where the first equality used that $a =\gamma \log m$ (logarithm in base $2$).
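A quick check of this sufficient condition (a Python sketch; the parameter
grid is ours, with $\gamma = 1$ and $m$ a power of two so that $a = \log_2 m$
is an integer):
\begin{verbatim}
from math import comb, e, log2

# Check that d >= e^2 * 2^(1/gamma) * l^2 / a implies C(d,a) >= m * C(l,a)^2.
gamma = 1.0
for l in (8, 16, 32):
    for m in (4, 16, 64):
        a = int(gamma * log2(m))          # exact for these m
        d = int(e**2 * 2**(1 / gamma) * l * l / a) + 1
        assert comb(d, a) >= m * comb(l, a)**2
\end{verbatim}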
\paragraph{3.} We are going to construct the sets $S_1,\dots, S_{m}$ one by
one, and for each $i$, we are going to construct $S_i$ element by element using
the Method of Conditional Expectation. If all the parameters are as in
\textbf{2.}, we know from \textbf{1.} that if $S_1,\dots,S_{i-1}$ have already
been constructed, then:
\begin{displaymath}
\E_{S_i}\left[\#\{j<i : |S_i\cap S_j|\geq a\}\right]<1
\end{displaymath}
For concision, let us write $\phi(S_i) = \#\{j<i : |S_i\cap S_j|\geq a\}$
where $S_i$ is a uniformly random subset of $[d]$ of size $l$, and let us
denote by $S_i^k$ the set constructed after $k$ steps. We then have:
\begin{displaymath}
\E_{S_i}\left[\phi(S_i)\,|\, S_i^k\subseteq S_i\right] =
\E_{e\in [d]\setminus S_i^k}\left[\E_{S_i}\left[\phi(S_i)\,|\,
S_i^k\cup\{e\}\subseteq S_i\right]\right]
\end{displaymath}
where $e$ is chosen uniformly at random in $[d]\setminus S_i^k$. Hence, there
exists $e\in[d]\setminus S_i^k$ such that if we define $S_i^{k+1}=
S_i^k\cup\{e\}$:
\begin{displaymath}
\E_{S_i}\left[\phi(S_i)\,|\, S_i^k\subseteq S_i\right] \geq
\E_{S_i}\left[\phi(S_i)\,|\, S_i^{k+1}\subseteq S_i\right]
\end{displaymath}
One $e$ satisfying this property is, for example:
\begin{equation}
\label{eq:foo2}
e^*\in\argmin_{e\in [d]\setminus S_i^k}\E_{S_i}\left[\phi(S_i)\,|\,
S_i^k\cup\{e\}\subseteq S_i\right]
\end{equation}
Hence for $S_i^k$ constructed by repeatedly adding the element $e^*$ as
defined in \eqref{eq:foo2}, we are guaranteed:
\begin{displaymath}
\forall k,\;\E_{S_i}\left[\phi(S_i)\,|\, S_i^k\subseteq S_i\right] \leq
\E_{S_i}\left[\phi(S_i)\right]< 1
\end{displaymath}
In particular, for $k=l$, $S_i = S_i^l$ is entirely determined, and since
$\phi(S_i)$ is an integer smaller than $1$, we have $\phi(S_i) = 0$. That is,
$S_i$ is a valid set to add to our design.
The only thing left to show is that computing the design can be done in
polynomial time. Denoting by $T$ the running time of computing the
objective function in \eqref{eq:foo2}, constructing the design will take time
$O(mldT) = O(T\poly(m,d))$, since $d = O(l^2/a)$ and $a = \gamma\log m$.
The objective function in \eqref{eq:foo2} can be
rewritten:
\begin{displaymath}
f_k(e) \eqdef \sum_{j=1}^{i-1}\P_S\left[|(S\cup S_i^k\cup\{e\})\cap
S_j|\geq a\right]
\end{displaymath}
where $S$ is a uniformly random subset of $[d]\setminus (S_i^k\cup\{e\})$ of
size $l - k - 1$.
As in \textbf{1.}, we can compute $\P_S\left[|(S\cup S_i^k\cup\{e\})\cap
S_j|\geq a\right]$ exactly using combinatorics. Writing $x_e
= |(S_i^k\cup\{e\})\cap S_j|$, the random variable $|S\cap S_j|$ follows
a hypergeometric distribution, so that:
\begin{displaymath}
\P_S\left[|(S\cup S_i^k\cup\{e\})\cap S_j|\geq a\right]
= \sum_{t\geq a - x_e} \frac{{l - x_e\choose t}{d - k - 1 - l + x_e\choose l - k - 1 - t}}{{d - k -1\choose l-k-1}}
\end{displaymath}
(with the convention that out-of-range binomial coefficients are zero).
Hence $f_k(e)$ can be computed in time $T = \poly(m, l, a, d) = \poly(m,d)$
since $a = \gamma\log m$ and $d = O(l^2/a)$.
Note that for a practical implementation, using standard recursive identities
for binomial coefficients, the computation of the objective function can be
made quite efficient: the binomial coefficients appearing in $f_k(e)$ can be
updated in $O(1)$ arithmetic operations from those appearing in $f_{k-1}(e)$.
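Putting everything together, here is a compact Python sketch of the full
derandomized construction (direct evaluation of the hypergeometric tail,
without the incremental optimization mentioned above; the helper names and the
parameters at the bottom are ours):
\begin{verbatim}
from math import comb

def tail(x, k, l, d, a):
    """Probability that the final overlap with S_j is >= a, given that the
    k+1 elements chosen so far (S_i^k and e) already share x of them with
    S_j; the l-k-1 remaining elements are uniform in a ground set of size
    d-k-1 containing l-x elements of S_j (hypergeometric tail)."""
    free, good, pick = d - k - 1, l - x, l - k - 1
    num = sum(comb(good, t) * comb(free - good, pick - t)
              for t in range(max(a - x, 0), min(good, pick) + 1))
    return num / comb(free, pick)

def next_set(sets, l, d, a):
    """Build S_i element by element, greedily minimizing the conditional
    expectation f_k(e) (method of conditional expectations)."""
    S = set()
    for k in range(l):
        e = min((e for e in range(d) if e not in S),
                key=lambda e: sum(tail(len((S | {e}) & Sj), k, l, d, a)
                                  for Sj in sets))
        S.add(e)
    return frozenset(S)

def design(m, l, d, a):
    sets = []
    for _ in range(m):
        sets.append(next_set(sets, l, d, a))
    return sets

# Illustrative parameters satisfying m * C(l,a)^2 / C(d,a) < 1.
D = design(m=8, l=8, d=64, a=3)
assert all(len(S & T) < 3 for i, S in enumerate(D) for T in D[:i])
\end{verbatim}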
\end{document}