\documentclass[10pt]{article}
\usepackage{fullpage}
\usepackage[utf8x]{inputenc}
\usepackage{amsmath,amsfonts,amsthm}
\usepackage[english]{babel}
\usepackage[capitalize, noabbrev]{cleveref}
\usepackage{algorithm}
\usepackage{algpseudocode}
\newenvironment{enumerate*}%
{\vspace{-2ex} \begin{enumerate} %
\setlength{\itemsep}{-1ex} \setlength{\parsep}{0pt}}%
{\end{enumerate}}
\newenvironment{itemize*}%
{\vspace{-2ex} \begin{itemize} %
\setlength{\itemsep}{-1ex} \setlength{\parsep}{0pt}}%
{\end{itemize}}
\newenvironment{description*}%
{\vspace{-2ex} \begin{description} %
\setlength{\itemsep}{-1ex} \setlength{\parsep}{0pt}}%
{\end{description}}
\DeclareMathOperator{\E}{\mathbb{E}}
\let\P\relax
\DeclareMathOperator{\P}{\mathbb{P}}
\newcommand{\ex}[1]{\E\left[#1\right]}
\newcommand{\prob}[1]{\P\left[#1\right]}
\newcommand{\inprod}[1]{\left\langle #1 \right\rangle}
\newcommand{\R}{\mathbf{R}}
\newcommand{\N}{\mathbf{N}}
\newcommand{\C}{\mathcal{C}}
\newcommand{\eps}{\varepsilon}
\newcommand{\cl}[1]{\text{\textbf{#1}}}
\newcommand{\eqdef}{\mathbin{\stackrel{\rm def}{=}}}
\newcommand{\llbracket}{[\![}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\algrenewcommand\algorithmicensure{\textbf{Output:}}
\algrenewcommand\algorithmicrequire{\textbf{Input:}}
\author{Thibaut Horel}
\title{CS 225 Problem Set 2 -- Solutions}
\begin{document}
\maketitle
Collaborator: Debmalya Mandal
\section*{Problem 2.9}
\paragraph{1.}
Let $\lambda$ be an eigenvalue of $M$ and $x$ be an associated eigenvector. Let
$i^*\in\{1,\ldots, n\}$ such that $|x_{i^*}| = \|x\|_{\infty}$. Since $x$ is an
eigenvector we have:
\begin{displaymath}
\sum_{j=1}^n m_{i^*,j} x_j = \lambda x_{i^*}
\end{displaymath}
$x$ is non-zero, hence $x_{i^*}$ is non-zero. Dividing both sides by $x_{i^*}$,
we obtain:
\begin{displaymath}
|\lambda| =
\left|\sum_{j=1}^n m_{i^*,j} \frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*,j} \left|\frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*, j} = 1
\end{displaymath}
where the first inequality used the triangle inequality and the second
inequality used the definition of $x_{i^*}$.
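\paragraph{} As a quick sanity check (an illustration, not part of the problem
statement), consider the cycle $C_n$ for $n\geq 3$, which is $2$-regular. Its
random walk matrix is circulant, so its eigenvalues are
\begin{displaymath}
\lambda_k = \frac{\omega^k + \omega^{-k}}{2}
= \cos\left(\frac{2\pi k}{n}\right), \qquad \omega = e^{2i\pi/n},
\quad k = 0,\ldots,n-1,
\end{displaymath}
all of which indeed lie in $[-1,1]$.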
\paragraph{2.} Assume $G$ is disconnected, so that it has at least two connected
components. We can write $V = C_1\cup C_2$ with $C_1\cap C_2 = \emptyset$ and
$(C_1\times C_2)\cap E = \emptyset$. Then $x^1 = \mathbf{1}_{C_1}$ (the
indicator vector of $C_1$) and $x^2 = \mathbf{1}_{C_2}$ are eigenvectors of $M$
for the eigenvalue 1. For example, for $x^1$:
\begin{displaymath}
\sum_{j=1}^n m_{i,j} x^1_j =
\begin{cases}
1&\text{if $i\in C_1$}\\
0&\text{otherwise}
\end{cases}
\end{displaymath}
where the first case follows from the fact that when $i\in C_1$, every
non-zero term $m_{i,j}$ has $j\in C_1$ (no edge crosses between $C_1$ and
$C_2$), so multiplying by $x^1_j$ preserves all of them, and $\sum_{j=1}^n
m_{i,j} = 1$. Similarly, if $i\in C_2$ then $m_{i,j} = 0$ whenever $j\in C_1$.
Furthermore $x^1$ and $x^2$ are clearly orthogonal, so they span a vector space
of dimension 2. Hence the multiplicity of $1$ as an eigenvalue is at least 2.
\paragraph{} Let us now assume that $1$ has multiplicity at least $2$. We can
find an eigenvector $x$ for $1$ orthogonal to $\mathbf{1}$, that is, with
$\sum_i x_i = 0$. Let us define $C_1 = \{i\in V: x_i > 0\}$ and $C_2 = \{i\in
V: x_i\leq 0\}$.
Because $x\neq 0$ and $\sum_i x_i = 0$, both $C_1$ and $C_2$ are non-empty.
They are clearly disjoint and their union is $V$. We show that $G$ is
disconnected. Let $i^*$ be such that $|x_{i^*}| = \|x\|_{\infty}$. Without
loss of generality we can assume $x_{i^*} > 0$ (otherwise we consider $x'
= -x$). Since $x$ is an eigenvector for the eigenvalue $1$:
\begin{displaymath}
\sum_{j=1}^n m_{i^*,j}x_j = x_{i^*}
\end{displaymath}
Dividing both sides by $x_{i^*}$ (which is non-zero for the same reason as in
\textbf{1.}):
\begin{displaymath}
1 = \left|\sum_{j=1}^n m_{i^*,j}\frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*,j}\left|\frac{x_j}{x_{i^*}}\right|
\leq \sum_{j=1}^n m_{i^*,j} = 1
\end{displaymath}
where we again used the triangle inequality, the definition of $x_{i^*}$, and
the fact that each row of $M$ sums to one. Hence the chain of inequalities is
a chain of equalities. This implies that $x_j = x_{i^*}$ for all $j$ such that
$m_{i^*,j} \neq 0$. We can then repeatedly apply the same argument to the
neighbors of $i^*$, the neighbors of their neighbors, etc. By induction (over
the distance to $i^*$), we see that $x_j = x_{i^*} > 0$ for all vertices $j$
in the same connected component as $i^*$. Hence the connected component of
$i^*$ is entirely contained in $C_1$; since $C_2$ is non-empty and disjoint
from it, $G$ is indeed disconnected.
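\paragraph{} For illustration (this example is not part of the problem
statement), take $G$ to be the disjoint union of two triangles, so that $d
= 2$ and $n = 6$. Then $M$ is block diagonal with two copies of
$\frac{1}{2}(J_3 - I)$ (where $J_3$ is the all-ones $3\times 3$ matrix), each
of spectrum $\{1, -\frac{1}{2}, -\frac{1}{2}\}$. The eigenvalue $1$ thus has
multiplicity exactly $2$, with eigenvectors $\mathbf{1}_{C_1} = (1,1,1,0,0,0)$
and $\mathbf{1}_{C_2} = (0,0,0,1,1,1)$, as in the proof above.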
\paragraph{3.} Assuming that $G$ is connected, $G$ being bipartite is
equivalent to $G^2$ being disconnected: a walk of length 2 stays on the same
side of a bipartition, so in $G^2$ no edge joins the two sides; conversely, if
$G$ is non-bipartite it contains an odd cycle, and any two vertices can then
be joined by a walk of even length, so $G^2$ is connected. Hence, assuming $G$
connected and using \textbf{2.}:
\begin{align*}
\text{$G$ bipartite} &\Leftrightarrow \text{$G^2$ disconnected}\\
&\Leftrightarrow \text{$1$ is an eigenvalue of $M^2$ of multiplicity at least 2}\\
&\Leftrightarrow \exists x\perp \mathbf{1},\, x\neq 0,\; M^2x = x\\
&\Leftrightarrow \exists x\perp\mathbf{1},\, x\neq 0,\; Mx = \pm x\\
&\Leftrightarrow \exists x\perp\mathbf{1},\, x\neq 0,\; Mx = -x\\
&\Leftrightarrow \text{$-1$ is an eigenvalue of $M$.}
\end{align*}
The second equivalence uses \textbf{2.} applied to $G^2$, whose random walk
matrix is $M^2$. The third holds because $\mathbf{1}$ is always an eigenvector
of $M^2$ for the eigenvalue $1$. The fourth uses that the eigenvalues of $M^2$
are the squares of the eigenvalues of $M$, and the fifth uses that $x$ cannot
be an eigenvector for the eigenvalue $1$: since $G$ is connected, by
\textbf{2.} the eigenspace of $1$ is spanned by $\mathbf{1}$, whereas
$x\perp\mathbf{1}$.
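\paragraph{} For example (again an illustration beyond the problem statement),
the $4$-cycle is connected and bipartite, and its random walk matrix has
spectrum $\{1, 0, 0, -1\}$. The vector $x = (1,-1,1,-1)$, which alternates
along the bipartition, is orthogonal to $\mathbf{1}$ and satisfies $Mx = -x$.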
\paragraph{4.} First observe that $\max_{\|x\|_2 = 1} \inprod{Mx, x}
= \lambda_{\max}(M)$ for any symmetric matrix $M$. Indeed, writing $M = P^TDP$
with $P$ orthogonal and $D = \text{diag}(\lambda_1,\dots,\lambda_n)$ by the
spectral theorem, and observing that $\|x\|_2 = 1\Leftrightarrow\|Px\|_2 = 1$,
we have:
\begin{displaymath}
\max_{\|x\|_2 = 1} \inprod{Mx, x} = \max_{\|y\|_2=1} y^TDy = \max_{\|y\|_2
= 1} \sum_{i=1}^n\lambda_i y_i^2.
\end{displaymath}
The right-hand side is at most $\lambda_{\max}(M)$, and this value is attained
when $y_i = 1$ for some coordinate $i$ such that $\lambda_i
= \lambda_{\max}(M)$ and $y_j = 0$ for $j\neq i$.
Now observe that $\max_x\inprod{Mx, x}$, when $x$ ranges over unit vectors
orthogonal to $\mathbf{1}$, is exactly the largest eigenvalue of the
restriction of $M$ to the subspace orthogonal to $\mathbf{1}$ (restricting $M$
to this subspace is allowed since the subspace is invariant under $M$). But
since the eigenspaces of $M$ are orthogonal, the largest eigenvalue of $M$
restricted to the orthogonal complement of $\mathbf{1}$ is the second largest
eigenvalue of $M$. Hence:
\begin{displaymath}
\lambda_2(M) = \max_{\substack{\|x\|_2 = 1 \\ x\perp\mathbf{1}}} \inprod{Mx, x}.
\end{displaymath}
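\paragraph{} As a worked instance of this variational formula (not part of the
problem statement), consider the complete graph $K_n$, for which $M
= \frac{1}{n-1}(J - I)$ with $J$ the all-ones matrix. Any unit vector
$x\perp\mathbf{1}$ satisfies $Jx = 0$, hence
\begin{displaymath}
\inprod{Mx, x} = \frac{1}{n-1}\inprod{(J-I)x, x} = -\frac{1}{n-1},
\end{displaymath}
so the maximum equals $-\frac{1}{n-1}$, which is indeed the second largest
eigenvalue of $M$ (its spectrum is $\{1\}\cup\{-\frac{1}{n-1}\}$, the latter
with multiplicity $n-1$).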
\paragraph{} Let us now show the second identity given in the problem
statement:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 = \sum_{(i,j)\in E} (x_i^2 + x_j^2)
- 2\sum_{(i,j)\in E} x_ix_j = d\|x\|_2^2 - d\sum_{i,j} m_{i,j}x_i x_j
\end{displaymath}
where the sums over $E$ range over the (unordered) edges of $G$. The second
equality used that each vertex is an endpoint of exactly $d$ edges, so that
$\sum_{(i,j)\in E}(x_i^2 + x_j^2) = d\|x\|_2^2$, and that $m_{i,j}
= \frac{1}{d}a_{i,j}$, where $a_{i,j}$ is the $(i,j)$ entry of the adjacency
matrix of $G$, so that $2\sum_{(i,j)\in E} x_ix_j = \sum_{i,j} a_{i,j}x_ix_j
= d\sum_{i,j} m_{i,j}x_ix_j$. For $\|x\|_2 = 1$:
\begin{displaymath}
1- \frac{1}{d}\sum_{(i,j)\in E} (x_i-x_j)^2
= \sum_{i,j}m_{i,j}x_i x_j = \inprod{Mx, x}
\end{displaymath}
Taking the $\max$ of both sides over unit vectors $x\perp\mathbf{1}$ gives the
second identity.
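\paragraph{} To check this identity on the smallest case (an illustration
only), take $G$ to be a single edge: $n = 2$, $d = 1$ and $M
= \begin{pmatrix}0&1\\1&0\end{pmatrix}$. For $x_1^2 + x_2^2 = 1$ we get $1
- (x_1 - x_2)^2 = 2x_1x_2 = \inprod{Mx, x}$, as claimed.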
Let us now consider $x$ such that $\|x\|_2 = 1$ and $\sum_{i} x_i = 0$, and
let $i^*$ be such that $|x_{i^*}| = \|x\|_{\infty}$. Because $\|x\|_2 = 1$, we
have $|x_{i^*}| \geq \frac{1}{\sqrt{n}}$. Because $\sum_i x_i = 0$, there is
$k$ such that $x_k$ has the opposite sign of $x_{i^*}$. Since $G$ is
connected, there is a path of length $l\leq n$ from $i^*$ to $k$. Let us index
the vertices of this path $i^* = i_1,\ldots, i_{l+1} = k$. Then:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 \geq \sum_{s=1}^l (x_{i_{s+1}}
- x_{i_s})^2
\end{displaymath}
where we restricted the sum to the edges of the path from $i^*$ to $k$.
Applying the Cauchy-Schwarz inequality:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 \geq \frac{1}{l} \left(\sum_{s=1}^l (x_{i_{s+1}}
- x_{i_s})\right)^2 = \frac{1}{l} (x_k - x_{i^*})^2\geq \frac{1}{l} x_{i^*}^2
\end{displaymath}
where the last inequality used that $x_{i^*}$ and $x_k$ have opposite signs.
Finally, using $x_{i^*}^2 \geq \frac{1}{n}$ and $l\leq n$:
\begin{displaymath}
\sum_{(i,j)\in E} (x_i-x_j)^2 \geq \frac{1}{n^2}
\end{displaymath}
Since this lower bound holds for every unit $x$ orthogonal to $\mathbf{1}$,
the variational expression for $\lambda_2$ (second largest eigenvalue)
established at the beginning of the question gives
$\lambda_2\leq 1-\frac{1}{dn^2}$.
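\paragraph{} As an aside (not part of the problem statement), this bound is
tight up to constant factors: for the cycle $C_n$ ($d = 2$) we have $\lambda_2
= \cos(2\pi/n) = 1 - \frac{2\pi^2}{n^2} + O(n^{-4})$, to be compared with the
bound $1 - \frac{1}{2n^2}$.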
\paragraph{5.} We consider the graph $G^2$. As seen in \textbf{3.}, $G^2$ is
connected since $G$ is connected and non-bipartite. Hence, we can apply
question \textbf{4.} to $G^2$: $\lambda_2(M^2)\leq 1-\frac{1}{d^2n^2}$ (the
degree of
$G^2$ is $d^2$). But the eigenvalues of $M^2$ are the squares of the
eigenvalues of $M$. So all the eigenvalues of $M$ other than 1 have absolute
value at most:
\begin{displaymath}
\sqrt{1-\frac{1}{d^2n^2}}\leq 1 - \frac{1}{2d^2n^2}
\end{displaymath}
where we used $\sqrt{1-x}\leq 1-\frac{x}{2}$ for $x\in[0,1]$; this concludes
the question, by definition of $\lambda(G)$.
\paragraph{6.} The analysis in \textbf{4.} used that the path from $i^*$ to
$k$ has length $l\leq n$. If we know the diameter $D$, we have $l\leq D$, and
the same analysis gives $\lambda_2\leq 1- \frac{1}{dDn}$. Finally, \textbf{5.}
implies that $\gamma(G) \geq \frac{1}{2d^2n^2}$.
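\paragraph{} As an illustration (beyond the problem statement), for the
hypercube on $n = 2^d$ vertices, of degree $d$ and diameter $D = d = \log_2
n$, the diameter-based bound gives $\lambda_2\leq 1 - \frac{1}{n\log_2^2 n}$,
improving on the bound $1 - \frac{1}{n^2\log_2 n}$ from \textbf{4.} (the exact
value is $\lambda_2 = 1 - \frac{2}{\log_2 n}$).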
\section*{Problem 3.1}
\section*{Problem 3.2}
\end{document}