-rw-r--r--  appendix.tex       | 152
-rw-r--r--  approximation.tex  |   4
-rw-r--r--  paper.tex          |   2
-rw-r--r--  proofs.tex         |   0
4 files changed, 81 insertions, 77 deletions
diff --git a/appendix.tex b/appendix.tex
index b757e8e..ca6d1df 100644
--- a/appendix.tex
+++ b/appendix.tex
@@ -8,7 +8,7 @@ For all $\lambda\in[0,1]^{n},$
\end{lemma}
\begin{proof}
- The bound $F_{\mathcal{N}}(\lambda)\leq L_{\mathcal{N}(\lambda)}$ follows by the concavity of the $\log\det$ function.
+ The bound $F_{\mathcal{N}}(\lambda)\leq L_{\mathcal{N}}(\lambda)$ follows by the concavity of the $\log\det$ function and Jensen's inequality.
To show the lower bound,
we first prove that $\frac{1}{2}$ is a lower bound of the ratio $\partial_i
F(\lambda)/\partial_i L(\lambda)$, where
@@ -93,80 +93,84 @@ In particular,
\begin{gather*}
\forall S\subseteq\mathcal{N},\quad A(S)^{-1} \succeq A(S\cup\{i\})^{-1}
\end{gather*}
-as $A(S)\preceq A(S\cup\{i\})$. Observe that
- % \begin{gather*}
- % \forall
-$P_{\mathcal{N}\setminus\{i\}}^\lambda(S)\geq P_{\mathcal{N}\setminus\{i\}}^\lambda(S\cup\{i\})$ for all $S\subseteq\mathcal{N}\setminus\{i\}$, and
- % ,\\
- $P_{\mathcal{N}\setminus\{i\}}^\lambda(S) \geq P_\mathcal{N}^\lambda(S),$ for all $S\subseteq\mathcal{N}$.
- %\end{gather*}
- Hence,
- \begin{align*}
- \partial_i F(\lambda)
- % & = \sum_{\substack{S\subseteq\mathcal{N}\\ i\in\mathcal{N}\setminus S}} P_\mathcal{N}^\lambda(S) \log\Big(1 + \T{x_i}A(S)^{-1}x_i\Big)\\
- & \geq \frac{1}{4}
- \sum_{\substack{S\subseteq\mathcal{N}\\ i\in\mathcal{N}\setminus S}}
- P_{\mathcal{N}\setminus\{i\}}^\lambda(S)
- \log\Big(1 + \T{x_i}A(S)^{-1}x_i\Big)\\
- &\hspace{-3.5em}+\frac{1}{4}
- \sum_{\substack{S\subseteq\mathcal{N}\\ i\in\mathcal{N}\setminus S}}
- P_{\mathcal{N}\setminus\{i\}}^\lambda(S\cup\{i\})
- \log\Big(1 + \T{x_i}A(S\cup\{i\})^{-1}x_i\Big)\\
- &\geq \frac{1}{4}
- \sum_{S\subseteq\mathcal{N}}
- P_\mathcal{N}^\lambda(S)
- \log\Big(1 + \T{x_i}A(S)^{-1}x_i\Big).
- \end{align*}
- Using that $A(S)\succeq I_d$ we get that $\T{x_i}A(S)^{-1}x_i \leq
- \norm{x_i}_2^2 \leq 1$. Moreover, $\log(1+x)\geq x$ for all $x\leq 1$.
- Hence,
- \begin{displaymath}
- \partial_i F(\lambda) \geq
- \frac{1}{4}
- \T{x_i}\bigg(\sum_{S\subseteq\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)^{-1}\bigg)x_i.
- \end{displaymath}
- Finally, using that the inverse is a matrix convex function over symmetric
- positive definite matrices:
- \begin{displaymath}
- \partial_i F(\lambda) \geq
- \frac{1}{4}
- \T{x_i}\bigg(\sum_{S\subseteq\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)\bigg)^{-1}x_i
- = \frac{1}{4}\T{x_i}\tilde{A}(\lambda)^{-1}x_i
- = \frac{1}{2}
- \partial_i L(\lambda).
- \end{displaymath}
+as $A(S)\preceq A(S\cup\{i\})$. Observe that since $0\leq \lambda_i\leq 1$,
+$P_{\mathcal{N}\setminus\{i\}}^\lambda(S) \geq P_\mathcal{N}^\lambda(S)$ and
+$P_{\mathcal{N}\setminus\{i\}}^\lambda(S)\geq P_{\mathcal{N}}^\lambda(S\cup\{i\})$
+for all $S\subseteq\mathcal{N}\setminus\{i\}$.
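+% Both inequalities follow from the product form of the sampling
+% distribution, assuming as in the main text that
+% $P_{\mathcal{M}}^\lambda(S) = \prod_{j\in S}\lambda_j
+% \prod_{j\in\mathcal{M}\setminus S}(1-\lambda_j)$: for
+% $S\subseteq\mathcal{N}\setminus\{i\}$ one has
+% $P_\mathcal{N}^\lambda(S) = (1-\lambda_i)\,P_{\mathcal{N}\setminus\{i\}}^\lambda(S)$
+% and $P_\mathcal{N}^\lambda(S\cup\{i\}) = \lambda_i\,P_{\mathcal{N}\setminus\{i\}}^\lambda(S)$,
+% and both $\lambda_i$ and $1-\lambda_i$ lie in $[0,1]$.
+Hence,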
+\begin{align*}
+ \partial_i F(\lambda)
+ & \geq \frac{1}{4}
+ \sum_{\substack{S\subseteq\mathcal{N}\\ i\in\mathcal{N}\setminus S}}
+ P_{\mathcal{N}}^\lambda(S)
+ \log\Big(1 + \T{x_i}A(S)^{-1}x_i\Big)\\
+ &\hspace{-3.5em}+\frac{1}{4}
+ \sum_{\substack{S\subseteq\mathcal{N}\\ i\in\mathcal{N}\setminus S}}
+ P_{\mathcal{N}}^\lambda(S\cup\{i\})
+ \log\Big(1 + \T{x_i}A(S\cup\{i\})^{-1}x_i\Big)\\
+ &\geq \frac{1}{4}
+ \sum_{S\subseteq\mathcal{N}}
+ P_\mathcal{N}^\lambda(S)
+ \log\Big(1 + \T{x_i}A(S)^{-1}x_i\Big).
+\end{align*}
+Using that $A(S)\succeq I_d$, we get that $\T{x_i}A(S)^{-1}x_i \leq
+\norm{x_i}_2^2 \leq 1$. Moreover, $\log(1+x)\geq x$ for all $x\in[0,1]$.
+Hence,
+\begin{displaymath}
+ \partial_i F(\lambda) \geq
+ \frac{1}{4}
+ \T{x_i}\bigg(\sum_{S\subseteq\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)^{-1}\bigg)x_i.
+\end{displaymath}
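+% The bound $\log(1+x)\geq x$ on $[0,1]$ used above holds when $\log$
+% denotes the base-2 logarithm (which we assume here): $\log_2(1+x)$ is
+% concave and agrees with the chord $x$ at the endpoints $x=0$ and $x=1$,
+% so it dominates the chord in between. With the natural logarithm one
+% only gets $\ln(1+x)\geq x\ln 2$ on this interval.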
+Finally, using that matrix inversion is a matrix convex function over
+symmetric positive definite matrices, together with Jensen's inequality:
+\begin{displaymath}
+ \partial_i F(\lambda) \geq
+ \frac{1}{4}
+ \T{x_i}\bigg(\sum_{S\subseteq\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)\bigg)^{-1}x_i
+ = \frac{1}{4}\T{x_i}\tilde{A}(\lambda)^{-1}x_i
+ = \frac{1}{2}
+ \partial_i L(\lambda).
+\end{displaymath}
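+% For completeness: the previous step is the operator Jensen inequality,
+% $\sum_S P_\mathcal{N}^\lambda(S)\,A(S)^{-1} \succeq \big(\sum_S
+% P_\mathcal{N}^\lambda(S)\,A(S)\big)^{-1}$, valid because the weights
+% $P_\mathcal{N}^\lambda(S)$ are non-negative and sum to one. The final
+% identification with $\tilde{A}(\lambda)$ can be checked directly,
+% assuming $A(S) = I_d + \sum_{j\in S} x_j\T{x_j}$ as in the main text:
+% by linearity, $\sum_S P_\mathcal{N}^\lambda(S)\,A(S) = I_d +
+% \sum_{j\in\mathcal{N}} \Pr[j\in S]\,x_j\T{x_j} = I_d +
+% \sum_{j\in\mathcal{N}} \lambda_j\,x_j\T{x_j} = \tilde{A}(\lambda)$.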
-Having bound the ratio between the partial derivatives, we now bound the ratio $F(\lambda)/L(\lambda)$ from below. Consider the following cases.
- First, if the minimum of the ratio
- $F(\lambda)/L(\lambda)$ is attained at a point interior to the hypercube, then it is
- a critical point, \emph{i.e.}, $\partial_i \big(F(\lambda)/L(\lambda)\big)=0$ for all $i\in \mathcal{N}$; hence, at such a critical point:
- \begin{equation}\label{eq:lhopital}
- \frac{F(\lambda)}{L(\lambda)}
- = \frac{\partial_i F(\lambda)}{\partial_i
- L(\lambda)} \geq \frac{1}{2}.
- \end{equation}
- Second, if the minimum is attained as
- $\lambda$ converges to zero in, \emph{e.g.}, the $l_2$ norm, by the Taylor approximation, one can write:
- \begin{displaymath}
- \frac{F(\lambda)}{L(\lambda)}
- \sim_{\lambda\rightarrow 0}
- \frac{\sum_{i\in \mathcal{N}}\lambda_i\partial_i F(0)}
- {\sum_{i\in\mathcal{N}}\lambda_i\partial_i L(0)}
- \geq \frac{1}{2},
- \end{displaymath}
- \emph{i.e.}, the ratio $\frac{F(\lambda)}{L(\lambda)}$ is necessarily bounded from below by 1/2 for small enough $\lambda$.
- Finally, if the minimum is attained on a face of the hypercube $[0,1]^n$ (a face is
- defined as a subset of the hypercube where one of the variable is fixed to
- 0 or 1), without loss of generality, we can assume that the minimum is
- attained on the face where the $n$-th variable has been fixed
- to 0 or 1. Then, either the minimum is attained at a point interior to the
- face or on a boundary of the face. In the first sub-case, relation
- \eqref{eq:lhopital} still characterizes the minimum for $i< n$.
- In the second sub-case, by repeating the argument again by induction, we see
- that all is left to do is to show that the bound holds for the vertices of
- the cube (the faces of dimension 1). The vertices are exactly the binary
- points, for which we know that both relaxations are equal to the value
- function $V$. Hence, the ratio is equal to 1 on the vertices.
+Having bounded the ratio of the partial derivatives, we now bound the ratio
+$F(\lambda)/L(\lambda)$ from below. Consider the following cases.
+
+First, suppose the minimum is attained in the limit as $\lambda$ converges
+to zero (\emph{e.g.}, in the $l_2$ norm). Then, by a first-order Taylor
+expansion of $F$ and $L$ around $0$, one can write:
+\begin{displaymath}
+ \frac{F(\lambda)}{L(\lambda)}
+ \sim_{\lambda\rightarrow 0}
+ \frac{\sum_{i\in \mathcal{N}}\lambda_i\partial_i F(0)}
+ {\sum_{i\in\mathcal{N}}\lambda_i\partial_i L(0)}
+ \geq \frac{1}{2},
+\end{displaymath}
+\emph{i.e.}, the ratio $\frac{F(\lambda)}{L(\lambda)}$ is necessarily bounded
+from below by 1/2 for small enough $\lambda$.
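+% The last inequality is the mediant bound: for non-negative weights
+% $\lambda_i$ and positive denominators, $\sum_i \lambda_i a_i \big/
+% \sum_i \lambda_i b_i \geq \min_i a_i/b_i$. Here $a_i = \partial_i F(0)$
+% and $b_i = \partial_i L(0)$, and $a_i/b_i \geq 1/2$ by the bound on the
+% partial derivatives (assuming each $x_i \neq 0$, so that
+% $\partial_i L(0) > 0$).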
+
+Second, suppose the minimum of the ratio $F(\lambda)/L(\lambda)$ is attained
+at a vertex of the hypercube $[0,1]^n$ different from $0$. Since $F$ and $L$
+are relaxations of the value function $V$, they coincide with $V$ at the
+vertices, which are exactly the binary points. Hence the minimum equals $1$
+in this case; in particular, it is greater than $1/2$.
+
+Finally, suppose the minimum is attained at a point $\lambda^*$ with at least
+one coordinate in the open interval $(0,1)$; let $i$ be the index of one such
+coordinate and consider the function
+\begin{displaymath}
+ G_i: x \mapsto \frac{F}{L}(\lambda_1^*,\ldots,\lambda_{i-1}^*, x,
+ \lambda_{i+1}^*, \ldots, \lambda_n^*).
+\end{displaymath}
+Then $G_i$ attains a minimum at the interior point $\lambda^*_i\in(0,1)$, so
+its derivative vanishes there:
+\begin{displaymath}
+  0 = G_i'(\lambda^*_i) = \partial_i\left(\frac{F}{L}\right)(\lambda^*).
+\end{displaymath}
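+% To spell out the implication that follows: by the quotient rule,
+% $\partial_i(F/L) = \big(L\,\partial_i F - F\,\partial_i L\big)/L^2$, so a
+% vanishing derivative forces $L(\lambda^*)\,\partial_i F(\lambda^*)
+% = F(\lambda^*)\,\partial_i L(\lambda^*)$; dividing through by
+% $L(\lambda^*)\,\partial_i L(\lambda^*)$ (both positive whenever
+% $\lambda^*\neq 0$ and the $x_i$ are non-zero, which we assume here)
+% yields the displayed identity.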
+Now, $\partial_i(F/L)(\lambda^*)=0$ implies that
+\begin{displaymath}
+ \frac{F(\lambda^*)}{L(\lambda^*)} = \frac{\partial_i
+ F(\lambda^*)}{\partial_i L(\lambda^*)}\geq \frac{1}{2}
+\end{displaymath}
+using the lower bound on the ratio of the partial derivatives. This concludes
+the proof of the lemma.
\end{proof}
We now prove that $F$ admits the following exchange property: let
diff --git a/approximation.tex b/approximation.tex
index 81a6d0c..23dc212 100644
--- a/approximation.tex
+++ b/approximation.tex
@@ -23,7 +23,7 @@ problem with a constant approximation ratio to \EDP{}
(Proposition~\ref{prop:relaxation}) and then showing how to approximately solve
this problem in a monotone way.
-\subsection{A concave relaxation of \EDP}\label{sec:concave}
+\subsection{A Concave Relaxation of \EDP}\label{sec:concave}
A classical way of relaxing combinatorial optimization problems is
\emph{relaxing by expectation}, using the so-called \emph{multi-linear}
@@ -87,7 +87,7 @@ proposition which is also proved in the Appendix.
$L^*_c \leq 2 OPT + 2\max_{i\in\mathcal{N}}V(i)$.
\end{proposition}
-\subsection{Solving a convex problem monotonously}\label{sec:monotonicity}
+\subsection{Solving a Convex Problem Monotonically}\label{sec:monotonicity}
Note, that the feasible set in Problem~\eqref{eq:primal} increases (for the
inclusion) when the cost decreases. As a consequence, $c\mapsto L^*_c$ is
diff --git a/paper.tex b/paper.tex
index 7b821b6..1850eb9 100644
--- a/paper.tex
+++ b/paper.tex
@@ -31,7 +31,7 @@
\section{Preliminaries}\label{sec:peel}
\input{problem}
-\section{Approximation results}\label{sec:approximation}
+\section{Approximation Results}\label{sec:approximation}
\input{approximation}
\section{Mechanism for \SEDP{}}\label{sec:mechanism}
\input{main}
diff --git a/proofs.tex b/proofs.tex
deleted file mode 100644
index e69de29..0000000
--- a/proofs.tex
+++ /dev/null