author     Thibaut Horel <thibaut.horel@gmail.com>   2013-06-07 12:38:45 +0200
committer  Thibaut Horel <thibaut.horel@gmail.com>   2013-06-07 12:38:45 +0200
commit     55473070decc94d4bf261cdd01d718305ea07073 (patch)
tree       00a8405201e13cbca6ce1459b6c7159d0f7222e5 /dual.tex
parent     aa5094388e98fc60fb5f4829c4294711ee8267aa (diff)
Fixing small typos (Stratis' suggestions)
Diffstat (limited to 'dual.tex')
-rw-r--r--   dual.tex   263
1 file changed, 263 insertions, 0 deletions
diff --git a/dual.tex b/dual.tex
new file mode 100644
index 0000000..a2d28c6
--- /dev/null
+++ b/dual.tex
@@ -0,0 +1,263 @@
+\documentclass{article}
+\usepackage[T1]{fontenc}
+\usepackage[utf8]{inputenc}
+\usepackage{amsmath, amsfonts, amsthm}
+\newtheorem{proposition}{Proposition}
+\newtheorem{lemma}{Lemma} % assuming the `lemma' environment is not already provided by definitions.tex
+\input{definitions}
+
+\begin{document}
+Let $c$ be a cost vector in $[0,1]^n$, and let $x_1,\ldots,x_n$ be $n$ vectors in
+$\mathbf{R}^d$ such that $b\leq \T{x_i}x_i\leq 1$ for all $i\in\{1,\ldots,n\}$,
+for some $b\in(0,1]$. Let us consider the following convex optimization
+problem:
+\begin{equation}\tag{$P_c$}\label{eq:primal}
+ \begin{split}
+ \mathrm{maximize}\quad & L(\lambda) \defeq\log\det\left(I_d + \sum_{i=1}^n
+ \lambda_i x_i x_i^T\right)\\
+ \textrm{subject to}\quad & c^T\lambda \leq 1 \;;\; 0\leq\lambda\leq \mathbf{1}
+\end{split}
+\end{equation}
+We denote by $L^*_c$ its optimal value.
+
+Let $\alpha\in\mathbf{R}^+$ and consider the perturbed optimization problem:
+\begin{equation}\tag{$P_{c, \alpha}$}\label{eq:perturbed-primal}
+ \begin{split}
+ \mathrm{maximize}\quad & L(\lambda) \defeq\log\det\left(I_d + \sum_{i=1}^n
+ \lambda_i x_i x_i^T\right)\\
+ \textrm{subject to}\quad & c^T\lambda \leq 1 \;;\; \alpha\leq\lambda\leq \mathbf{1}
+\end{split}
+\end{equation}
+and denote by $L^*_c(\alpha)$ its optimal value. Note that we have $L^*_c = L^*_c(0)$.
+
+We will assume that $\alpha<\frac{1}{n}$ so that \eqref{eq:perturbed-primal} has at
+least one feasible point: $(\frac{1}{n},\ldots,\frac{1}{n})$.
+
+Let us introduce the Lagrangian of problem \eqref{eq:perturbed-primal}:
+\begin{displaymath}
+ \mathcal{L}_{c, \alpha}(\lambda, \mu, \nu, \xi) \defeq L(\lambda)
+ + \T{\mu}(\lambda-\alpha\mathbf{1}) + \T{\nu}(\mathbf{1}-\lambda) + \xi(1-\T{c}\lambda)
+\end{displaymath}
+so that:
+\begin{displaymath}
+ L^*_c(\alpha) = \min_{\mu, \nu, \xi\geq 0}\max_\lambda \mathcal{L}_{c, \alpha}(\lambda, \mu, \nu, \xi)
+\end{displaymath}
+Similarly, we define $\mathcal{L}_{c}\defeq\mathcal{L}_{c, 0}$, the Lagrangian of \eqref{eq:primal}.
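+
+The min-max identity above is strong duality; it applies here because the
+objective is concave, the constraints are affine, and Slater's condition
+holds: since $\alpha<\frac{1}{n}$, the point
+$\bar\lambda\defeq\frac{\alpha+1/n}{2}\,\mathbf{1}$ is strictly feasible for
+\eqref{eq:perturbed-primal}, as
+\begin{displaymath}
+  \alpha < \bar\lambda_i < \frac{1}{n}\leq 1
+  \quad\mathrm{and}\quad
+  \T{c}\bar\lambda \leq n\,\frac{\alpha + 1/n}{2} < 1
+\end{displaymath}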
+
+Let $\lambda^*$ be primal optimal for \eqref{eq:perturbed-primal}, and $(\mu^*,
+\nu^*, \xi^*)$ be dual optimal for the same problem. In addition to primal and
+dual feasibility, the KKT conditions give $\forall i\in\{1, \ldots, n\}$:
+\begin{gather*}
+ \partial_i L(\lambda^*) + \mu_i^* - \nu_i^* - \xi^* c_i = 0\\
+ \mu_i^*(\lambda_i^* - \alpha) = 0\\
+ \nu_i^*(1 - \lambda_i^*) = 0
+\end{gather*}
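+
+The first of these conditions is the stationarity of the Lagrangian in
+$\lambda$, spelled out below for convenience, while the last two are
+complementary slackness for the constraints $\lambda\geq\alpha\mathbf{1}$ and
+$\lambda\leq\mathbf{1}$:
+\begin{displaymath}
+  \frac{\partial\mathcal{L}_{c, \alpha}}{\partial\lambda_i}(\lambda^*, \mu^*, \nu^*, \xi^*)
+  = \partial_i L(\lambda^*) + \mu_i^* - \nu_i^* - \xi^* c_i = 0
+\end{displaymath}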
+
+\begin{lemma}\label{lemma:derivative-bounds}
+  Let $\partial_i L(\lambda)$ denote the $i$-th partial derivative of $L$, for $i\in\{1,\ldots, n\}$. Then:
+ \begin{displaymath}
+ \forall\lambda\in[0, 1]^n,\;\frac{b}{2^n} \leq \partial_i L(\lambda) \leq 1
+ \end{displaymath}
+\end{lemma}
+
+\begin{proof}
+ Let us define:
+ \begin{displaymath}
+ S(\lambda)\defeq I_d + \sum_{i=1}^n \lambda_i x_i\T{x_i}
+ \quad\mathrm{and}\quad
+ S_k \defeq I_d + \sum_{i=1}^k x_i\T{x_i}
+ \end{displaymath}
+
+  We have $\partial_i L(\lambda) = \T{x_i}S(\lambda)^{-1}x_i$. Since
+  $S(\lambda)\geq I_d$, $\partial_i L(\lambda)\leq \T{x_i}x_i \leq 1$, which
+  proves the upper bound of the lemma.
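+
+  The expression $\partial_i L(\lambda) = \T{x_i}S(\lambda)^{-1}x_i$ used
+  above is the standard derivative of the log-determinant (Jacobi's formula),
+  recalled here for convenience:
+  \begin{displaymath}
+    \partial_i L(\lambda)
+    = \operatorname{tr}\big(S(\lambda)^{-1}\,\partial_i S(\lambda)\big)
+    = \operatorname{tr}\big(S(\lambda)^{-1} x_i\T{x_i}\big)
+    = \T{x_i}S(\lambda)^{-1}x_i
+  \end{displaymath}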
+
+  For the lower bound, note that $S(\lambda) \leq S_n$. Hence
+  $\partial_i L(\lambda)\geq \T{x_i}S_n^{-1}x_i$.
+
+ Using the Sherman-Morrison formula, for all $k\geq 1$:
+ \begin{displaymath}
+ \T{x_i}S_k^{-1} x_i = \T{x_i}S_{k-1}^{-1}x_i
+ - \frac{(\T{x_i}S_{k-1}^{-1}x_k)^2}{1+\T{x_k}S_{k-1}^{-1}x_k}
+ \end{displaymath}
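+
+  Here the Sherman-Morrison formula is used in its rank-one form: for an
+  invertible matrix $A$ and a vector $u$ such that $1+\T{u}A^{-1}u\neq 0$,
+  \begin{displaymath}
+    (A + u\T{u})^{-1} = A^{-1} - \frac{A^{-1}u\T{u}A^{-1}}{1+\T{u}A^{-1}u}
+  \end{displaymath}
+  The recursion above follows by taking $A = S_{k-1}$ and $u = x_k$, so that
+  $A + u\T{u} = S_k$, and multiplying on the left by $\T{x_i}$ and on the
+  right by $x_i$.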
+
+ By the Cauchy-Schwarz inequality:
+ \begin{displaymath}
+ (\T{x_i}S_{k-1}^{-1}x_k)^2 \leq \T{x_i}S_{k-1}^{-1}x_i\;\T{x_k}S_{k-1}^{-1}x_k
+ \end{displaymath}
+
+ Hence:
+ \begin{displaymath}
+ \T{x_i}S_k^{-1} x_i \geq \T{x_i}S_{k-1}^{-1}x_i
+ - \T{x_i}S_{k-1}^{-1}x_i\frac{\T{x_k}S_{k-1}^{-1}x_k}{1+\T{x_k}S_{k-1}^{-1}x_k}
+ \end{displaymath}
+
+  But $\T{x_k}S_{k-1}^{-1}x_k\leq \T{x_k}x_k\leq 1$ (again because
+  $S_{k-1}\geq I_d$), and $\frac{a}{1+a}\leq \frac{1}{2}$ whenever
+  $0\leq a\leq 1$, so:
+ \begin{displaymath}
+ \T{x_i}S_{k}^{-1}x_i \geq \T{x_i}S_{k-1}^{-1}x_i
+ - \frac{1}{2}\T{x_i}S_{k-1}^{-1}x_i\geq \frac{\T{x_i}S_{k-1}^{-1}x_i}{2}
+ \end{displaymath}
+
+  By induction, since $S_0 = I_d$:
+  \begin{displaymath}
+    \T{x_i}S_n^{-1} x_i \geq \frac{\T{x_i}S_0^{-1}x_i}{2^n} = \frac{\T{x_i}x_i}{2^n}
+  \end{displaymath}
+
+  Using that $\T{x_i}x_i\geq b$ concludes the proof of the lower bound of the
+  lemma.
+\end{proof}
+
+\begin{lemma}\label{lemma:proximity}
+We have:
+\begin{displaymath}
+ L^*_c - \alpha n^2\leq L^*_c(\alpha) \leq L^*_c
+\end{displaymath}
+In particular, $|L^*_c - L^*_c(\alpha)| \leq \alpha n^2$.
+\end{lemma}
+
+\begin{proof}
+ $\alpha\mapsto L^*_c(\alpha)$ is a decreasing function as it is the
+ maximum value of the $L$ function over a set-decreasing domain, which gives
+ the rightmost inequality.
+
+ Let $\mu^*, \nu^*, \xi^*$ be dual optimal for $(P_{c, \alpha})$, that is:
+ \begin{displaymath}
+ L^*_{c}(\alpha) = \max_\lambda \mathcal{L}_{c, \alpha}(\lambda, \mu^*, \nu^*, \xi^*)
+ \end{displaymath}
+
+  Note that $\mathcal{L}_{c, \alpha}(\lambda, \mu^*, \nu^*, \xi^*)
+  = \mathcal{L}_{c}(\lambda, \mu^*, \nu^*, \xi^*)
+  - \alpha\T{\mathbf{1}}\mu^*$, and that for any $\lambda$ feasible for
+  problem \eqref{eq:primal}, $\mathcal{L}_{c}(\lambda, \mu^*, \nu^*, \xi^*)
+  \geq L(\lambda)$, since each of the three terms added to $L(\lambda)$ in the
+  Lagrangian is then non-negative. Hence,
+  \begin{displaymath}
+    L^*_{c}(\alpha) \geq L(\lambda) - \alpha\T{\mathbf{1}}\mu^*
+  \end{displaymath}
+  for any $\lambda$ feasible for \eqref{eq:primal}. In particular, for $\lambda$ primal optimal for \eqref{eq:primal}:
+ \begin{equation}\label{eq:local-1}
+ L^*_{c}(\alpha) \geq L^*_c - \alpha\T{\mathbf{1}}\mu^*
+ \end{equation}
+
+  Let us denote by $M$ the support of $\mu^*$, that is $M\defeq
+  \{i|\mu_i^* > 0\}$, and by $\lambda^*$ a primal optimal point for
+  \eqref{eq:perturbed-primal}. From the KKT conditions (complementary
+  slackness) we see that:
+  \begin{displaymath}
+    M \subseteq \{i|\lambda_i^* = \alpha\}
+  \end{displaymath}
+
+
+  Let us first assume that $|M| = 0$. Then $\T{\mathbf{1}}\mu^*=0$ and the lemma follows from \eqref{eq:local-1}.
+
+  We will now assume that $|M|\geq 1$. In this case $\T{c}\lambda^*
+  = 1$: otherwise we could slightly increase the coordinates of $\lambda^*$ in $M$,
+  which would increase the value of the objective function and contradict the
+  optimality of $\lambda^*$. Note also that $|M|\leq n-1$: otherwise, since
+  $\alpha< \frac{1}{n}$, we would have $\T{c}\lambda^* < 1$, which again
+  contradicts the optimality of $\lambda^*$. Let us write:
+ \begin{displaymath}
+ 1 = \T{c}\lambda^* = \alpha\sum_{i\in M}c_i + \sum_{i\in \bar{M}}\lambda_i^*c_i
+ \leq \alpha |M| + (n-|M|)\max_{i\in \bar{M}} c_i
+ \end{displaymath}
+ That is:
+ \begin{equation}\label{local-2}
+ \max_{i\in\bar{M}} c_i \geq \frac{1 - |M|\alpha}{n-|M|}> \frac{1}{n}
+ \end{equation}
+  where the last inequality uses again that $\alpha<\frac{1}{n}$: indeed,
+  $1 - |M|\alpha > 1 - \frac{|M|}{n} = \frac{n-|M|}{n}$. From the
+  KKT conditions, we see that for $i\in M$, $\nu_i^* = 0$ (since
+  $\lambda_i^* = \alpha < 1$) and:
+ \begin{equation}\label{local-3}
+ \mu_i^* = \xi^*c_i - \partial_i L(\lambda^*)\leq \xi^*c_i\leq \xi^*
+ \end{equation}
+ since $\partial_i L(\lambda^*)\geq 0$ and $c_i\leq 1$.
+
+  Furthermore, using the KKT conditions again (for $i\in\bar{M}$, $\mu_i^*=0$
+  and $\nu_i^*\geq 0$, so that $\xi^* c_i \leq \partial_i L(\lambda^*)$), we have that:
+  \begin{equation}\label{local-4}
+    \xi^* \leq \inf_{i\in \bar{M}}\frac{\partial_i L(\lambda^*)}{c_i}\leq \inf_{i\in \bar{M}} \frac{1}{c_i}
+    = \frac{1}{\max_{i\in\bar{M}} c_i}
+  \end{equation}
+  where the second inequality uses the upper bound of Lemma~\ref{lemma:derivative-bounds}.
+
+ Combining \eqref{local-2}, \eqref{local-3} and \eqref{local-4}, we get that:
+ \begin{displaymath}
+ \sum_{i\in M}\mu_i^* \leq |M|\xi^* \leq n\xi^*\leq \frac{n}{\max_{i\in\bar{M}} c_i} \leq n^2
+ \end{displaymath}
+
+ This implies that:
+ \begin{displaymath}
+ \T{\mathbf{1}}\mu^* = \sum_{i=1}^n \mu^*_i = \sum_{i\in M}\mu_i^*\leq n^2
+ \end{displaymath}
+ which in addition to \eqref{eq:local-1} proves the lemma.
+\end{proof}
+
+\begin{lemma}\label{lemma:monotonicity}
+  Let $\delta>0$. If $c' = (c_i', c_{-i})$ with $c_i'\leq c_i - \delta$, then:
+  \begin{displaymath}
+    L^*_{c'}(\alpha) \geq L^*_c(\alpha) + \frac{\alpha\delta b}{2^n}
+  \end{displaymath}
+\end{lemma}
+
+\begin{proof}
+  Let $\mu^*, \nu^*, \xi^*$ be dual optimal for $(P_{c', \alpha})$. Noting that
+  $\mathcal{L}_{c', \alpha}(\lambda, \mu^*, \nu^*, \xi^*) \geq
+  \mathcal{L}_{c, \alpha}(\lambda, \mu^*, \nu^*, \xi^*) + \lambda_i\xi^*\delta$, we get, as in the proof of Lemma~\ref{lemma:proximity}:
+ \begin{displaymath}
+ L^*_{c'}(\alpha) \geq L(\lambda) + \lambda_i\xi^*\delta
+ \end{displaymath}
+ for any $\lambda$ feasible for \eqref{eq:perturbed-primal}. In particular, for $\lambda^*$ primal optimal for \eqref{eq:perturbed-primal}:
+ \begin{displaymath}
+ L^*_{c'}(\alpha) \geq L^*_{c}(\alpha) + \alpha\xi^*\delta
+ \end{displaymath}
+ since $\lambda_i^*\geq \alpha$.
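+
+  In detail, the bound $L^*_{c'}(\alpha)\geq L(\lambda)+\lambda_i\xi^*\delta$
+  is obtained as follows: for any $\lambda$ feasible for
+  \eqref{eq:perturbed-primal},
+  \begin{align*}
+    L^*_{c'}(\alpha)
+    = \max_{\lambda'} \mathcal{L}_{c', \alpha}(\lambda', \mu^*, \nu^*, \xi^*)
+    &\geq \mathcal{L}_{c', \alpha}(\lambda, \mu^*, \nu^*, \xi^*)\\
+    &\geq \mathcal{L}_{c, \alpha}(\lambda, \mu^*, \nu^*, \xi^*) + \lambda_i\xi^*\delta
+    \geq L(\lambda) + \lambda_i\xi^*\delta
+  \end{align*}
+  where the last inequality holds because all three penalty terms of
+  $\mathcal{L}_{c, \alpha}$ are non-negative when $\lambda$ is feasible for
+  \eqref{eq:perturbed-primal}.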
+
+ Using the KKT conditions for $(P_{c', \alpha})$, we can write:
+ \begin{displaymath}
+ \xi^* = \inf_{i:\lambda^{'*}_i>\alpha} \frac{\T{x_i}S(\lambda^{'*})^{-1}x_i}{c_i'}
+ \end{displaymath}
+  with $\lambda^{'*}$ primal optimal for $(P_{c', \alpha})$. Since $c_i'\leq 1$, the lower bound of Lemma~\ref{lemma:derivative-bounds} gives $\xi^*\geq \frac{b}{2^n}$, which concludes the proof.
+\end{proof}
+
+\begin{proposition}
+ Let $\delta\in(0,1]$. For any $\varepsilon\in(0,1]$, there exists
+ a routine which computes an approximate solution $\tilde{L}^*_c$ to
+ \eqref{eq:primal} such that:
+ \begin{enumerate}
+ \item $|\tilde{L}^*_c - L^*_c| \leq \varepsilon$
+ \item for all $c' = (c_i', c_{-i})$ with $c_i'\leq c_i-\delta$, $\tilde{L}^*_c \leq \tilde{L}^*_{c'}$
+ \item the routine's running time is $O\big(poly(n, d, \log\log\frac{1}{b\varepsilon\delta})\big)$
+ \end{enumerate}
+\end{proposition}
+
+\begin{proof}
+Let $\varepsilon\in(0, 1]$. The routine works as follows: set $\alpha\defeq
+\varepsilon(\delta + n^2)^{-1}$ and return an approximation $\tilde{L}^*_c$ of
+$L^*_c(\alpha)$ with an accuracy $\frac{1}{2^{n+1}}\alpha\delta b$ computed by
+a standard convex optimization algorithm. Note that this choice of $\alpha$
+implies $\alpha<\frac{1}{n}$ as required.
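+Indeed, since $\varepsilon\leq 1$, $\delta>0$, and $n\geq 1$:
+\begin{displaymath}
+  \alpha = \frac{\varepsilon}{\delta + n^2} \leq \frac{1}{\delta + n^2}
+  < \frac{1}{n^2} \leq \frac{1}{n}
+\end{displaymath}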
+
+\begin{enumerate}
+  \item using Lemma~\ref{lemma:proximity} and the fact that the accuracy
+    $\frac{1}{2^{n+1}}\alpha\delta b$ of the approximation is at most $\alpha\delta$:
+\begin{displaymath}
+  |\tilde{L}^*_c - L^*_c| \leq |\tilde{L}^*_c - L^*_c(\alpha)| + |L^*_c(\alpha) - L^*_c|
+  \leq \alpha\delta + \alpha n^2 = \varepsilon
+\end{displaymath}
+
+\item let $c' = (c_i', c_{-i})$ with $c_i'\leq c_i-\delta$, then:
+\begin{displaymath}
+  \tilde{L}^*_{c'} \geq L^*_{c'}(\alpha) - \frac{\alpha\delta b}{2^{n+1}}
+  \geq L^*_c(\alpha) + \frac{\alpha\delta b}{2^{n+1}}
+  \geq \tilde{L}^*_c
+\end{displaymath}
+where the first and last inequalities come from the accuracy of the approximation,
+and the inner inequality follows from Lemma~\ref{lemma:monotonicity}.
+
+\item the accuracy of the approximation $\tilde{L}^*_c$ is:
+\begin{displaymath}
+ A\defeq\frac{\varepsilon\delta b}{2^{n+1}(\delta + n^2)}
+\end{displaymath}
+\sloppy
+hence, the standard convex optimization algorithm runs in time
+$O\big(poly(n, d, \log\log A^{-1})\big)$. Note that $\log A^{-1}
+= (n+1)\log 2 + \log(\delta + n^2) + \log\frac{1}{\varepsilon\delta b}$, so that:
+\begin{displaymath}
+  \log\log A^{-1} = O\bigg(\log\log\frac{1}{\varepsilon\delta b} + \log n\bigg)
+\end{displaymath}
+which yields the desired running time for the routine.\qedhere
+\end{enumerate}
+\end{proof}
+\end{document}