author     Stratis Ioannidis <stratis@Stratiss-MacBook-Air.local>  2013-09-22 07:55:14 +0200
committer  Stratis Ioannidis <stratis@Stratiss-MacBook-Air.local>  2013-09-22 07:55:14 +0200
commit     cc1eea524d8fd1314d85fe7b56cddd95fd75302d (patch)
tree       8d028b58c801d7263e2457e5cf9934e279b51e70 /approximation.tex
parent     1cb6e4b28e77b8c1a87e54bbd7097d7f8af0e371 (diff)
parent     50900bfc44961b87379cd2d3464b677d9f5be1ac (diff)
Merge branch 'master' of ssh://74.95.195.229:1444/git/data_value
Diffstat (limited to 'approximation.tex')
-rwxr-xr-x  approximation.tex  11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/approximation.tex b/approximation.tex
index 4978279..3e3f482 100755
--- a/approximation.tex
+++ b/approximation.tex
@@ -47,7 +47,7 @@ expectation of $V$ under the distribution $P_\mathcal{N}^\lambda$:
F(\lambda)
\defeq \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\big[V(S)\big]
% = \sum_{S\subseteq\mathcal{N}} P_\mathcal{N}^\lambda(S) V(S)
-= \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\left[ \log\det\left( I_d + \sum_{i\in S} x_i\T{x_i}\right) \right],\qquad \lambda\in[0,1]^n.
+= \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\left[ \log\det\left( I_d + \sum_{i\in S} x_i\T{x_i}\right) \right],\quad \lambda\in[0,1]^n.
\end{equation}
Function $F$ is an extension of $V$ to the domain $[0,1]^n$, as it equals $V$ on integer inputs: $F(\id_S) = V(S)$ for all
$S\subseteq\mathcal{N}$, where $\id_S$ denotes the indicator vector of $S$. %\citeN{pipage} have shown how to use this extension to obtain approximation guarantees for an interesting class of optimization problems through the \emph{pipage rounding} framework, which has been successfully applied in \citeN{chen, singer-influence}.
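
To make the definition concrete, here is a minimal sketch (not part of the patch) of estimating $F(\lambda)$ by Monte Carlo: each $i\in\mathcal{N}$ enters $S$ independently with probability $\lambda_i$. It assumes the $x_i$ are the rows of a matrix X; the names V and estimate_F are illustrative, not from the paper.

import numpy as np

def V(S, X):
    """V(S) = log det(I_d + sum_{i in S} x_i x_i^T), with S a boolean mask over rows of X."""
    d = X.shape[1]
    A = np.eye(d) + X[S].T @ X[S]        # X_S^T X_S = sum_{i in S} x_i x_i^T
    return np.linalg.slogdet(A)[1]       # log det via slogdet, for numerical stability

def estimate_F(lam, X, num_samples=10_000, seed=0):
    """Monte Carlo estimate of F(lambda) = E_{S ~ P_N^lambda}[V(S)]."""
    rng = np.random.default_rng(seed)
    draws = rng.random((num_samples, X.shape[0])) < lam   # each row is one sample of S
    return np.mean([V(S, X) for S in draws])

# Sanity check: on an integer input 1_S, every sample equals S, so F(1_S) = V(S).
X = np.random.default_rng(0).standard_normal((5, 2))
S = np.array([True, False, True, True, False])
assert abs(estimate_F(S.astype(float), X, 100) - V(S, X)) < 1e-12
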
@@ -55,9 +55,12 @@ Contrary to problems such as \textsc{Knapsack}, the
multi-linear extension \eqref{eq:multi-linear} cannot be optimized in
polynomial time for the value function $V$ we study here, given by \eqref{modified}. Hence, we introduce an extension $L:[0,1]^n\to\reals$ s.t.~
\begin{equation}\label{eq:our-relaxation}
-\quad L(\lambda) \defeq
-\log\det\left(I_d + \sum_{i\in\mathcal{N}} \lambda_i x_i\T{x_i}\right)=
-\log\det\left(\mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\bigg[I_d + \sum_{i\in S} x_i\T{x_i} \bigg]\right), \qquad \lambda\in[0,1]^n.
+ \begin{split}
+ L(\lambda) &\defeq
+\log\det\left(I_d + \sum_{i\in\mathcal{N}} \lambda_i x_i\T{x_i}\right)\\
+&= \log\det\left(\mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\bigg[I_d + \sum_{i\in S} x_i\T{x_i} \bigg]\right),
+\quad \lambda\in[0,1]^n.
+\end{split}
\end{equation}
Note that $L$ also extends $V$, and follows naturally from the multi-linear extension by swapping the
expectation and $\log \det$ in \eqref{eq:multi-linear}. Crucially, it is \emph{strictly concave} on $[0,1]^n$, a fact that we exploit in the next section to maximize $L$ subject to the budget constraint in polynomial time.
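
Since $\log\det$ is concave over positive-definite matrices, swapping the expectation and $\log\det$ can only increase the value by Jensen's inequality, so $L(\lambda) \ge F(\lambda)$ pointwise. A minimal numerical sketch of this, continuing the assumptions above (the names L and F_mc are illustrative, not from the paper):

import numpy as np

def L(lam, X):
    """L(lambda) = log det(I_d + sum_i lambda_i x_i x_i^T)."""
    d = X.shape[1]
    return np.linalg.slogdet(np.eye(d) + (lam[:, None] * X).T @ X)[1]

def F_mc(lam, X, num_samples=20_000, seed=0):
    """Monte Carlo estimate of F(lambda), as in the previous sketch."""
    rng = np.random.default_rng(seed)
    d = X.shape[1]
    draws = rng.random((num_samples, X.shape[0])) < lam
    return np.mean([np.linalg.slogdet(np.eye(d) + X[S].T @ X[S])[1] for S in draws])

rng = np.random.default_rng(1)
X = rng.standard_normal((8, 3))   # n = 8 feature vectors x_i in R^3
lam = rng.random(8)               # a fractional point of [0,1]^n
print(f"L(lam) = {L(lam, X):.4f}, F(lam) ~ {F_mc(lam, X):.4f}")   # L dominates F
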