path: root/main.tex
author    Stratis Ioannidis <stratis@stratis-Latitude-E6320.(none)>  2013-02-10 18:08:19 -0800
committer Stratis Ioannidis <stratis@stratis-Latitude-E6320.(none)>  2013-02-10 18:08:19 -0800
commit    3eab27a691c9c1319dcadbcd2b2325044b2cef14 (patch)
tree      9ed05e3dc34a707d72660f690a79c0cd1ba8c72f /main.tex
parent    91db9186fa25585c02a035e99386f73c75ad2601 (diff)
parent    a42eaba2bf8403982fe4861ea4eaf36245a3f54c (diff)
download  recommendation-3eab27a691c9c1319dcadbcd2b2325044b2cef14.tar.gz
Merge branch 'master' of ssh://palosgit01/git/data_value
Diffstat (limited to 'main.tex')
-rw-r--r--  main.tex  187
1 file changed, 73 insertions, 114 deletions
diff --git a/main.tex b/main.tex
index 64d4604..903d1ed 100644
--- a/main.tex
+++ b/main.tex
@@ -99,10 +99,10 @@ multilinear extension through the \emph{pipage rounding} framework of \citeN{pip
Following \citeN{chen} and \citeN{singer-influence},
we in turn introduce a relaxation $L$ specifically tailored to the value
function of \EDP:
-\begin{displaymath}
+\begin{equation}\label{eq:our-relaxation}
\forall\,\lambda\in[0,1]^n,\quad L(\lambda) \defeq
\log\det\left(I_d + \sum_{i\in\mathcal{N}} \lambda_i x_i\T{x_i}\right),
-\end{displaymath}
+\end{equation}
and then use $L^* = OPT'_{-i^*}$ with $R=L$ in our mechanism. The function $L$
is well-known to be concave and even self-concordant (see \emph{e.g.},
\cite{boyd2004convex}). In this case, the analysis of Newton's method for
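As a concrete illustration of the relaxation $L$ defined in the hunk above, here is a minimal numpy sketch (our own, not part of the paper; the layout with the $x_i$ as rows of X is an assumption):

    import numpy as np

    def L(lam, X):
        """Relaxation L(lam) = log det(I_d + sum_i lam_i x_i x_i^T).

        lam: numpy vector in [0,1]^n; X: (n, d) array whose i-th row is x_i.
        """
        d = X.shape[1]
        A = np.eye(d) + X.T @ (lam[:, None] * X)  # I_d + sum_i lam_i x_i x_i^T
        sign, logdet = np.linalg.slogdet(A)       # numerically stabler than log(det(A))
        assert sign > 0                           # A is positive definite
        return logdet

Since $L$ is concave, maximizing it under the budget constraint is a convex problem, which is what makes the Newton analysis mentioned above applicable.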
@@ -183,57 +183,13 @@ There is no $2$-approximate, truthful, budget feasible, individually rational me
Suppose, for contradiction, that such a mechanism exists. Consider two experiments in dimension $d=2$, with $x_1 = e_1=[1,0]$, $x_2=e_2=[0,1]$ and costs $c_1=c_2=B/2+\epsilon$. One of the two experiments, say $x_1$, must be in the set selected by the mechanism; otherwise the approximation ratio is unbounded, a contradiction. If experiment 1 lowers its reported cost to $B/2-\epsilon$, by monotonicity it remains in the solution, and by threshold payments it is paid at least $B/2+\epsilon$. By budget feasibility and individual rationality, $x_2$ is then not included in the solution: hence, the selected set attains value $\log 2$, while the optimal value is $2\log 2$.
\end{proof}
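A quick numerical sanity check of the instance used in this proof (our own sketch, assuming numpy; the value is taken without the $1/2$ factor, matching the stated values $\log 2$ and $2\log 2$):

    import numpy as np

    def V(S, X):
        # V(S) = log det(I_d + sum_{i in S} x_i x_i^T)
        d = X.shape[1]
        return np.linalg.slogdet(np.eye(d) + X[S].T @ X[S])[1]

    X = np.eye(2)          # rows are x_1 = e_1 and x_2 = e_2
    print(V([0], X))       # log 2 ~ 0.693: value attained by the mechanism
    print(V([0, 1], X))    # 2 log 2 ~ 1.386: the optimal value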
-\begin{comment}
-$P_\mathcal{N}^\lambda(S)$ is the probability of choosing the set $S$ if
-we select each element $i$ in $\mathcal{N}$ independently with probability
-$\lambda_i$: %or to reject it with probability $1-\lambda_i$:
-\begin{displaymath}
- P_\mathcal{N}^\lambda(S) = \prod_{i\in S} \lambda_i
- \prod_{i\in\mathcal{N}\setminus S}( 1 - \lambda_i)
-\end{displaymath}
-Consider the general \emph{multi-linear}
-extension:
-\begin{equation}\label{eq:multilinear}
- F(\lambda)
- = \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\left[V(S)\right]
- = \sum_{S\subseteq\mathcal{N}} P_\mathcal{N}^\lambda(S) V(S)
-\end{equation}
-\junk{
-
-$P_\mathcal{N}^\lambda(S)$ is the probability of choosing the set $S$ if
-we select each element $i$ in $\mathcal{N}$ independently with probability
-$\lambda_i$: %or to reject it with probability $1-\lambda_i$:
-\begin{displaymath}
- P_\mathcal{N}^\lambda(S) = \prod_{i\in S} \lambda_i
- \prod_{i\in\mathcal{N}\setminus S}( 1 - \lambda_i)
-\end{displaymath}
-
-For \EDP\ the multi-linear extension
-%of \eqref{modified}
-can be written:
-\begin{displaymath}
- F(\lambda) = \mathbb{E}_{S\sim
- P_\mathcal{N}^\lambda}\Big[\log\det \big(I_d + \sum_{i\in S} x_i\T{x_i}\big) \Big].
-\end{displaymath}
-%As in the case of \textsc{Coverage},
-\eqref{relax} is not a convex optimization problem, and is not easy to solve directly. %As in ~\cite{singer-influence},
-We consider an additional relaxation $L$ that
-follows naturally by swapping the expectation and the $\log\det$
-in the above formula:
-\begin{align}\label{eq:concave}
- L(\lambda) & \defeq \log\det\Big(\mathbb{E}_{S\sim
- P_\mathcal{N}^\lambda}\big[I_d + \sum_{i\in S} x_i\T{x_i}\big]\Big)\notag \\
- &= \log\det\left(I_d + \sum_{i\in\mathcal{N}}
- \lambda_i x_i\T{x_i}\right)
-\end{align}
-\end{comment}
\subsection{Proof of Theorem~\ref{thm:main}}\label{sec:proofofmainthm}
%\stratis{individual rationality???}
%The proof of the properties of the mechanism is broken down into lemmas.
We now present the proof of Theorem~\ref{thm:main}. Truthfulness and individual
-rationality follows from monotonicity and threshold payments. Monotonicity and
+rationality follow from monotonicity and threshold payments. Monotonicity and
budget feasibility follow from the same steps as in the analysis of \citeN{chen};
for the sake of completeness, we restate their proof in the Appendix.
@@ -345,41 +301,57 @@ This solution is:
%\end{proof}
\subsection{Proof of Lemma~\ref{lemma:relaxation}}\label{sec:relaxation}
-%Recall that, since $L$ is a fractional relaxation of $V$,
-%\begin{displaymath}
-% OPT \leq OPT(L, B).
-%\end{displaymath}
-%However, for the purpose of proving theorem~\ref{thm:main}, we need to bound
-%$L$ from above (up to a multiplicative factor) by $V$.
-\junk{
-To prove Lemma~\ref{lemma:relaxation}, we use the
-\emph{pipage rounding} method of Ageev and Sviridenko~\cite{pipage}, where
-$L$ is first bounded from above by the multi-linear relaxation $F$, which is itself
-subsequently bounded by $V$. While the latter part is general and can be applied
-to the multi-linear extension of any submodular function, the former part is
-specific to our choice for the function $L$. %and is our main technical contribution (lemma~\ref{lemma:relaxation-ratio}).
+We need to prove that, for our relaxation $L$ defined in \eqref{eq:our-relaxation}, $OPT'$
+is close to $OPT$, as stated in Lemma~\ref{lemma:relaxation}. As discussed at
+the beginning of Section~\ref{sec:main}, following the same approach as
+\citeN{singer-influence}, we make use of the \emph{pipage
+rounding} framework of \citeN{pipage}.
-The proof has two aspects. The easier part is that $F$ is bounded by $V$.
-This is called \emph{cross-convexity} in the literature (see, \emph{e.g.},
-\cite{dughmi}), or $\varepsilon$-convexity by Ageev and Sviridenko~\cite{pipage}.
-}
+This framework uses the \emph{multi-linear} extension $F$ of the submodular
+function $V$. Let $P_\mathcal{N}^\lambda(S)$ be the probability of choosing the set $S$ if we select each element $i$ in $\mathcal{N}$ independently with probability $\lambda_i$:
+\begin{displaymath}
+ P_\mathcal{N}^\lambda(S) \defeq \prod_{i\in S} \lambda_i
+ \prod_{i\in\mathcal{N}\setminus S}( 1 - \lambda_i).
+\end{displaymath}
+Then, the \emph{multi-linear} extension $F$ is defined by:
+\begin{equation}\label{eq:multilinear}
+ F(\lambda)
+ \defeq \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\big[V(S)\big]
+ = \sum_{S\subseteq\mathcal{N}} P_\mathcal{N}^\lambda(S) V(S)
+\end{equation}
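For intuition, $F$ can be evaluated by brute-force enumeration on small instances (a hypothetical sketch, exponential in $n$, meant only to mirror the definition \eqref{eq:multilinear}):

    import numpy as np
    from itertools import combinations

    def F(lam, V, n):
        # F(lam) = sum over subsets S of P(S) * V(S), with P(S) as above.
        total = 0.0
        for k in range(n + 1):
            for S in combinations(range(n), k):
                members = set(S)
                p = np.prod([lam[i] if i in members else 1.0 - lam[i]
                             for i in range(n)])
                total += p * V(list(S))
        return total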
-We prove that our multi-linear function $F$ has a property
- which allows to trade one fractional component of the solution for another until
-one of them becomes integral, without losing any value.
-This property is referred to in the literature as \emph{cross-convexity} (see, \emph{e.g.},
-\cite{dughmi}), or $\varepsilon$-convexity by Ageev and
-Sviridenko~\cite{pipage}.
-\junk{
+For \EDP{} the multi-linear extension can be written:
+\begin{equation}\label{eq:multi-linear-logdet}
+ F(\lambda) = \mathbb{E}_{S\sim
+  P_\mathcal{N}^\lambda}\bigg[\log\det \Big(I_d + \sum_{i\in S} x_i\T{x_i}\Big) \bigg].
+\end{equation}
+Note that the relaxation $L$ introduced in \eqref{eq:our-relaxation}
+follows naturally from the multi-linear extension: it is obtained by swapping the
+expectation and the $\log\det$ in \eqref{eq:multi-linear-logdet}:
+\begin{displaymath}
+ L(\lambda) = \log\det\left(\mathbb{E}_{S\sim
+ P_\mathcal{N}^\lambda}\bigg[I_d + \sum_{i\in S} x_i\T{x_i} \bigg]\right).
+\end{displaymath}
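Since $\log\det$ is concave, swapping the expectation inside can only increase the value, so $F(\lambda)\leq L(\lambda)$ pointwise by Jensen's inequality; the harder direction, bounding $L$ from above by a multiple of $F$, is Lemma~\ref{lemma:relaxation-ratio} below. A numerical check of the easy direction on a random instance (our own sketch, not from the paper):

    import numpy as np
    from itertools import chain, combinations

    rng = np.random.default_rng(0)
    n, d = 6, 3
    X = rng.normal(size=(n, d))        # rows are the x_i
    lam = rng.uniform(size=n)

    logdet = lambda A: np.linalg.slogdet(A)[1]
    subsets = chain.from_iterable(combinations(range(n), k)
                                  for k in range(n + 1))
    F = sum(np.prod([lam[i] if i in S else 1 - lam[i] for i in range(n)])
            * logdet(np.eye(d) + X[list(S)].T @ X[list(S)]) for S in subsets)
    L = logdet(np.eye(d) + X.T @ (lam[:, None] * X))
    print(F <= L)   # True: F(lam) <= L(lam) by concavity of log det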
-a variant of the $\varepsilon$-convexity of the multi-linear
-extension \eqref{eq:multilinear} introduced by Ageev and Sviridenko
-\cite{pipage} which allows to trade one fractional component of the solution for another until
-one of them becomes integral, without loosing any value. This property is also
+The pipage rounding framework then proceeds as follows (a schematic sketch of
+the rounding loop is given after the list):
+\begin{itemize}
+\item First, we prove (Lemma~\ref{lemma:rounding}) that $F$ admits the
+following rounding property: given a feasible $\lambda\in[0,1]^n$, it is
+possible to trade one fractional component of $\lambda$ for another until one
+of them becomes integral, obtaining a new point $\tilde{\lambda}$ which is
+both feasible and satisfies $F(\tilde{\lambda})\geq F(\lambda)$. Here, by
+feasibility of a point $\lambda$, we mean that it satisfies the budget
+constraint $\sum_{i=1}^n \lambda_i c_i \leq B$. This rounding property is
referred to in the literature as \emph{cross-convexity} (see, \emph{e.g.},
-\cite{dughmi}).
-}
+\cite{dughmi}), or $\varepsilon$-convexity by \citeN{pipage}.
+\item Next, we prove the central result: bounding $L$ from above, up to a
+multiplicative factor, by the multi-linear relaxation $F$ (Lemma~\ref{lemma:relaxation-ratio}).
+\item Finally, we conclude the proof of Lemma~\ref{lemma:relaxation} by
+combining Lemma~\ref{lemma:rounding} and Lemma~\ref{lemma:relaxation-ratio}.
+\end{itemize}
+
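The rounding step in the first item can be summarized by the following schematic loop (our own sketch; F stands for any oracle evaluating the multi-linear extension, and the update direction $e_i - (c_i/c_j)e_j$ keeps the budget $\sum_i \lambda_i c_i$ unchanged):

    import numpy as np

    def pipage_round(lam, c, F, eps=1e-9):
        # Repeatedly trade two fractional components i, j along
        # lam + t * (e_i - (c_i / c_j) * e_j), a budget-preserving direction,
        # moving to whichever endpoint of the feasible interval does not
        # decrease F (one exists since F is convex along this direction),
        # until at most one component remains fractional.
        lam = np.asarray(lam, dtype=float).copy()
        c = np.asarray(c, dtype=float)
        while True:
            frac = [i for i in range(len(lam)) if eps < lam[i] < 1 - eps]
            if len(frac) < 2:
                return lam
            i, j = frac[:2]
            r = c[i] / c[j]
            t_lo = max(-lam[i], (lam[j] - 1) / r)   # cf. eq. (convex-interval)
            t_hi = min(1 - lam[i], lam[j] / r)
            best = None
            for t in (t_lo, t_hi):
                cand = lam.copy()
                cand[i] += t
                cand[j] -= t * r
                if best is None or F(cand) > F(best):
                    best = cand
            lam = np.clip(best, 0.0, 1.0)           # remove floating-point noise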
+\begin{comment}
Formally, %this property states that
if we define:
\begin{displaymath}
@@ -393,11 +365,8 @@ $\reals^{n}$, then $\tilde{F}_\lambda$ is convex. Hence its maximum over the int
\end{displaymath}
is attained at one of the boundaries of $I_\lambda$ for which one of the $i$-th
or the $j$-th component of $\lambda$ becomes integral.
+\end{comment}
-The lemma below proves that we can similarly trade a fractional component for
-another until one of them becomes integral \emph{while maintaining the
-feasibility of the point at which $F$ is evaluated}. Here, by feasibility of
-a point $\lambda$, we mean that it satisfies the budget constraint $\sum_{i=1}^n \lambda_i c_i \leq B$.
\begin{lemma}[Rounding]\label{lemma:rounding}
For any feasible $\lambda\in[0,1]^{n}$, there exists a feasible
$\bar{\lambda}\in[0,1]^{n}$ such that at most one of its components is
@@ -424,43 +393,33 @@ a point $\lambda$, we mean that it satisfies the budget constraint $\sum_{i=1}^n
\lambda_\varepsilon = \lambda + \varepsilon\left(e_i-\frac{c_i}{c_j}e_j\right)
\end{displaymath}
It is easy to see that if $\lambda$ is feasible, then:
- \begin{multline}\label{eq:convex-interval}
+ \begin{equation}\label{eq:convex-interval}
\forall\varepsilon\in\Big[\max\Big(-\lambda_i,(\lambda_j-1)\frac{c_j}{c_i}\Big), \min\Big(1-\lambda_i, \lambda_j
- \frac{c_j}{c_i}\Big)\Big],\;\\
+ \frac{c_j}{c_i}\Big)\Big],\;
\lambda_\varepsilon\;\;\textrm{is feasible}.
- \end{multline}
+ \end{equation}
Furthermore, the function $F_\lambda$ is convex; indeed:
\begin{align*}
F_\lambda(\varepsilon)
& = \mathbb{E}_{S'\sim P_{\mathcal{N}\setminus\{i,j\}}^\lambda}\Big[
(\lambda_i+\varepsilon)\Big(\lambda_j-\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{i,j\})\\
- & + (\lambda_i+\varepsilon)\Big(1-\lambda_j+\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{i\})\\
- & + (1-\lambda_i-\varepsilon)\Big(\lambda_j-\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{j\})\\
- & + (1-\lambda_i-\varepsilon)\Big(1-\lambda_j+\varepsilon\frac{c_i}{c_j}\Big)V(S')\Big]
+ & + (\lambda_i+\varepsilon)\Big(1-\lambda_j+\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{i\})
+ + (1-\lambda_i-\varepsilon)\Big(\lambda_j-\varepsilon\frac{c_i}{c_j}\Big)V(S'\cup\{j\})\\
+ & + (1-\lambda_i-\varepsilon)\Big(1-\lambda_j+\varepsilon\frac{c_i}{c_j}\Big)V(S')\Big]
\end{align*}
Thus, $F_\lambda$ is a degree-2 polynomial whose leading coefficient is:
- \begin{multline*}
+ \begin{displaymath}
    \frac{c_i}{c_j}\mathbb{E}_{S'\sim
    P_{\mathcal{N}\setminus\{i,j\}}^\lambda}\Big[
    V(S'\cup\{i\})+V(S'\cup\{j\})
    -V(S'\cup\{i,j\})-V(S')\Big]
- \end{multline*}
+ \end{displaymath}
which is nonnegative by submodularity of $V$. Hence, the maximum of
$F_\lambda$ over the interval given in \eqref{eq:convex-interval} is
attained at one of its endpoints, at which either the $i$-th or the $j$-th
component of $\lambda_\varepsilon$ becomes integral.
\end{proof}
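The submodularity step at the end of this proof can be sanity checked numerically: the bracketed quantity $V(S'\cup\{i\})+V(S'\cup\{j\})-V(S'\cup\{i,j\})-V(S')$ is nonnegative for the log-det value function (our own sketch, random instance):

    import numpy as np

    rng = np.random.default_rng(4)
    n, d = 8, 3
    X = rng.normal(size=(n, d))
    V = lambda S: 0.5 * np.linalg.slogdet(np.eye(d)
                                          + X[list(S)].T @ X[list(S)])[1]

    S, i, j = [0, 1, 2], 5, 6
    # Submodularity of V, the quantity signing the leading coefficient above:
    print(V(S + [i]) + V(S + [j]) - V(S + [i, j]) - V(S) >= 0)   # True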
-Next, we prove the central result of bounding $L$ appropriately in terms of $F$.
-
-\junk{
-To prove Lemma~\ref{lemma:relaxation}, we use the
-\emph{pipage rounding} method of Ageev and Sviridenko~\cite{pipage}, where
-$L$ is first bounded from above by the multi-linear relaxation $F$, which is itself
-subsequently bounded by $V$. While the latter part is general and can be applied
-to the multi-linear extension of any submodular function, the former part is
-specific to our choice for the function $L$. %and is our main techn
-}
\begin{lemma}\label{lemma:relaxation-ratio}
% The following inequality holds:
@@ -510,11 +469,11 @@ For all $\lambda\in[0,1]^{n},$
$S$ can be written as
\begin{align*}
V(S\cup \{i\}) - V(S)& = \frac{1}{2}\log\det(I_d
- + \T{X_S}X_S + x_i\T{x_i})\\
- & - \frac{1}{2}\log\det(I_d + \T{X_S}X_S)\\
+ + \T{X_S}X_S + x_i\T{x_i})
+ - \frac{1}{2}\log\det(I_d + \T{X_S}X_S)\\
& = \frac{1}{2}\log\det(I_d + x_i\T{x_i}(I_d +
-\T{X_S}X_S)^{-1})\\
-& = \frac{1}{2}\log(1 + \T{x_i}A(S)^{-1}x_i)
+\T{X_S}X_S)^{-1})
+ = \frac{1}{2}\log(1 + \T{x_i}A(S)^{-1}x_i)
\end{align*}
where $A(S) =I_d+ \T{X_S}X_S$.
% $ V(S\cup\{i\}) - V(S) = \frac{1}{2}\log\left(1 + \T{x_i} A(S)^{-1}x_i\right)$.
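This closed form follows from the matrix determinant lemma and can be checked numerically (our own sketch, random instance):

    import numpy as np

    # Check V(S u {i}) - V(S) = (1/2) log(1 + x_i^T A(S)^{-1} x_i),
    # with A(S) = I_d + X_S^T X_S.
    rng = np.random.default_rng(1)
    d = 4
    X_S = rng.normal(size=(5, d))    # rows already in S
    x_i = rng.normal(size=d)         # candidate experiment i

    A = np.eye(d) + X_S.T @ X_S
    lhs = 0.5 * (np.linalg.slogdet(A + np.outer(x_i, x_i))[1]
                 - np.linalg.slogdet(A)[1])
    rhs = 0.5 * np.log1p(x_i @ np.linalg.solve(A, x_i))
    print(np.isclose(lhs, rhs))      # True, by the matrix determinant lemma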
@@ -528,12 +487,12 @@ Using this,
The computation of the derivative of $L$ uses standard matrix
calculus. Writing $\tilde{A}(\lambda) = I_d+\sum_{i\in
\mathcal{N}}\lambda_ix_i\T{x_i}$:
- \begin{align*}
- \det \tilde{A}(\lambda + h\cdot e_i) & = \det\big(\tilde{A}(\lambda)
- + hx_i\T{x_i}\big)\\
- & =\det \tilde{A}(\lambda)\big(1+
+ \begin{displaymath}
+ \det \tilde{A}(\lambda + h\cdot e_i) = \det\big(\tilde{A}(\lambda)
+ + hx_i\T{x_i}\big)
+ =\det \tilde{A}(\lambda)\big(1+
h\T{x_i}\tilde{A}(\lambda)^{-1}x_i\big)
- \end{align*}
+ \end{displaymath}
Hence:
\begin{displaymath}
\log\det\tilde{A}(\lambda + h\cdot e_i)= \log\det\tilde{A}(\lambda)
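The determinant identity in the hunk above implies that the derivative of $\log\det\tilde{A}(\lambda)$ in the $i$-th coordinate equals $\T{x_i}\tilde{A}(\lambda)^{-1}x_i$; a finite-difference sanity check (our own sketch, random instance):

    import numpy as np

    rng = np.random.default_rng(2)
    n, d, i, h = 6, 3, 2, 1e-6
    X = rng.normal(size=(n, d))
    lam = rng.uniform(size=n)

    Atil = lambda lam: np.eye(d) + X.T @ (lam[:, None] * X)
    logdetA = lambda lam: np.linalg.slogdet(Atil(lam))[1]
    e_i = np.eye(n)[i]
    fd = (logdetA(lam + h * e_i) - logdetA(lam)) / h      # finite difference
    exact = X[i] @ np.linalg.solve(Atil(lam), X[i])       # x_i^T Atil^{-1} x_i
    print(np.isclose(fd, exact, rtol=1e-4))               # True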
@@ -595,14 +554,14 @@ In particular,
\end{displaymath}
Finally, using that the inverse is a matrix convex function over symmetric
positive definite matrices:
- \begin{align*}
- \partial_i F(\lambda) &\geq
+ \begin{displaymath}
+ \partial_i F(\lambda) \geq
\frac{1}{4}
- \T{x_i}\bigg(\sum_{S\subseteq\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)\bigg)^{-1}x_i\\
- & = \frac{1}{4}\T{x_i}\tilde{A}(\lambda)^{-1}x_i\\
- & = \frac{1}{2}
+ \T{x_i}\bigg(\sum_{S\subseteq\mathcal{N}}P_\mathcal{N}^\lambda(S)A(S)\bigg)^{-1}x_i
+ = \frac{1}{4}\T{x_i}\tilde{A}(\lambda)^{-1}x_i
+ = \frac{1}{2}
\partial_i L(\lambda)
- \end{align*}
+ \end{displaymath}
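The matrix-convexity step used here states that for positive definite $A_k$ and a probability vector $p$, $\T{x}\big(\sum_k p_k A_k\big)^{-1}x \leq \sum_k p_k \T{x}A_k^{-1}x$; a quick numerical check (our own sketch, random instance):

    import numpy as np

    rng = np.random.default_rng(3)
    d, K = 3, 5
    p = np.full(K, 1.0 / K)
    As = []
    for _ in range(K):
        M = rng.normal(size=(d, d))
        As.append(np.eye(d) + M @ M.T)     # positive definite
    x = rng.normal(size=d)

    lhs = x @ np.linalg.solve(sum(p[k] * As[k] for k in range(K)), x)
    rhs = sum(p[k] * (x @ np.linalg.solve(As[k], x)) for k in range(K))
    print(lhs <= rhs)   # True: Jensen's inequality for the matrix inverse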
Having bounded the ratio between the partial derivatives, we now bound the ratio $F(\lambda)/L(\lambda)$ from below. Consider the following cases.
First, if the minimum of the ratio