author    Stratis Ioannidis <stratis@stratis-Latitude-E6320.(none)>  2013-02-10 17:35:46 -0800
committer Stratis Ioannidis <stratis@stratis-Latitude-E6320.(none)>  2013-02-10 17:35:46 -0800
commit    5bd3c3aef9533a2697c4b3660c53c1411fa18c77 (patch)
tree      fa56b31f79d1b508a8574ed21dba265b77793c21
parent    73ee783f77425349498562925e0047e1f1aee1c9 (diff)
parent    df1a97cb16319a2e4eb151574d3a2f9038682e8e (diff)
Merge branch 'master' of ssh://palosgit01/git/data_value
-rw-r--r--  main.tex  44
1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/main.tex b/main.tex
index 1f92091..64d4604 100644
--- a/main.tex
+++ b/main.tex
@@ -45,7 +45,7 @@ approximation ratio
\paragraph{Strategic case}
We could design the allocation function of our mechanism by following the full
information approach: allocating to agent $i^*$ when $V(i^*)\geq V(S_G)$ and to
-$S_G$ otherwise. However, Singer~\cite{singer-influence} notes that this
+$S_G$ otherwise. However, \citeN{singer-influence} notes that this
allocation function is not monotone, and thus breaks incentive compatibility
by Myerson's theorem (Theorem~\ref{thm:myerson}).
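For concreteness, the monotonicity condition invoked here can be stated in its standard single-parameter form (our paraphrase; Theorem~\ref{thm:myerson} in the paper may phrase it differently): an allocation rule $f$ is monotone if a winning agent keeps winning after lowering its reported cost,
\begin{displaymath}
  i \in f(c_i, c_{-i}) \;\text{ and }\; c_i' \leq c_i
  \quad\Longrightarrow\quad i \in f(c_i', c_{-i}),
\end{displaymath}
and Myerson's theorem says that a monotone rule, paired with threshold payments, is incentive compatible.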
@@ -55,14 +55,13 @@ full-information case after removing $i^*$ from the set $\mathcal{N}$:
OPT_{-i^*} \defeq \max_{S\subseteq\mathcal{N}\setminus\{i^*\}} \Big\{V(S) \;\Big| \;
\sum_{i\in S}c_i\leq B\Big\}
\end{equation}
-Singer~\cite{singer-mechanisms} and Chen \emph{et al.}~\cite{chen} prove that
-allocating to $i^*$ when $V(i^*)\geq C.OPT_{-i^*}$ (for some constant $C$) and
-to $S_G$ otherwise yields a 8.34-approximation mechanism. However,
-$OPT_{-i^*}$, the solution of the full-information case, cannot be computed in
-poly-time when the underlying problem is NP-hard (unless P=NP), as is the case
-for \EDP{}.
+\citeN{singer-mechanisms} and \citeN{chen} prove that allocating to $i^*$ when
+$V(i^*)\geq C\cdot OPT_{-i^*}$ (for some constant $C$) and to $S_G$ otherwise
+yields an 8.34-approximation mechanism. However, $OPT_{-i^*}$, the solution of
+the full-information case, cannot be computed in poly-time when the underlying
+problem is NP-hard (unless P=NP), as is the case for \EDP{}.
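To fix intuition about the quantity $OPT_{-i^*}$, here is a brute-force computation on a toy instance of our own devising. The additive value function, the numbers, and the helper names (`V`, `opt`) are illustrative assumptions; \EDP{}'s actual value function is not additive, which is precisely why this exponential enumeration is not an option there.

from itertools import combinations

# Toy instance with an additive value function (our simplification;
# \EDP's value function is not additive).
values = {1: 6.0, 2: 5.0, 3: 10.0}   # V(i)
costs  = {1: 1.0, 2: 1.0, 3: 2.0}    # c_i
B = 2.0                              # budget

def V(S):
    # Additive set value, for illustration only.
    return sum(values[i] for i in S)

def opt(agents, budget):
    # Brute-force OPT over all feasible subsets (exponential; toy sizes only).
    best = 0.0
    for k in range(len(agents) + 1):
        for S in combinations(agents, k):
            if sum(costs[i] for i in S) <= budget:
                best = max(best, V(S))
    return best

i_star = max(values, key=values.get)                    # i^* = argmax V(i)
opt_minus = opt([i for i in values if i != i_star], B)  # OPT_{-i^*}
print(i_star, opt_minus)  # -> 3 11.0 (agents 1 and 2 together fit in B)

The mechanism's allocation test would then compare $V(i^*)$ (here $10$) against $C\cdot OPT_{-i^*}$ (here $11C$) for the paper's choice of constant $C$.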
-Instead, Singer~\cite{singer-mechanisms} and Chen \emph{et al.}~\cite{chen}
+Instead, \citeN{singer-mechanisms} and \citeN{chen}
suggest replacing $OPT_{-i^*}$ by a quantity $L^*$ satisfying the following
properties:
\begin{itemize}
@@ -94,11 +93,10 @@ Section~\ref{sec:relaxation}) yields a constant-approximation mechanism for
In \cite{singer-influence}, the optimization program \eqref{relax}
cannot be solved efficiently when $R$ is the multilinear extension of $V$.
Consequently, Singer introduces a second relaxation which he relates to the
-multilinear extension through the \emph{pipage rounding} framework of Ageev and
-Sviridenko~\cite{pipage}.
+multilinear extension through the \emph{pipage rounding} framework of \citeN{pipage}.
\paragraph{Our approach}
-Following Chen \emph{et al.}~\cite{chen} and Singer~\cite{singer-influence},
+Following \citeN{chen} and \citeN{singer-influence},
we in turn introduce a relaxation $L$ specifically tailored to the value
function of \EDP:
\begin{displaymath}
@@ -114,7 +112,7 @@ $O(\log\log\varepsilon^{-1})$ iterations. Being the solution to a maximization
problem, $L^*$ satisfies the required monotonicity property. The main challenge
will be to prove that $OPT'_{-i^*}$, for the relaxation $R=L$, is close to
$OPT$. As in~\cite{singer-influence}, this will be done using the
-\emph{pipage rounding} framework of~\cite{pipage} and forms the technical bulk
+\emph{pipage rounding} framework of~\citeN{pipage} and forms the technical bulk
of the paper.
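Since pipage rounding recurs throughout the analysis, the following generic sketch may help fix ideas: rounding a fractional point under a single budget constraint, given oracle access to some extension $F$ of the set function. The function name `pipage_round` and the two-coordinate update are our illustration of the general idea; the framework of \citeN{pipage}, and its use in this paper for the relaxation $L$, rests on convexity properties along these directions that we do not reproduce here.

def pipage_round(x, c, F, tol=1e-9):
    """Round fractional x in [0,1]^n to an (almost) integral point, keeping
    the budget sum(c[i] * x[i]) invariant at every step (illustrative sketch).
    """
    x = list(x)
    while True:
        frac = [i for i in range(len(x)) if tol < x[i] < 1.0 - tol]
        if len(frac) <= 1:
            return x  # at most one fractional coordinate left
        i, j = frac[0], frac[1]
        # Move along (c[j], -c[i]): leaves c[i]*x[i] + c[j]*x[j] unchanged.
        t_up = min((1.0 - x[i]) / c[j], x[j] / c[i])   # x[i] up, x[j] down
        t_dn = min(x[i] / c[j], (1.0 - x[j]) / c[i])   # x[i] down, x[j] up
        endpoints = []
        for t in (t_up, -t_dn):
            y = list(x)
            y[i] += t * c[j]
            y[j] -= t * c[i]
            endpoints.append(y)
        # If F is convex along this direction (the crux of pipage rounding),
        # the better endpoint is at least as good as the current x.
        x = max(endpoints, key=F)

Each iteration makes at least one coordinate integral, so at most $n-1$ oracle-guided steps are needed.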
The resulting mechanism for \EDP{} is composed of
@@ -140,10 +138,10 @@ set can be found in~\cite{singer-mechanisms}.
\begin{algorithmic}[1]
\State $\mathcal{N} \gets \mathcal{N}\setminus\{i\in\mathcal{N} : c_i > B\}$
\State $i^* \gets \argmax_{j\in\mathcal{N}}V(j)$
- \State $\xi \gets \argmax_{\lambda\in[0,1]^{n}} \{L(\lambda)
+ \State $L^* \gets \max_{\lambda\in[0,1]^{n}} \{L(\lambda)
\mid \lambda_{i^*}=0,\sum_{i \in \mathcal{N}\setminus\{i^*\}}c_i\lambda_i\leq B\}$
\Statex
- \If{$L(\xi) < C \cdot V(i^*)$} \label{c}
+ \If{$L^* < C \cdot V(i^*)$} \label{c}
\State \textbf{return} $\{i^*\}$
\Else
\State $i \gets \argmax_{1\leq j\leq n}\frac{V(j)}{c_j}$
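In plain code, the steps of the mechanism visible in this hunk look roughly as follows. This is a sketch only: `solve_relaxation` is a placeholder oracle for the concave maximization of $L$, and the greedy loop is truncated in the hunk, so a plain ratio greedy stands in for the paper's actual rule.

def mechanism(agents, V, c, B, C, solve_relaxation):
    # Step 1: drop individually infeasible agents (c_i > B).
    N = [i for i in agents if c[i] <= B]
    # Step 2: the most valuable singleton, i^* = argmax_j V(j).
    i_star = max(N, key=V)
    # Step 3: L^* = max of the concave relaxation L with lambda_{i^*} = 0
    # under the budget constraint (placeholder oracle, see lead-in).
    L_star = solve_relaxation([i for i in N if i != i_star], c, B)
    # Step 4: allocate to i^* alone when the relaxation value is small.
    if L_star < C * V(i_star):
        return {i_star}
    # Greedy branch by the ratio V(j)/c_j; the loop body is elided in the
    # diff, so this simple budget-greedy is our stand-in for the paper's
    # rule, which has its own stopping condition.
    S, spent = set(), 0.0
    for j in sorted(N, key=lambda j: V(j) / c[j], reverse=True):
        if spent + c[j] <= B:
            S.add(j)
            spent += c[j]
    return S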
@@ -229,18 +227,18 @@ in the above formula:
\lambda_i x_i\T{x_i}\right)
\end{align}
\end{comment}
+
\subsection{Proof of Theorem~\ref{thm:main}}\label{sec:proofofmainthm}
%\stratis{individual rationality???}
%The proof of the properties of the mechanism is broken down into lemmas.
We now present the proof of Theorem~\ref{thm:main}. Truthfulness and individual
rationality follow from monotonicity and threshold payments. Monotonicity and
-budget feasibility follow the same steps as the analysis of Chen \emph{et al.} \cite{chen};
+budget feasibility follow the same steps as the analysis of \citeN{chen};
for the sake of completeness, we restate their proof in the Appendix.
- Our proof of the approximation ratio uses a bound on our concave relaxation
-$L$ (Lemma~\ref{lemma:relaxation}). This is our main technical
-contribution; the proof of this lemma can be found in Section~\ref{sec:relaxation}.
-\begin{lemma}\label{lemma:complexity}
+
+The complexity of the mechanism is given by the following lemma.
+\begin{lemma}[Complexity]\label{lemma:complexity}
For any $\varepsilon > 0$, the complexity of the mechanism is
$O(\text{poly}(n, d, \log\log \varepsilon^{-1}))$.
\end{lemma}
@@ -266,7 +264,7 @@ contribution; the proof of this lemma can be found in Section~\ref{sec:relaxatio
Finally, we prove the approximation ratio of the mechanism. We use the
following lemma, which establishes that $OPT'$, the optimal value \eqref{relax}
of the fractional relaxation $L$ under the budget constraints, is not too far
from $OPT$.
-\begin{lemma}\label{lemma:relaxation}
+\begin{lemma}[Approximation]\label{lemma:relaxation}
%\begin{displaymath}
$ OPT' \leq 2 OPT
+ 2\max_{i\in\mathcal{N}}V(i)$
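As a quick sanity check of the lemma's strength (our remark, not a statement from the paper): after the mechanism's first step every remaining singleton is feasible, so $\max_{i\in\mathcal{N}}V(i) = V(i^*) \leq OPT$, and the lemma immediately gives
\begin{displaymath}
  OPT' \;\leq\; 2\,OPT + 2\max_{i\in\mathcal{N}}V(i) \;\leq\; 4\,OPT.
\end{displaymath}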
@@ -319,11 +317,11 @@ Using Lemma~\ref{lemma:relaxation} we can complete the proof of Theorem~\ref{thm
+ \frac{(e-1)\varepsilon}{C(e-1)- 6e + 2}
\end{align*}
Finally, using Lemma~\ref{lemma:greedy-bound} again, we get:
- \begin{multline}\label{eq:bound2}
+ \begin{equation}\label{eq:bound2}
OPT(V, \mathcal{N}, B) \leq \frac{3e}{e-1}\left( 1 + \frac{4e}{C
- (e-1) -6e +2}\right) V(S_G)\\
+ (e-1) -6e +2}\right) V(S_G)
+\frac{2e\varepsilon}{C(e-1)- 6e + 2}
- \end{multline}
+ \end{equation}
To minimize the coefficients of $V_{i^*}$ and $V(S_G)$ in \eqref{eq:bound1} and \eqref{eq:bound2}, respectively,
we wish to choose $C=C^*$ such that:
\begin{displaymath}