Diffstat (limited to 'main.tex')
-rw-r--r--  main.tex  118
1 file changed, 85 insertions(+), 33 deletions(-)
diff --git a/main.tex b/main.tex
index 65128b1..6fdf39c 100644
--- a/main.tex
+++ b/main.tex
@@ -26,7 +26,7 @@ considered.
%its value as a singleton set: $V(\{i\})$. If there is no ambiguity, we will write
%$V(i)$ instead of $V(\{i\})$.
Unfortunately, even for the full information case, the greedy algorithm gives an
-unbounded approximation ratio. Instead, we have:
+unbounded approximation ratio. Instead,
\junk{
Let $i^*
= \argmax_{i\in\mathcal{N}} V(i)$ be the element of maximum value.
@@ -40,7 +40,7 @@ following result from Singer \cite{singer-influence} which follows from
Chen \emph{et al.}~\cite{chen}: \stratis{Is it Singer or Chen? Also, we need to introduce $V(i)$ somewhere...}
}
}
-
+\junk{
\begin{lemma}~\cite{singer-influence}\label{lemma:greedy-bound}
Let $S_G$ be the set computed by the greedy algorithm and let
%define $i^*$:
@@ -53,9 +53,11 @@ We have:
OPT \leq \frac{e}{e-1}\big( 3 V(S_G) + 2 V(i^*)\big).
\end{displaymath}
\end{lemma}
-
-Hence, taking the maximum between $V(S_G)$ and $V(i^*)$ yields an approximation
-ratio of $\frac{5e}{e-1}$. However,
+}
+let
+$i^* = \argmax_{i\in\mathcal{N}} V(i).$
+Taking the maximum between $V(S_G)$ and $V(i^*)$ yields an approximation
+ratio of $\frac{5e}{e-1}$~\cite{singer-influence}.
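+For completeness, the ratio follows from the bound proved
+in~\cite{singer-influence}:
+\begin{displaymath}
+OPT \leq \frac{e}{e-1}\big(3 V(S_G) + 2 V(i^*)\big)
+\leq \frac{e}{e-1}\cdot 5 \max\{V(S_G), V(i^*)\}
+= \frac{5e}{e-1}\max\{V(S_G), V(i^*)\}.
+\end{displaymath}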
However, this approach breaks incentive compatibility and therefore cannot be
directly applied to the strategic case~\cite{singer-influence}.
\junk{
@@ -69,7 +71,7 @@ the allocation function and hence also truthfulness, by Myerson's Theorem.
For the strategic case,
\begin{itemize}
-\iem
+\item
When the underlying
full information problem \eqref{eq:non-strategic} can be solved in
polynomial time, Chen et al. \cite{chen} prove that
@@ -79,8 +81,22 @@ $V(i^*) \geq C\cdot OPT_{-i^*}$ (for some constant $C$)
and to $S_G$ otherwise yields an 8.34-approximation mechanism. However, this is not a poly-time mechanism when the underlying problem is NP-hard (unless P=NP), as is the case for \EDP.
\item
-
-
+For NP-hard
+problems, consider
+the optimal value of a \emph{fractional relaxation} of the function $V$ over the set
+$\mathcal{N}$. A function $R:[0, 1]^{n}\to\reals_+$ defined on the hypercube $[0, 1]^{n}$ is a fractional relaxation of $V$
+over the set $\mathcal{N}$ if %(a) $R$ is a function defined on the hypercube $[0, 1]^{n}$ and (b)
+ $R(\id_S) = V(S)$ for all $S\subseteq\mathcal{N}$, where
+$\id_S$ denotes the indicator vector of $S$. The optimization program
+\eqref{eq:non-strategic} extends naturally to such relaxations:
+\begin{align}
+ OPT' = \max_{\lambda\in[0, 1]^{n}}
+ \left\{R(\lambda) \mid \sum_{i=1}^{n} \lambda_i c_i
+ \leq B\right\}\label{relax}
+\end{align}
+Substituting $OPT'_{-i^*}$ for $OPT_{-i^*}$ as before works for specific
+problems such as \textsc{Knapsack}~\cite{chen} and
+\textsc{Coverage}~\cite{singer-influence}. For other submodular objectives,
+this general approach must be adapted and extended; a minimal example of a
+fractional relaxation, for \textsc{Knapsack}, is sketched after this list.
\end{itemize}
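+A minimal illustration: in \textsc{Knapsack} the value of a set is additive,
+$V(S) = \sum_{i\in S} v_i$, writing $v_i$ for the value of item $i$ (notation
+local to this example). The linear extension
+\begin{displaymath}
+R(\lambda) = \sum_{i=1}^{n} \lambda_i v_i
+\end{displaymath}
+is a fractional relaxation in the above sense, since
+$R(\id_S) = \sum_{i\in S} v_i = V(S)$ for every $S\subseteq\mathcal{N}$.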
@@ -99,7 +115,7 @@ full information problem \eqref{eq:non-strategic} can be solved in
polynomial time, Chen et al. \cite{chen} prove that $OPT_{-i^*}$, the optimal solution to \eqref{eq:non-strategic} when $i^*$ is excluded from set $\mathcal{N}$, can play the role of this quantity: that is, allocating to $i^*$ when
$V(i^*) \geq C\cdot OPT_{-i^*}$ (for some constant $C$)
and to $S_G$ otherwise yields an 8.34-approximation mechanism. However, this is not a poly-time mechanism when the underlying problem is NP-hard (unless P=NP), as is the case for \EDP.
-}
+
\fussy
For NP-hard
@@ -120,15 +136,28 @@ $\id_S$ denotes the indicator vector of $S$. The optimization program
\end{align}
To attain a truthful constant-approximation mechanism for \textsc{Knapsack}, Chen \emph{et al.}~\cite{chen} substitute $OPT'_{-i^*}$ for $OPT_{-i^*}$ in the above program, where $R$ is a relaxation of the \textsc{Knapsack} objective.
Similarly, Singer~\cite{singer-influence} follows the same approach to obtain a mechanism for \textsc{Coverage}.
+}
-A relaxation which is commonly used, due to its well-behaved properties in the
-context of maximizing submodular functions, is the \emph{multi-linear}
+\paragraph{Our approach}
+Building on~\cite{chen,singer-influence},
+we introduce a relaxation specifically tailored to the value
+function of \EDP.
+Let $P_\mathcal{N}^\lambda(S)$ denote the probability of choosing the set $S$ when
+each element $i$ in $\mathcal{N}$ is selected independently with probability
+$\lambda_i$: %or to reject it with probability $1-\lambda_i$:
+\begin{displaymath}
+ P_\mathcal{N}^\lambda(S) = \prod_{i\in S} \lambda_i
+ \prod_{i\in\mathcal{N}\setminus S}( 1 - \lambda_i)
+\end{displaymath}
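+(Note that these probabilities define a product distribution over subsets:
+$\sum_{S\subseteq\mathcal{N}} P_\mathcal{N}^\lambda(S)
+= \prod_{i\in\mathcal{N}} \big(\lambda_i + (1 - \lambda_i)\big) = 1$.)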
+Consider the general \emph{multi-linear}
extension:
\begin{equation}\label{eq:multilinear}
F(\lambda)
= \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\left[V(S)\right]
= \sum_{S\subseteq\mathcal{N}} P_\mathcal{N}^\lambda(S) V(S)
\end{equation}
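+For instance, for $n = 2$, the sum in \eqref{eq:multilinear} expands to
+\begin{displaymath}
+F(\lambda) = (1 - \lambda_1)(1 - \lambda_2) V(\emptyset)
+  + \lambda_1 (1 - \lambda_2) V(\{1\})
+  + (1 - \lambda_1) \lambda_2 V(\{2\})
+  + \lambda_1 \lambda_2 V(\{1, 2\}),
+\end{displaymath}
+which is linear in each coordinate $\lambda_i$ taken separately, hence the
+name.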
+\junk{
+
$P_\mathcal{N}^\lambda(S)$ is the probability of choosing the set $S$ if
we select each element $i$ in $\mathcal{N}$ independently with probability
$\lambda_i$: %or to reject it with probability $1-\lambda_i$:
@@ -145,13 +174,19 @@ extension using the \emph{pipage rounding} method of Ageev and Sviridenko
\cite{pipage}.
Here, following these ideas, we introduce a relaxation specifically tailored to the value
-function of \EDP. The multi-linear extension of \eqref{modified} can be written:
+function of \EDP.
+}
+For \EDP, the multi-linear extension
+%of \eqref{modified}
+can be written as:
\begin{displaymath}
F(\lambda) = \mathbb{E}_{S\sim
P_\mathcal{N}^\lambda}\Big[\log\det \big(I_d + \sum_{i\in S} x_i\T{x_i}\big) \Big].
\end{displaymath}
-As in the case of \textsc{Coverage}, \eqref{relax} is not a convex optimization problem, and is not easy to solve directly. %As in ~\cite{singer-influence},
-We thus introduce an additional relaxation $L$. Our relaxation follows naturally by swapping the expectation and the $\log\det$
+%As in the case of \textsc{Coverage},
+Problem~\eqref{relax} is not a convex optimization problem and is not easy to solve directly. %As in ~\cite{singer-influence},
+We consider an additional relaxation $L$ (an upper bound on $F$, by
+concavity of $\log\det$ and Jensen's inequality) that
+follows naturally by swapping the expectation and the $\log\det$
in the above formula:
\begin{align}\label{eq:concave}
L(\lambda) & \defeq \log\det\Big(\mathbb{E}_{S\sim
@@ -163,10 +198,16 @@ This function is well-known to be concave and even self-concordant (see \emph{e.
\cite{boyd2004convex}). In this case, the analysis of Newton's method for
self-concordant functions in \cite{boyd2004convex} shows that it is possible
to find the maximum of $L$ to any precision $\varepsilon$ in
-a number of iterations $O(\log\log\varepsilon^{-1})$. The main challenge will
-be to prove that $OPT'$, for the relaxation $R=L$, is close to $V(S_G)$. To do so, our
+a number of iterations $O(\log\log\varepsilon^{-1})$.
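+To sketch why each iteration is cheap (a back-of-the-envelope computation,
+not part of the formal analysis): since each $i$ is included independently
+with probability $\lambda_i$, the expectation inside $L$ evaluates to
+$A(\lambda) \defeq I_d + \sum_{i=1}^{n} \lambda_i x_i\T{x_i}$, and standard
+matrix calculus gives
+\begin{displaymath}
+\frac{\partial L}{\partial \lambda_i} = \T{x_i} A(\lambda)^{-1} x_i,
+\qquad
+\frac{\partial^2 L}{\partial \lambda_i \partial \lambda_j}
+= -\big(\T{x_i} A(\lambda)^{-1} x_j\big)^2,
+\end{displaymath}
+so the gradient and Hessian required by each Newton step can be formed in
+time $O(\text{poly}(n, d))$.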
+The main challenge will
+be to prove that $OPT'$ in~\eqref{relax}, for the relaxation $R=L$, is close
+to $V(S_G)$; we address this later, and it constitutes the technical bulk of
+the paper.
+\junk{
+To do so, our
main technical contribution is to prove that $L$ has a bounded
approximation ratio to the value function $V$ (Lemma~\ref{lemma:relaxation}).
+}
\begin{algorithm}[!t]
\caption{Mechanism for \EDP{}}\label{mechanism}
@@ -191,23 +232,30 @@ approximation ratio to the value function $V$ (Lemma~\ref{lemma:relaxation}).
\end{algorithmic}
\end{algorithm}
-The resulting mechanism for \EDP{} is composed of (a) the allocation function
-presented in Algorithm~\ref{mechanism}, and (b) the payment function which pays each
-allocated agent $i$ her threshold payment as described in Myerson's Theorem. The constant $C$ is an absolute
-constant that determined in Section~\ref{sec:proofofmainthm}
-(see \eqref{eq:c}).
+The resulting mechanism for \EDP{} is composed of
+\begin{itemize}
+\item
+ the allocation function
+presented in Algorithm~\ref{mechanism}, and
+\item
+the payment function, which pays each
+allocated agent $i$ her threshold payment as described in Myerson's Theorem.
+%The constant $C$ is an absolute
+%constant that determined in Section~\ref{sec:proofofmainthm}
+%(see \eqref{eq:c}).
In the case where
$\{i^*\}$ is the allocated set, her threshold payment is $B$ (she would have
been dropped on line 1 of Algorithm~\ref{mechanism} had she reported a higher
cost). In the case where $S_G$ is the allocated set, threshold payments'
-characterization from Singer \cite{singer-mechanisms} gives a formula to
+characterization from~\cite{singer-mechanisms} gives a formula to
compute these payments.
+ \end{itemize}
We can now state our main result:
\begin{theorem}\label{thm:main}
The allocation described in Algorithm~\ref{mechanism}, along with threshold payments, is truthful, individually rational
and budget feasible. Furthermore, for any $\varepsilon>0$, the mechanism
- has complexity $O\left(\text{poly}(n, d,
+ runs in time $O\left(\text{poly}(n, d,
\log\log \varepsilon^{-1})\right)$ and returns a set $S^*$ such that:
\begin{align*}
OPT
@@ -218,7 +266,7 @@ We can now state our main result:
\end{theorem}
%\stratis{Add lowerbound here too.}
Note that this implies we construct a poly-time mechanism with approximation ratio arbitrarily close to 19.68, by taking $\varepsilon = \ldots$\stratis{fix me}.
-In addition, we prove the following lower bound.
+In addition, we prove the following simple lower bound.
\begin{theorem}\label{thm:lowerbound}
There is no $2$-approximate, truthful, budget feasible, individually rational mechanism for \EDP.
\end{theorem}
@@ -230,9 +278,10 @@ Suppose, for contradiction, that such a mechanism exists. Consider two experimen
\subsection{Proof of Theorem~\ref{thm:main}}\label{sec:proofofmainthm}
%\stratis{individual rationality???}
%The proof of the properties of the mechanism is broken down into lemmas.
+
We now present the proof of Theorem~\ref{thm:main}. Truthfulness and individual
rationality follow from monotonicity and threshold payments. Monotonicity and
-budget feasibility follows from the analysis of Chen \emph{et al.} \cite{chen};
+budget feasibility follow essentially from the analysis of Chen \emph{et al.}~\cite{chen};
we briefly restate the proofs below for the sake of completeness.
Our proof of the approximation ratio uses a bound on our concave relaxation
$L$ (Lemma~\ref{lemma:relaxation}). This is our main technical
@@ -270,10 +319,12 @@ The mechanism is monotone.
The mechanism is budget feasible.
\end{lemma}
\begin{proof}
-Suppose that $L(\xi) < C V(i^*)$. Then the mechanism selects $i^*$; as the bid of $i^*$ does not affect the above condition, the threshold payment of $i^*$ is $B$ and the mechanism is budget feasible.
-Suppose thus that $L(\xi) \geq C V(i^*)$.
+Suppose that $L(\xi) < C V(i^*)$. Then the mechanism selects $i^*$. Since the bid of $i^*$ does not affect the above condition, the threshold payment of $i^*$ is $B$ and the mechanism is budget feasible.
+Now suppose that $L(\xi) \geq C V(i^*)$.
Denote by $S_G$ the set selected by the greedy algorithm, and for $i\in S_G$, denote by
-$S_i$ the subset of the solution set that was selected by the greedy algorithm just prior to the addition of $i$---both sets determined for the present cost vector $c$. Chen \emph{et al.}~\cite{chen} show that, for any submodular function $V$, and for all $i\in S_G$:
+$S_i$ the subset of the solution set that was selected by the greedy algorithm just prior to the addition of $i$---both sets determined for the present cost vector $c$.
+%Chen \emph{et al.}~\cite{chen} show that,
+Then for any submodular function $V$, and for all $i\in S_G$:
%the reported cost of an agent selected by the greedy heuristic, and holds for
%any submodular function $V$:
\begin{equation}\label{eq:budget}
@@ -297,25 +348,26 @@ Hence, the total payment is bounded by the telescopic sum:
\end{lemma}
\begin{proof}
- The value function $V$ \eqref{modified} can be computed in time
+ The value function $V$ in \eqref{modified} can be computed in time
$O(\text{poly}(n, d))$ and the mechanism only involves a linear
number of queries to the function $V$.
-
The function $\log\det$ is concave and self-concordant (see
\cite{boyd2004convex}), so for any $\varepsilon$, its maximum can be found
to a precision $\varepsilon$ in $O(\log\log\varepsilon^{-1})$ iterations of Newton's method. Each iteration can be
done in time $O(\text{poly}(n, d))$. Thus, line 3 of
Algorithm~\ref{mechanism} can be computed in time
$O(\text{poly}(n, d, \log\log \varepsilon^{-1}))$. Hence the allocation
- function's complexity is as stated.
-
+ function's complexity is as stated.
+ %Payments can be easily computed in time $O(\text{poly}(n, d))$ as in prior work.
+\junk{
Using Singer's characterization of the threshold payments
\cite{singer-mechanisms}, one can verify that they can be computed in time
$O(\text{poly}(n, d))$.
+ }
\end{proof}
Finally, we prove the approximation ratio of the mechanism. We use the
-following lemma, which establishes that $OPT'$, the optimal value \eqref{relax} of the fractional relaxation $L$ under the budget constraints
+following lemma, proved later, which establishes that $OPT'$, the optimal value in \eqref{relax} of the fractional relaxation $L$ under the budget constraints
is not too far from $OPT$.
\begin{lemma}\label{lemma:relaxation}
%\begin{displaymath}