summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--general.tex36
-rw-r--r--main.tex53
-rw-r--r--paper.tex3
3 files changed, 78 insertions, 14 deletions
diff --git a/general.tex b/general.tex
index 49adab2..957521a 100644
--- a/general.tex
+++ b/general.tex
@@ -58,6 +58,42 @@ Theorem~\ref{thm:main}:
where $\mu$ is the smallest eigenvalue of $R$.
\end{theorem}
+\subsection{$D$-Optimality and Beyond}
+
+Ideally, motivated by the $D$-optimality criterion, we would like to design a mechanism that maximizes \eqref{dcrit} within a good approximation ratio. In what follows, we consider a slightly more general objective as follows:
+\begin{center}
+\textsc{ExperimentalDesignProblem} (EDP)
+\begin{subequations}
+\begin{align}
+\text{Maximize}\quad V(S) &= \log\det(I_d+\T{X_S}X_S) \label{modified} \\
+\text{subject to}\quad \sum_{i\in S} c_i&\leq B
+\end{align}\label{edp}
+\end{subequations}
+\end{center} where $I_d\in \reals^{d\times d}$ is the identity matrix.
+\junk{One possible interpretation of \eqref{modified} is that, prior to the auction, the experimenter has free access to $d$ experiments whose features form an orthonormal basis in $\reals^d$. However, \eqref{modified} can also be motivated in the context of \emph{Bayesian experimental design} \cite{chaloner1995bayesian}. In short, the objective \eqref{modified} arises naturally when the experimenter retrieves the model $\beta$ through \emph{ridge regression}, rather than the linear regression \eqref{leastsquares} over the observed data; we explore this connection in Section~\ref{sec:bed}.
+}
+
+We present our results with this version of the objective function because it is simple and captures the versions
+we will need later (including an arbitrary matrix in place of $I_d$ or a zero matrix which will correspond to~\eqref{dcrit}).
+
+%Note that maximizing \eqref{modified} is equivalent to maximizing \eqref{dcrit} in the full-information case. In particular, $\det(\T{X_S}X_S)> \det(\T{X_{S'}}X_{S'})$ iff $\det(I_d+\T{X_S}X_S)>\det(I_d+\T{X_{S'}}X_{S'})$. In addition,
+\EDP{} \emph{and} the corresponding problem with objective \eqref{dcrit} are NP-hard; to see this, note that \textsc{Knapsack} reduces to EDP with dimension $d=1$ by mapping the weight of each item $w_i$ to an experiment with $x_i=w_i$. Moreover, \eqref{modified} is submodular, monotone and satisfies $V(\emptyset) = 0$.
+%, allowing us to use the extensive machinery for the optimization of submodular functions, as well as recent results in the
+% context of budget feasible auctions \cite{chen,singer-mechanisms}.
+
+%\stratis{A potential problem is that all of the above properties hold also for, \emph{e.g.}, $V(S)=\log(1+\det(\T{X_S}X_S))$\ldots}
+
+As \eqref{dcrit} may take arbitrarily small negative values, to define a meaningful approximation one would consider the (equivalent) maximization of $V(S) = \det\T{X_S}X_S$. %, for some strictly increasing, onto function $f:\reals_+\to\reals_+$.
+However, the following lower bound implies that such an optimization goal cannot be attained under the constraints of truthfulness, budget feasibility, and individual rationality.
+\begin{lemma}
+For any $M>1$, there is no $M$-approximate, truthful, budget feasible, individually rational mechanism for a budget feasible reverse auction with value function $V(S) = \det{\T{X_S}X_S}$.
+
+\end{lemma}
+\begin{proof}
+\input{proof_of_lower_bound1}
+\end{proof}
+
+
\subsection{Beyond Linear Models}
Selecting experiments that maximize the information gain in the Bayesian setup
leads to a natural generalization to other learning examples beyond linear
diff --git a/main.tex b/main.tex
index 384800b..f3939ec 100644
--- a/main.tex
+++ b/main.tex
@@ -40,7 +40,6 @@ following result from Singer \cite{singer-influence} which follows from
Chen \emph{et al.}~\cite{chen}: \stratis{Is it Singer or Chen? Also, we need to introduce $V(i)$ somewhere...}
}
}
-\junk{
\begin{lemma}~\cite{singer-influence}\label{lemma:greedy-bound}
Let $S_G$ be the set computed by the greedy algorithm and let
%define $i^*$:
@@ -53,11 +52,9 @@ We have:
OPT \leq \frac{e}{e-1}\big( 3 V(S_G) + 2 V(i^*)\big).
\end{displaymath}
\end{lemma}
-}
-let
-$i^* = \argmax_{i\in\mathcal{N}} V(i).$
-Taking the maximum between $V(S_G)$ and $V(i^*)$ yields an approximation
-ratio of $\frac{5e}{e-1}$. ~\cite{singer-influence}. However,
+Thus,
+taking the maximum between $V(S_G)$ and $V(i^*)$ yields an approximation
+ratio of $\frac{5e}{e-1}$. However,
this approach breaks incentive compatibility and therefore cannot be directly
applied to the strategic case.~\cite{singer-influence}
\junk{
@@ -378,10 +375,10 @@ following lemma which establishes that $OPT'$, the optimal value \eqref{relax}
The proof of Lemma~\ref{lemma:relaxation} is our main technical contribution, and can be found in Section \ref{sec:relaxation}.
-\paragraph{Finishing Proof of }
+\paragraph{Finishing the Proof of Theorem~\ref{thm:main}}
Note that the lower bound $OPT' \geq OPT
$ also holds trivially, as $L$ is a fractional relaxation of $V$ over $\mathcal{N}$.
-Using this lemma, we can complete the proof of Theorem~\ref{thm:main} by showing that, %\begin{lemma}\label{lemma:approx}
+Using Lemma~\ref{lemma:relaxation} we can complete the proof of Theorem~\ref{thm:main} by showing that, %\begin{lemma}\label{lemma:approx}
%C_{\textrm{max}} = \max\left(1+C,\frac{3e}{e-1}\left( 1 + \frac{8e}{C
%(e-1) -10e +2}\right)\right)
for any $\varepsilon > 0$, if $OPT_{-i}'$, the optimal value of $L$ when $i^*$ is excluded from $\mathcal{N}$, has been computed to a precision
@@ -449,6 +446,9 @@ This solution is:
Placing the expression of $C^*$ in \eqref{eq:bound1} and \eqref{eq:bound2}
gives the approximation ratio in \eqref{approxbound}, and concludes the proof of Theorem~\ref{thm:main}.\hspace*{\stretch{1}}\qed
%\end{proof}
+
+
+
\subsection{Proof of Lemma~\ref{lemma:relaxation}}\label{sec:relaxation}
%Recall that, since $L$ is a fractional relaxation of $V$,
%\begin{displaymath}
@@ -456,6 +456,8 @@ This solution is:
%\end{displaymath}
%However, for the purpose of proving theorem~\ref{thm:main}, we need to bound
%$L$ from above (up to a multiplicative factor) by $V$.
+
+\junk{
To prove Lemma~\ref{lemma:relaxation}, we use the
\emph{pipage rounding} method of Ageev and Sviridenko~\cite{pipage}, where
$L$ is first bounded from above by the multi-linear relaxation $F$, which is itself
@@ -463,12 +465,27 @@ subsequently bounded by $V$. While the latter part is general and can be applied
to the multi-linear extension of any submodular function, the former part is
specific to our choice for the function $L$. %and is our main technical contribution (lemma~\ref{lemma:relaxation-ratio}).
-We first prove a variant of the $\varepsilon$-convexity of the multi-linear
+The proof has two aspects. The easier part is that $F$ is bounded by $V$.
+This is called \emph{cross-convexity} in the literature (see, \emph{e.g.},
+\cite{dughmi}), or $\varepsilon$-convexity by Ageev and Sviridenko~\cite{pipage}.
+}
+
+
+We prove that our multi-linear function $F$ has a property
+ which allows us to trade one fractional component of the solution for another until
+one of them becomes integral, without losing any value.
+This property is referred to in the literature as \emph{cross-convexity} (see, \emph{e.g.},
+\cite{dughmi}), or $\varepsilon$-convexity by Ageev and Sviridenko~\cite{pipage}.
+\junk{
+
+a variant of the $\varepsilon$-convexity of the multi-linear
extension \eqref{eq:multilinear} introduced by Ageev and Sviridenko
\cite{pipage} which allows to trade one fractional component of the solution for another until
one of them becomes integral, without losing any value. This property is also
referred to in the literature as \emph{cross-convexity} (see, \emph{e.g.},
-\cite{dughmi}). Formally, %this property states that
+\cite{dughmi}).
+}
+Formally, %this property states that
if we define:
\begin{displaymath}
\tilde{F}_\lambda(\varepsilon) \defeq F\big(\lambda + \varepsilon(e_i
@@ -539,6 +556,18 @@ a point $\lambda$, we mean that it satisfies the budget constraint $\sum_{i=1}^n
$\lambda_\varepsilon$ becomes integral.
\end{proof}
+
+Next, we prove the central result of bounding $L$ appropriately in terms of $F$.
+
+\junk{
+To prove Lemma~\ref{lemma:relaxation}, we use the
+\emph{pipage rounding} method of Ageev and Sviridenko~\cite{pipage}, where
+$L$ is first bounded from above by the multi-linear relaxation $F$, which is itself
+subsequently bounded by $V$. While the latter part is general and can be applied
+to the multi-linear extension of any submodular function, the former part is
+specific to our choice for the function $L$. %and is our main techn
+}
+
\begin{lemma}\label{lemma:relaxation-ratio}
% The following inequality holds:
For all $\lambda\in[0,1]^{n},$
@@ -700,7 +729,9 @@ Having bound the ratio between the partial derivatives, we now bound the ratio $
points, for which we know that both relaxations are equal to the value
function $V$. Hence, the ratio is equal to 1 on the vertices.
\end{proof}
- To prove Lemma~\ref{lemma:relaxation}, let us consider a feasible point $\lambda^*\in[0,1]^{n}$ such that $L(\lambda^*)
+
+\paragraph{Proof of Lemma~\ref{lemma:relaxation}}
+Let us consider a feasible point $\lambda^*\in[0,1]^{n}$ such that $L(\lambda^*)
= OPT'$. By applying Lemma~\ref{lemma:relaxation-ratio}
and Lemma~\ref{lemma:rounding} we get a feasible point $\bar{\lambda}$ with at most
one fractional component such that:
diff --git a/paper.tex b/paper.tex
index dfc97d7..def5c72 100644
--- a/paper.tex
+++ b/paper.tex
@@ -14,9 +14,6 @@
\begin{document}
\maketitle
-\begin{abstract}
-\input{abstract}
-\end{abstract}
\section{Introduction}
\input{intro}