summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xapproximation.tex4
-rwxr-xr-xproblem.tex22
2 files changed, 13 insertions, 13 deletions
diff --git a/approximation.tex b/approximation.tex
index cc278e1..1b94615 100755
--- a/approximation.tex
+++ b/approximation.tex
@@ -47,7 +47,7 @@ expectation of $V$ under the distribution $P_\mathcal{N}^\lambda$:
F(\lambda)
\defeq \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\big[V(S)\big]
% = \sum_{S\subseteq\mathcal{N}} P_\mathcal{N}^\lambda(S) V(S)
-= \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\left[ \log\det\left( I_d + \sum_{i\in S} x_i\T{x_i}\right) \right],\quad \lambda\in[0,1]^n.
+= \mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\big[ \log\det\big( I_d + \textstyle\sum_{i\in S} x_i\T{x_i}\big) \big],\quad \lambda\in[0,1]^n.
\end{equation}
Function $F$ is an extension of $V$ to the domain $[0,1]^n$, as it equals $V$ on integer inputs: $F(\id_S) = V(S)$ for all
$S\subseteq\mathcal{N}$, where $\id_S$ denotes the indicator vector of $S$. %\citeN{pipage} have shown how to use this extension to obtain approximation guarantees for an interesting class of optimization problems through the \emph{pipage rounding} framework, which has been successfully applied in \citeN{chen, singer-influence}.
@@ -56,7 +56,7 @@ multi-linear extension \eqref{eq:multi-linear} cannot be optimized in
polynomial time for the value function $V$ we study here, given by \eqref{modified}. Hence, we introduce an extension $L:[0,1]^n\to\reals$ s.t.~
\begin{equation}\label{eq:our-relaxation}
L(\lambda) \defeq
-\log\det\left(I_d + \sum_{i\in\mathcal{N}} \lambda_i x_i\T{x_i}\right),
+\log\det\big(I_d + \textstyle\sum_{i\in\mathcal{N}} \lambda_i x_i\T{x_i}\big),
%= \log\det\left(\mathbb{E}_{S\sim P_\mathcal{N}^\lambda}\bigg[I_d + \sum_{i\in S} x_i\T{x_i} \bigg]\right),
\quad \lambda\in[0,1]^n.
\end{equation}
diff --git a/problem.tex b/problem.tex
index 798e69a..0e9c75f 100755
--- a/problem.tex
+++ b/problem.tex
@@ -35,8 +35,8 @@ maximum a posteriori estimation leads to the following maximization
\cite{hastie}:
\begin{align}
\begin{split}
- \hat{\beta} = \argmax_{\beta\in\reals^d} \prob(\beta\mid y_S)
- &=\argmin_{\beta\in\reals^d} \big(\sum_{i\in S} (y_i - \T{\beta}x_i)^2
+ \hat{\beta} = \textstyle\argmax_{\beta\in\reals^d} \prob(\beta\mid y_S)
+ &=\textstyle\argmin_{\beta\in\reals^d} \big(\textstyle\sum_{i\in S} (y_i - \T{\beta}x_i)^2
+ \T{\beta}R\beta\big)\\
& = (R+\T{X_S}X_S)^{-1}X_S^Ty_S \label{ridge}
\end{split}
@@ -64,7 +64,7 @@ Under the linear model, and the Gaussian prior, the information gain takes the f
\begin{align}
I(\beta;y_S)&= \frac{1}{2}\log\det(R+ \T{X_S}X_S) - \frac{1}{2}\log\det R\label{dcrit} %\\
\end{align}
-Maximizing $I(\beta;y_S)$ is therefore equivalent to maximizing $\log\det(R+ \T{X_S}X_S)$, which is known in the experimental design literature as the Bayes
+Maximizing $I(\beta;y_S)$ is therefore equivalent to maximizing $\log\det(R+ \T{X_S}X_S)$, which is known in the literature as the Bayes
$D$-optimality criterion
\cite{pukelsheim2006optimal,atkinson2007optimum,chaloner1995bayesian}.
@@ -104,18 +104,18 @@ The cost $c_i$ can capture, \emph{e.g.}, the amount the subject $i$ deems suffic
\medskip\\\hspace*{\stretch{1}}\textsc{ExperimentalDesignProblem} (\EDP)\hspace*{\stretch{1}}
\begin{subequations}
\begin{align}
-\text{Maximize}\quad V(S) &= \log\det(I_d+\T{X_S}X_S) \label{modified} \\
-\text{subject to}\quad \sum_{i\in S} c_i&\leq B\label{lincon}
+\text{Maximize}\quad &V(S) = \log\det(I_d+\T{X_S}X_S) \label{modified} \\
+\text{subject to}\quad &\textstyle\sum_{i\in S} c_i\leq B\label{lincon}
\end{align}\label{edp}
\end{subequations}
\emph{W.l.o.g.}, we assume that $c_i\in [0,B]$ for all $i\in \mathcal{N}$, as no $i$ with $c_i>B$ can be in an $S$ satisfying \eqref{lincon}. Denote by
\begin{equation}\label{eq:non-strategic}
- OPT = \max_{S\subseteq\mathcal{N}} \Big\{V(S) \;\Big| \;
- \sum_{i\in S}c_i\leq B\Big\}
+  OPT = \textstyle\max_{S\subseteq\mathcal{N}} \big\{V(S) \;\big| \;
+ \textstyle \sum_{i\in S}c_i\leq B\big\}
\end{equation}
the optimal value achievable in the full-information case.
\EDP{}, as defined above, is NP-hard; to see this, note that \textsc{Knapsack}
-reduces to EDP with dimension $d=1$ by mapping the weight of each item, say,
+reduces to EDP with $d=1$ by mapping the weight of each item, say,
$w_i$, to an experiment with $x_i^2=w_i$.% Moreover, \eqref{modified} is submodular, monotone and satisfies $V(\emptyset) = 0$.
The value function \eqref{modified} has the following properties, which are proved in Appendix~\ref{app:properties}. First, it is non-negative, \emph{i.e.}, $V(S)\geq 0$
@@ -149,13 +149,13 @@ yields an approximation ratio of $\frac{5 e }{e-1}~$\cite{singer-mechanisms}; t
\subsection{Budget-Feasible Experimental Design: Strategic Case}
-We study the following \emph{strategic} setting, in which the costs $c_i$ are {\em not} common knowledge and their reporting can be manipulated by the experiment subjects. The latter are strategic and wish to maximize their utility, which is the difference of the payment they receive and their true cost. We note that, though subjects may misreport $c_i$, they cannot lie about $x_i$ (\emph{i.e.}, all public features are verifiable prior to the experiment) nor $y_i$ (\emph{i.e.}, the subject cannot falsify her measurement).
-In this setting, experimental design reduces to a \emph{budget feasible reverse auction}, as introduced by \citeN{singer-mechanisms}; we review the formal definition in Appendix~\ref{app:budgetfeasible}. In short, given a budget $B$ and a value function $V:2^{\mathcal{N}}\to\reals_+$, a \emph{reverse auction mechanism} $\mathcal{M} = (S,p)$ comprises (a) an \emph{allocation function} $S:\reals_+^n \to 2^\mathcal{N}$, determining the set
+We study the following \emph{strategic} setting, in which the costs $c_i$ are {\em not} common knowledge and their reporting can be manipulated by the experiment subjects. The latter are strategic and wish to maximize their utility, which is the difference of the payment they receive and their true cost. Note that, though subjects may misreport $c_i$, they cannot lie about $x_i$ (\emph{i.e.}, all public features are verifiable prior to the experiment) nor $y_i$ (\emph{i.e.}, the subject cannot falsify her measurement).
+Experimental design thus reduces to a \emph{budget feasible reverse auction}, as introduced by \citeN{singer-mechanisms}; we review the formal definition in Appendix~\ref{app:budgetfeasible}. In short, given a budget $B$ and a value function $V:2^{\mathcal{N}}\to\reals_+$, a \emph{reverse auction mechanism} $\mathcal{M} = (S,p)$ comprises (a) an \emph{allocation function} $S:\reals_+^n \to 2^\mathcal{N}$, determining the set
of experiments to be purchased, and (b) a \emph{payment function}
$p:\reals_+^n\to \reals_+^n$, determining the payments $[p_i(c)]_{i\in \mathcal{N}}$ received by experiment subjects.
We seek mechanisms that are \emph{normalized} (unallocated experiments receive zero payments), \emph{individually rational} (payments for allocated experiments exceed costs), have \emph{no positive transfers} (payments are non-negative), and are \emph{budget feasible} (the sum of payments does not exceed the budget $B$).
-We relax the notion of truthfulness to \emph{$\delta$-truthfulness}, requiring that reporting one's true cost is an \emph{almost-dominant} strategy: no subject increases their utility by reporting a cost that differs more than $\delta>0$ from their true cost. Under this definition, a mechanism is truthful if $\delta=0$.
+We relax the notion of truthfulness to \emph{$\delta$-truthfulness}, requiring that reporting one's true cost is an \emph{almost-dominant} strategy: no subject increases their utility by reporting a cost that differs by more than $\delta>0$ (\emph{e.g.}, a tenth of a cent) from their true cost. Under this definition, a mechanism is truthful if $\delta=0$.
In addition, we would like the allocation $S(c)$ to be of maximal value; however, $\delta$-truthfulness, as well as the hardness of \EDP{}, preclude achieving this goal. Hence, we seek mechanisms that are \emph{$(\alpha,\beta)$-approximate}, \emph{i.e.}, there exist $\alpha\geq 1$ and $\beta>0$ s.t.~$OPT \leq \alpha V(S(c))+\beta$, and are \emph{computationally efficient}, in that $S$ and $p$ can be computed in polynomial time.
We note that the constant approximation algorithm \eqref{eq:max-algorithm} breaks