Diffstat (limited to 'slides')
-rw-r--r--  slides/slides.tex  77
1 file changed, 77 insertions, 0 deletions
diff --git a/slides/slides.tex b/slides/slides.tex
index 11ec280..81f6deb 100644
--- a/slides/slides.tex
+++ b/slides/slides.tex
@@ -256,6 +256,83 @@
\end{displaymath}
\end{theorem}
\end{frame}
+
+
+\begin{frame}{Greedy revisited}
+ \begin{block}{Greedy\only<2>{\alert{'}}($V$, $B$)}
+ \begin{algorithmic}
+ \State $i \gets \argmax_{1\leq j\leq n}\frac{V(j)}{c_j}$
+ \State $S \gets \emptyset$
+ \While{$c_i\leq \only<1>{B - \sum_{j\in S} c_j} \only<2->{\alert{\frac{B}{2}\frac{V(S\cup\{i\})-V(S)}{V(S\cup\{i\})}}}$}
+ \State $S \gets S\cup\{i\}$
+ \State $i \gets \argmax_{1\leq j\leq n}
+ \frac{V(S\cup\{j\})-V(S)}{c_j}$
+ \EndWhile
+ \State \textbf{return} $S$
+ \end{algorithmic}
+ \end{block}
+
+ \vspace{1cm}
+
+ \alert{Problem:} The threshold payments are not budget feasible.
+\end{frame}
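+
+% For reference, a minimal Python sketch of Greedy' as stated on this slide,
+% kept as a source comment so the slides compile unchanged. It assumes V is a
+% callable set function and c a list of costs; all names are illustrative:
+%
+% def greedy_prime(V, c, B):
+%     n = len(c)
+%     S = set()
+%     # start from the item with the best value-per-cost ratio
+%     i = max(range(n), key=lambda j: V({j}) / c[j])
+%     # stopping rule of Greedy': the proportional-share test against B/2
+%     # (overlay 2) instead of the plain remaining-budget check (overlay 1)
+%     while c[i] <= (B / 2) * (V(S | {i}) - V(S)) / V(S | {i}):
+%         S.add(i)
+%         if len(S) == n:
+%             break
+%         # next candidate: best marginal value per cost among remaining items
+%         i = max((j for j in range(n) if j not in S),
+%                 key=lambda j: (V(S | {j}) - V(S)) / c[j])
+%     return S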
+
+\begin{frame}{MaxGreedy revisited}
+ \begin{block}{MaxGreedy}
+ \begin{algorithmic}
+ \State $i^* \gets \argmax_{1\leq j\leq n} V(j)$
+ \State $S \gets$ Greedy\alert{'}($V$, $B$)
+ \If{$V(i^*)\geq \only<1>{V(S)}\only<2,3>{\alert{OPT_{-i^*}}}\only<4>{\alert{OPT'_{i^*}}}$}
+ \State \textbf{return} $\{i^*\}$
+ \Else
+ \State \textbf{return} $S$
+ \EndIf
+ \end{algorithmic}
+ \end{block}
+
+ \vspace{0.5cm}
+
+ \only<1>{\alert{Problem:} MaxGreedy is not monotonic}
+
+  \only<2,3>{\alert{Solution 1:} use $OPT_{-i^*} = \max_{S : i^*\notin S}\{V(S) \mid \sum_{j\in S} c_j \leq B\}$. Gives an 8.34 approximation}
+
+ \only<3>{\alert{Problem:} $OPT_{-i^*}$ cannot be computed in polynomial time}
+
+ \only<4->{\alert{Solution 2:} use a concave relaxation:
+ \begin{displaymath}
+    OPT'_{i^*} = \max_{\substack{\lambda\in[0, 1]^n,\ \lambda_{i^*} = 0\\ \sum_{i=1}^n \lambda_i c_i \leq B}} L(\lambda)
+    = \max_{\substack{\lambda\in[0, 1]^n,\ \lambda_{i^*} = 0\\ \sum_{i=1}^n \lambda_i c_i \leq B}}\log\det\big(I_d + \sum_{i=1}^n \lambda_i x_ix_i^T\big)
+ \end{displaymath}
+ }
+\end{frame}
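+
+% A commented, illustrative Python sketch of MaxGreedy with the relaxed
+% benchmark OPT'_{i*}. It reuses greedy_prime from the earlier sketch; scipy's
+% general-purpose trust-constr solver stands in for whatever concave-
+% maximization routine is actually used, and X (the n x d matrix with rows
+% x_i) is an assumed representation of the data, not fixed by the slide:
+%
+% import numpy as np
+% from scipy.optimize import minimize, LinearConstraint
+%
+% def opt_relaxed(X, c, B, i_star):
+%     # OPT'_{i*}: maximize log det(I_d + sum_i lambda_i x_i x_i^T) over
+%     # lambda in [0,1]^n with lambda_{i*} = 0 and sum_i lambda_i c_i <= B
+%     n, d = X.shape
+%     def neg_L(lam):
+%         M = np.eye(d) + X.T @ (lam[:, None] * X)
+%         return -np.linalg.slogdet(M)[1]
+%     bounds = [(0.0, 0.0) if j == i_star else (0.0, 1.0) for j in range(n)]
+%     budget = LinearConstraint(np.asarray(c, dtype=float), -np.inf, B)
+%     res = minimize(neg_L, np.zeros(n), method="trust-constr",
+%                    bounds=bounds, constraints=[budget])
+%     return -res.fun
+%
+% def max_greedy(V, c, B, X):
+%     n = len(c)
+%     i_star = max(range(n), key=lambda j: V({j}))
+%     S = greedy_prime(V, c, B)  # sketch from the Greedy' slide above
+%     if V({i_star}) >= opt_relaxed(X, c, B, i_star):
+%         return {i_star}
+%     return S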
+
+\begin{frame}{Proof}
+\begin{itemize}
+ \item Monotonicity: OK
+ \pause
+ \item Budget feasibility: OK
+ \pause
+  \item Approximation ratio: what do we lose by using the concave relaxation?
+  A powerful tool: \alert{pipage rounding} [Ageev, Sviridenko]
+ \begin{itemize}
+    \item let's introduce the multilinear extension $F(\lambda) = \mathbb{E}\Big[\log\det(I_d+\sum_{i=1}^n \mu_ix_ix_i^T)\Big]$, $\mu_i\sim\mathcal{B}(\lambda_i)$
+ \item $L(\lambda) \leq 2 F(\lambda)$ (technical lemma)
+    \item $F$ is well-behaved: $\lambda$ can be rounded to a binary vector $\bar{\lambda}$ s.t.\ $F(\lambda)\leq F(\bar{\lambda})$
+    \item $OPT'_{i^*}\leq 2\, OPT_{-i^*} + V(i^*)$
+ \item we can conclude by using the approximation ratio for $OPT_{-i^*}$
+ \end{itemize}
+\end{itemize}
+\end{frame}
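+
+% A commented Python sketch of the rounding step: F(lambda) is estimated by
+% Monte Carlo over mu_i ~ Bernoulli(lambda_i), and pipage rounding is adapted
+% here to preserve the budget sum_k lambda_k c_k; the exact direction and
+% convexity details are an adaptation of [Ageev, Sviridenko] to the budget
+% constraint, not spelled out on the slide. Sample count and tolerance are
+% arbitrary choices for the sketch:
+%
+% import numpy as np
+%
+% rng = np.random.default_rng(0)
+%
+% def F(lam, X, samples=200):
+%     # Monte Carlo estimate of E[log det(I_d + sum_i mu_i x_i x_i^T)]
+%     n, d = X.shape
+%     total = 0.0
+%     for _ in range(samples):
+%         mu = rng.random(n) < lam            # mu_i ~ Bernoulli(lambda_i)
+%         M = np.eye(d) + X[mu].T @ X[mu]
+%         total += np.linalg.slogdet(M)[1]
+%     return total / samples
+%
+% def pipage_round(lam, X, c):
+%     lam = lam.copy()
+%     while True:
+%         frac = [k for k in range(len(lam)) if 1e-9 < lam[k] < 1 - 1e-9]
+%         if len(frac) < 2:
+%             break
+%         i, j = frac[0], frac[1]
+%         # a step eps moves lam_i by eps/c_i and lam_j by -eps/c_j (or the
+%         # reverse), so sum_k lam_k c_k is preserved along both directions
+%         eps_up = min((1 - lam[i]) * c[i], lam[j] * c[j])
+%         eps_dn = min(lam[i] * c[i], (1 - lam[j]) * c[j])
+%         up, dn = lam.copy(), lam.copy()
+%         up[i] += eps_up / c[i]; up[j] -= eps_up / c[j]
+%         dn[i] -= eps_dn / c[i]; dn[j] += eps_dn / c[j]
+%         # F is convex along such directions (submodularity), so one
+%         # endpoint does not decrease F; keep the better estimated one
+%         lam = up if F(up, X) >= F(dn, X) else dn
+%     return lam
+%
+% # Each iteration pins at least one coordinate to {0, 1}, so the loop ends
+% # after at most n steps.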
+
+\begin{frame}{Conclusion}
+ \begin{itemize}
+ \item Experimental design + Auction theory = powerful framework when working with user data
+ \vspace{1cm}
+ \item Can we extend this to learning tasks beyond linear regression?
+ \vspace{1cm}
+    \item Approximation ratio $\simeq \alert{13}$; lower bound: \alert{$2$}
+ \end{itemize}
+\end{frame}
\end{document}