Diffstat (limited to 'results.tex')
 results.tex | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/results.tex b/results.tex
index d46a771..9dd5b46 100644
--- a/results.tex
+++ b/results.tex
@@ -82,6 +82,8 @@ As shown above, $w_i^{N_i} \rightarrow w_i$ as $N_i \rightarrow +\infty$. We can
\mathbb{P}\left(\left|w_i^{N_i} - w_i\right| \geq \epsilon w_i \right) \leq 2e^{-2(1-p)N_i\frac{\epsilon^2 w_i^2}{w^2}}
\end{equation}
+\emph{TODO: multiplicative bounds are very bad for zero weights... Need to look at additive bounds for these zeros.}
+
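+The same Hoeffding argument with the relative deviation $\epsilon w_i$ replaced by an absolute deviation $t$ suggests one candidate additive bound (a sketch of the intended fix, not a worked-out result):
+\begin{equation}
+\mathbb{P}\left(\left|w_i^{N_i} - w_i\right| \geq t \right) \leq 2e^{-2(1-p)N_i\frac{t^2}{w^2}},
+\end{equation}
+which remains informative when $w_i = 0$.
+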
For $N_i$ sufficiently large for each element $i$, we have $\forall i \in \Omega, (1-\epsilon) w_i \leq w_i^{N_i} \leq (1 + \epsilon) w_i$. Under this assumption, if we choose the $k$ elements with largest estimated weight $w_i^{N_i}$, we obtain a $\frac{1-\epsilon}{1+\epsilon}$-approximation to OPT, where OPT is the value of the maximum-weight set of $k$ elements for the function $f$. To ensure that $N_i$ is sufficiently large for each element, we note that $\mathbb{E}(N_i) = pN$ and use a Chernoff bound coupled with a classic union bound:
\begin{equation}
@@ -91,17 +93,27 @@ For $N_i$ sufficiently large for each element $i$, we have $\forall i \in \Omega
As such, for $C > 0$ and $N \geq (C+1)\frac{8}{p}\log n$, we have $\forall i \in \Omega, N_i \geq \frac{pN}{2}$ with probability at least $1-\frac{1}{n^C}$.
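+For completeness, the two claims above can be verified as follows (a sketch; for the approximation step we assume the value of a selected set is the sum of its weights, matching the additive setting). Writing $\hat S$ for the $k$ elements of largest estimated weight and $S^*$ for the optimum,
+\begin{equation}
+\sum_{i \in \hat S} w_i \geq \frac{1}{1+\epsilon} \sum_{i \in \hat S} w_i^{N_i} \geq \frac{1}{1+\epsilon} \sum_{i \in S^*} w_i^{N_i} \geq \frac{1-\epsilon}{1+\epsilon} \sum_{i \in S^*} w_i = \frac{1-\epsilon}{1+\epsilon}\,\mathrm{OPT}.
+\end{equation}
+For the probability claim, the multiplicative Chernoff lower tail gives $\mathbb{P}\left(N_i < \frac{pN}{2}\right) \leq e^{-pN/8}$, so a union bound over the $n$ elements bounds the total failure probability by $n e^{-pN/8} \leq \frac{1}{n^C}$ whenever $N \geq (C+1)\frac{8}{p}\log n$.
+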
\begin{proposition}
-Assume that $N$ pairs of set-value $(S_j, f(S_j))_{j \in N}$ are observed, where $S_j \sim D_p$ and $p \equiv \frac{k}{n}$. If $N > (C_k +1)^2 \frac{n}{\sigma^2}$, then we can $\epsilon$-learn $f$ and optimize it to a $(1+\epsilon)/(1-\epsilon)$ factor for any cardinality constraint with probability $XXX$.
+Assume that $N$ set--value pairs $(S_j, f(S_j))_{j=1}^{N}$ are observed, where $S_j \sim D_p$ and $p \equiv \frac{k}{n}$. If $N > XXX$, then we can $\epsilon$-learn $f$ and optimize it to a $(1+\epsilon)/(1-\epsilon)$ factor for any cardinality constraint with probability $XXX$.
\end{proposition}
\section{Passive Optimization of Submodular Functions}
\subsection{Inverting the system}
+A result of \cite{balcan2011learning} states the following:
+
+\begin{proposition}
+Let ${\cal F}$ be the class of non-negative, monotone, submodular functions over $\Omega$. There is an algorithm that PMAC-learns ${\cal F}$ with approximation factor $\sqrt{n+1}$ by finding a function $\tilde f \colon S \mapsto \sqrt{\frac{1}{(n+1)z} w^T \chi(S)}$ such that, with high probability, XXX
+\end{proposition}
+
+It follows that if we optimize $\tilde f$, which can easily be done by solving the system $Ax = b^2$, where the rows of $A$ are the indicator vectors $\chi(S_j)$, $b_j = f(S_j)$, and $(b^2)_j = (b_j)^2$, then we obtain a $\sqrt{n+1}$-approximation algorithm for optimizing $f$ under a cardinality constraint. Once again, we can optimize under \emph{any} cardinality constraint.
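+A minimal sketch of this procedure (our illustration, not code from \cite{balcan2011learning}; the helper name is hypothetical, and we use least squares since the system need not be square):
+\begin{verbatim}
+import numpy as np
+
+def top_k_from_observations(sets, values, n, k):
+    # Build A: row j is the indicator vector chi(S_j).
+    A = np.zeros((len(sets), n))
+    for j, S in enumerate(sets):
+        A[j, list(S)] = 1.0
+    # Solve A x = b^2 in the least-squares sense; x recovers the
+    # weights up to the scale factor (n+1)z, which does not affect
+    # the ranking of the elements.
+    b2 = np.asarray(values, dtype=float) ** 2
+    x, *_ = np.linalg.lstsq(A, b2, rcond=None)
+    # Since the square root is monotone, the k indices of largest
+    # recovered weight maximize the learned surrogate under the
+    # cardinality constraint.
+    return np.argsort(x)[-k:]
+\end{verbatim}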
+
\subsection{Average weight method}
+The same argument can presumably be made for the average weight method.
+
\bibliography{optimize}
-\bibliographystyle{plain}
+\bibliographystyle{apalike}
\end{document}
\ No newline at end of file