author    Stratis Ioannidis <stratis@stratis-Latitude-E6320.(none)>  2012-11-05 09:13:20 -0800
committer Stratis Ioannidis <stratis@stratis-Latitude-E6320.(none)>  2012-11-05 09:13:20 -0800
commit    d4d9933432e1f15f9839d0e0ca14ff0f8656b814 (patch)
tree      37c7d4b7bf0d1febed847a5ff26eaa3aa4fd4778 /general.tex
parent    c94a36daa447f4c7821fad729b80622f59083625 (diff)
parent    b3c19ae2cbbdeec29f1c383b319119b0672e14f8 (diff)
Merge branch 'master' of ssh://palosgit01/git/data_value
Diffstat (limited to 'general.tex')
-rw-r--r--  general.tex | 6
1 file changed, 3 insertions, 3 deletions
diff --git a/general.tex b/general.tex
index 46337d6..3815c83 100644
--- a/general.tex
+++ b/general.tex
@@ -45,7 +45,7 @@ analysis of the approximation ratio, we get the following result which extends
Theorem~\ref{thm:main}:
\begin{theorem}
- There exists a truthful and budget feasible mechanism for the objective
+ There exists a truthful, individually rational, and budget feasible mechanism for the objective
function $\tilde{V}$ given by \eqref{eq:normalized}. Furthermore, for any $\varepsilon
> 0$, in time $O(\text{poly}(|\mathcal{N}|, d, \log\log \varepsilon^{-1}))$,
the algorithm computes a set $S^*$ such that:
@@ -64,8 +64,8 @@ Selecting experiments that maximize the information gain in the Bayesian setup l
where $h\in \mathcal{H}$ for some subset $\mathcal{H}$ of all possible mappings $h:\Omega\to\reals$, called the \emph{hypothesis space}, and $\varepsilon_i$ are random variables in $\reals$, not necessarily identically distributed, that are independent \emph{conditioned on $h$}. This model is quite broad and captures many learning tasks, such as the following examples, which we revisit briefly after the list:
\begin{enumerate}
\item\textbf{Generalized Linear Regression.} $\Omega=\reals^d$, $\mathcal{H}$ is the set of linear maps $\{h(x) = \T{\beta}x \text{ s.t. } \beta\in \reals^d\}$, and $\varepsilon_i$ are independent zero-mean normal variables with $\expt{\varepsilon_i^2}=\sigma_i^2$.
-\item\textbf{Logistic Regression.} $\Omega=\reals^d$, $\mathcal{H}$ is the set of maps $\{h(x) = \frac{e^{\T{\beta} x}}{1+e^{\T{\beta} x}} \text{ s.t. } \beta\in\reals^d\}$, and $\varepsilon_i$ are independent conditioned on $h$ such that $$\varepsilon_i=\begin{cases} 1- h(x_i),& \text{w.~prob.}~h(x_i)\\-h(x_i),&\text{w.~prob.}~1-h(x_i)\end{cases}$$
-\item\textbf{Learning Binary Functions with Bernoulli Noise.} $\Omega = \{0,1\}^d$, and $\mathcal{H}$ is some subset of $2^{\Omega\times\{0,1\}}$, and $$\varepsilon_i =\begin{cases}0, &\text{w.~prob.}~p\\\bar{h}(x_i)-h(x_i), \text{w.~prob.}~1-p\end{cases}$$
+\item\textbf{Logistic Regression.} $\Omega=\reals^d$, $\mathcal{H}$ is the set of maps $\{h(x) = \frac{e^{\T{\beta} x}}{1+e^{\T{\beta} x}} \text{ s.t. } \beta\in\reals^d\}$, and $\varepsilon_i$ are independent conditioned on $h$ such that $$\varepsilon_i=\begin{cases} 1- h(x_i),& \text{w.~prob.}\;h(x_i)\\-h(x_i),&\text{w.~prob.}\;1-h(x_i)\end{cases}$$
+\item\textbf{Learning Binary Functions with Bernoulli Noise.} $\Omega = \{0,1\}^d$, and $\mathcal{H}$ is some subset of $2^{\Omega}$, and $$\varepsilon_i =\begin{cases}0, &\text{w.~prob.}\;p\\\bar{h}(x_i)-h(x_i), &\text{w.~prob.}\;1-p\end{cases}$$
\end{enumerate}
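As a quick check on the last two noise models (this follows directly from the definitions above; nothing new is assumed), the observation $y_i = h(x_i) + \varepsilon_i$ reduces to a standard label model in each case:
\begin{itemize}
\item \emph{Logistic regression:} $y_i = 1$ with probability $h(x_i)$ (the case $\varepsilon_i = 1 - h(x_i)$) and $y_i = 0$ otherwise; that is, $y_i$ given $h$ follows a Bernoulli distribution with mean $h(x_i)$.
\item \emph{Bernoulli noise:} $y_i = h(x_i)$ with probability $p$, and $y_i = \bar{h}(x_i)$, the flipped label, with probability $1-p$.
\end{itemize}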
In this setup, assume that the experimenter has a prior distribution on the hypothesis $h\in \mathcal{H}$. Then, the information gain objective can again be written as the mutual information between $h$ and $y_S$.
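For instance, specializing to the generalized linear regression case above and assuming, purely for illustration, a zero-mean Gaussian prior on $\beta$ with covariance $\Sigma$ (this prior is an assumption of the sketch, not part of the setup stated here), the objective has the standard closed form
\[
  I(\beta;\, y_S) \;=\; \tfrac{1}{2}\log\det\!\left(I_d + \Sigma\,\T{X_S}\,\Lambda_S^{-1} X_S\right),
\]
where $X_S$ is the matrix whose rows are the selected $\T{x_i}$, $i\in S$, and $\Lambda_S$ is the diagonal matrix of the noise variances $\expt{\varepsilon_i^2}$, $i\in S$.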