| author | jeanpouget-abadie <jean.pougetabadie@gmail.com> | 2015-12-03 23:14:24 -0500 |
|---|---|---|
| committer | jeanpouget-abadie <jean.pougetabadie@gmail.com> | 2015-12-03 23:14:24 -0500 |
| commit | 3bcc519d2fcfdad6c57d50da13e3227fa5f0c13a (patch) | |
| tree | 61712089678fe59c3cbb6e4438ae6508e9b2b81c /poster/Finale_poster/poster.tex | |
| parent | 4375265f87f9a3188982ed15424a109797ea4f51 (diff) | |
| download | cascades-3bcc519d2fcfdad6c57d50da13e3227fa5f0c13a.tar.gz | |
adding fig to poster + small typo
Diffstat (limited to 'poster/Finale_poster/poster.tex')
| -rw-r--r-- | poster/Finale_poster/poster.tex | 12 |
1 files changed, 6 insertions, 6 deletions
diff --git a/poster/Finale_poster/poster.tex b/poster/Finale_poster/poster.tex
index ea94df7..5ec04e4 100644
--- a/poster/Finale_poster/poster.tex
+++ b/poster/Finale_poster/poster.tex
@@ -72,7 +72,7 @@
 \begin{block}{MLE}
 \begin{itemize}
 \item Define for node $j$, $y^t = x^{t+1}_j$
-\item Observations $\{(x^t, y^t)\}$ are drawn from a GLM. MLE problem:
+\item Observations $\{(x^t, y^t)\}$ are drawn from a GLM.~MLE problem:
 \begin{equation*}
 \begin{split}
 \hat{\theta}\in \arg\max_\theta \sum_{t}~& y^t\log f(\Theta_j\cdot x^t)
@@ -122,14 +122,14 @@ i.e.~maximize information gain per observation.
 \vspace{0.5em}
 \textbf{Baseline heuristic:}
 \begin{itemize}
-\item Choose source w.p. proportional to estimated out-degree $\implies$ wider
+\item Choose source w.p.~proportional to estimated out-degree $\implies$ wider
 cascades $\implies$ more data
 \end{itemize}
 \vspace{0.5em}
 \textbf{Principled heuristic:}
 \begin{itemize}
-\item Choose source w.p. proportional to mutual information
+\item Choose source w.p.~proportional to mutual information
 \begin{multline*}
 p(i) \propto I\big(\{X_t\}_{t\geq 1} ,\Theta | X^0 = \{i\}\big)\\
 = H(\Theta) - H\big(\Theta | \{X_t\}_{t\geq 1}, X_0 = \{i\}\big)
@@ -163,13 +163,13 @@ i.e.~maximize information gain per observation.
 \textbf{Justification} for any $f$:
 \begin{displaymath}
-I(X, Y) \geq I\big(f(X)\big)
+I(X, Y) \geq I\big(f(X), Y\big)
 \end{displaymath}
 in our case: $f$ truncates the cascade at $t=1$.
 (wip: obtain closed-form formula in this case).
 \item variance is a lower-bound on mutual information $\implies$ pick
-node $i$ w.p. proportional to $\sum_j \text{Var}(\Theta_{i,j})$.
+node $i$ w.p.~proportional to $\sum_j \text{Var}(\Theta_{i,j})$.
 \end{itemize}
@@ -177,7 +177,7 @@ i.e.~maximize information gain per observation.
 \begin{block}{Results}
 \begin{center}
-\includegraphics[scale=1.5]{fig.png}
+\includegraphics[scale=1.5]{../../simulation/plots/finale.png}
 \end{center}
 \end{block}
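For context on the MLE block touched by the first hunk: a minimal sketch of the per-node GLM fit it describes, assuming $f$ is the logistic sigmoid and that the truncated likelihood completes as the standard Bernoulli log-likelihood (the hunk cuts the equation off after its first term). All names below (`sigmoid`, `mle_theta_j`) are illustrative, not from the repository.

```python
# Hypothetical sketch of the per-node GLM MLE from the poster's MLE block.
# Assumptions (not stated in the diff): f is the logistic sigmoid, x^t are
# binary activation vectors, and the objective completes as the standard
# Bernoulli log-likelihood.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def mle_theta_j(X, y, lr=0.1, iters=500):
    """Fit Theta_j for one node j by gradient ascent on
    sum_t y^t log f(Theta_j . x^t) + (1 - y^t) log(1 - f(Theta_j . x^t)),
    where the second term is the assumed completion of the truncated equation."""
    n_obs, n_nodes = X.shape
    theta = np.zeros(n_nodes)
    for _ in range(iters):
        p = sigmoid(X @ theta)
        # Gradient of the Bernoulli-GLM log-likelihood w.r.t. theta.
        grad = X.T @ (y - p)
        theta += lr * grad / n_obs
    return theta

# Toy usage: 50 observations over a 4-node network, binary activations.
rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(50, 4)).astype(float)
y = rng.integers(0, 2, size=50).astype(float)
print(mle_theta_j(X, y))
```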

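The third hunk's variance heuristic, pick node $i$ w.p.~proportional to $\sum_j \text{Var}(\Theta_{i,j})$, reduces to a weighted random draw over nodes. A minimal sketch, assuming the posterior over $\Theta$ is represented by samples; `theta_samples` and `pick_source` are hypothetical names, and the diff does not say how the posterior is actually maintained.

```python
# Hypothetical sketch of the poster's variance-based source selection:
# pick node i with probability proportional to sum_j Var(Theta_{i,j}).
import numpy as np

rng = np.random.default_rng(0)

def pick_source(theta_samples):
    """theta_samples: assumed (n_samples, n, n) array of posterior draws of Theta."""
    # Per-entry posterior variance of Theta, shape (n, n).
    var = theta_samples.var(axis=0)
    # Row sum: total uncertainty about node i's outgoing weights.
    scores = var.sum(axis=1)
    p = scores / scores.sum()
    return rng.choice(len(p), p=p)

# Toy usage: 200 posterior draws over a 5-node network.
samples = rng.normal(size=(200, 5, 5))
print(pick_source(samples))
```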