aboutsummaryrefslogtreecommitdiffstats
path: root/poster
diff options
context:
space:
mode:
authorThibaut Horel <thibaut.horel@gmail.com>2015-12-03 18:44:11 -0500
committerThibaut Horel <thibaut.horel@gmail.com>2015-12-03 18:44:11 -0500
commitb2e37a91f5ae65a003bb528812a79d6800908de6 (patch)
tree6501f4bf41f819f214d76157b766595ea561834b /poster
parent00d875c935bf9fc05ee20bc547ad91be9f18f0fa (diff)
downloadcascades-b2e37a91f5ae65a003bb528812a79d6800908de6.tar.gz
Polishing the first 1.5 column
Diffstat (limited to 'poster')
-rw-r--r--poster/Finale_poster/poster.tex32
1 files changed, 16 insertions, 16 deletions
diff --git a/poster/Finale_poster/poster.tex b/poster/Finale_poster/poster.tex
index 8f1bd1d..1365801 100644
--- a/poster/Finale_poster/poster.tex
+++ b/poster/Finale_poster/poster.tex
@@ -70,18 +70,18 @@
\begin{block}{MLE}
\begin{itemize}
- \item For node $i$, $y^t = x^{t+1}_i$
- \item For node $i$, $\{(x^t, y^t)\}$ are drawn from a GLM
- \item SGD on $\{\theta_{ij}\}$
- \vspace{1cm}
+ \item Define for node $j$, $y^t = x^{t+1}_j$
+ \item Observations $\{(x^t, y^t)\}$ are drawn from a GLM. MLE problem:
\begin{equation*}
\begin{split}
- \hat{\theta}\in \arg\max_\theta \sum_{t}~& y^t\log f(\theta\cdot x^t)
- \\ & + (1-y^t) \log \big(1 - f(\theta\cdot x^t)\big)
+ \hat{\theta}\in \arg\max_\theta \sum_{t}~& y^t\log f(\Theta_j\cdot x^t)
+ \\ & + (1-y^t) \log \big(1 - f(\Theta_j\cdot x^t)\big)
\end{split}
\end{equation*}
-\item Log-likelihood is concave for common contagion models (IC model)
-\item Prior work~\cite{} finds convergence guarantees for $L1$-regularization
+ Can be solved efficiently by SGD on $\Theta$.
+ \vspace{1cm}
+\item log-likelihood is concave for common contagion models (\emph{e.g.}\ IC
+  model) $\Rightarrow$ provable convergence guarantees (\cite{}).
\end{itemize}
\end{block}
\end{column} % End of the first column
@@ -98,21 +98,20 @@
\end{figure}
{\bf Advantages:}
\begin{itemize}
- \item encode expressive graph priors (e.g. ERGMs)
- \item quantify uncertainty over each edge
+ \item prior encodes domain-specific knowledge of graph structure (e.g. ERGMs)
+ \item posterior expresses uncertainty over edge weights
\end{itemize}
{\bf Disadvantages:}
\begin{itemize}
- \item Hard to scale in large data volume regime
+ \item hard to scale with a large number of observations.
\end{itemize}
\end{block}
\begin{block}{Active Learning}
- \emph{Can we gain by choosing the source node? If so, how to best choose the
- source node?}
- \begin{center}--OR--\end{center}
- \emph{Can we choose the parts of the dataset we compute next?}
-\end{block}
+ \emph{Can we learn faster by choosing the source node? If so, how to best
+ choose it?}
+ \begin{center}--~OR~--\end{center}
+ \emph{Can we cherry-pick the most relevant part of the dataset?}
{\bf Idea:} Focus on parts of the graph which are unexplored (high uncertainty).
i.e.~maximize information gain per cascade
@@ -132,6 +131,7 @@ Principled heuristic:
\item Exact strategy requires knowing true distribution of $(X_t)$
\item Use estimated $\Theta$ to compute $H(\Theta | (X_t), X_0 = i)$
\end{itemize}
+\end{block}
\end{column}
%-----------------------------------------------------------------------------
\begin{column}{\sepwid}\end{column}