\documentclass[10pt]{article}
\usepackage[utf8x]{inputenc}
\usepackage{amsmath, amssymb, amsthm, microtype, graphicx, bbm, times}
\usepackage[pagebackref=false,breaklinks=true,colorlinks=true,citecolor=blue,draft]{hyperref}
\usepackage[capitalize, noabbrev]{cleveref}
\usepackage{subfigure}
\input{def}
\usepackage[accepted]{icml2015}

\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
\DeclareMathOperator{\E}{\mathbb{E}}
\let\P\relax
\DeclareMathOperator{\P}{\mathbb{P}}
\newcommand{\ex}[1]{\E\left[#1\right]}
\newcommand{\prob}[1]{\P\left[#1\right]}
\newcommand{\inprod}[1]{\left\langle #1 \right\rangle}
\newcommand{\R}{\mathbf{R}}
\newcommand{\N}{\mathbf{N}}
\newcommand{\C}{\mathcal{C}}
\newcommand{\eps}{\varepsilon}
\newcommand{\bt}{\boldsymbol{\theta}}
\newcommand{\bx}{\mathbf{x}}
\newcommand{\cl}[1]{\text{\textbf{#1}}}
\newcommand{\var}{\operatorname{Var}}
\newcommand{\eqdef}{\mathbin{\stackrel{\rm def}{=}}}

\newtheorem{theorem}{Theorem}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\theoremstyle{remark}
\newtheorem*{example}{Example}
\newtheorem*{remark}{Remark}

\begin{document}

\twocolumn[
\icmltitle{Bayesian and Active Learning for Graph Inference}

% It is OK to include author information, even for blind submissions:
% the style file will automatically remove it unless the [accepted]
% option is passed to the icml2015 package.
\icmlauthor{Thibaut Horel}{thorel@seas.harvard.edu}
\icmladdress{Harvard University}
\icmlauthor{Jean Pouget-Abadie}{jeanpougetabadie@g.harvard.edu}
\icmladdress{Harvard University}

% Keywords populate the "keywords" metadata in the PDF but are not
% shown in the document.
\icmlkeywords{Sparse Recovery, Cascade Models, Graph Inference, Networks, Diffusion Processes}

\vskip 0.3in
]

\begin{abstract}
The Network Inference Problem (NIP) is the machine learning task of recovering the edges and edge weights of an unknown weighted graph from observations of a random contagion process propagating over this graph. While previous work has focused on provable convergence guarantees for maximum-likelihood estimation of the edge weights, a Bayesian treatment of the problem is still lacking. In this work, we propose a scalable Bayesian framework for the unified NIP formulation of \cite{pouget}. Furthermore, we show how this Bayesian framework leads to intuitive and effective active learning heuristics which greatly speed up learning.
\end{abstract}

\section{Introduction}
\input{sections/intro.tex}

\section{Model}
\label{sec:model}
\input{sections/model.tex}

\section{Bayesian Inference}
\label{sec:bayes}
\input{sections/bayesian.tex}

\section{Active Learning}
\label{sec:active}
\input{sections/active.tex}

\section{Experiments}
\input{sections/experiments.tex}

\section{Discussion}
\input{sections/discussion.tex}

\paragraph{Acknowledgments.}
The authors would like to thank the Harvard CS281 staff, and in particular Professor Finale Doshi-Velez, for creating the context in which this project was born. The authors are also grateful to Edoardo Airoldi and Scott Linderman for fruitful discussions.

\bibliography{sparse}
\bibliographystyle{icml2015}

\newpage
\section{Appendix}
\label{sec:appendix}
\input{sections/appendix.tex}

\end{document}