% final_report.tex — main file for the ICML 2015 final report.
% (Removed non-LaTeX residue from a web code-viewer export: cgit header and a
% column of bare line numbers that preceded \documentclass and would have
% broken compilation.)
\documentclass[10pt]{article}
\usepackage[utf8]{inputenc}% utf8x (ucs) is deprecated and conflicts with hyperref/cleveref; plain utf8 suffices
\usepackage{amsmath, amssymb, amsthm, microtype, graphicx, bbm, times}
\usepackage[pagebackref=false,breaklinks=true,
            colorlinks=true,citecolor=blue]{hyperref}% removed leftover `draft' option: it deactivates every link, contradicting colorlinks=true in a final report
\usepackage[capitalize, noabbrev]{cleveref}
\usepackage{subfigure}% NOTE(review): deprecated package; migrating to subcaption requires updating \subfigure calls in sections/ — confirm before changing
\input{def}
\usepackage[accepted]{icml2015}
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
\DeclareMathOperator{\E}{\mathbb{E}}
\let\P\relax
\DeclareMathOperator{\P}{\mathbb{P}}
\newcommand{\ex}[1]{\E\left[#1\right]}
\newcommand{\prob}[1]{\P\left[#1\right]}
\newcommand{\inprod}[1]{\left\langle #1 \right\rangle}
\newcommand{\R}{\mathbf{R}}
\newcommand{\N}{\mathbf{N}}
\newcommand{\C}{\mathcal{C}}
\newcommand{\eps}{\varepsilon}
\newcommand{\bt}{\boldsymbol{\theta}}
\newcommand{\bx}{\mathbf{x}}
\newcommand{\cl}[1]{\text{\textbf{#1}}}
\DeclareMathOperator{\var}{Var}% operator declaration gives correct spacing (\var(X)), consistent with \argmax/\E/\P above
\newcommand{\eqdef}{\mathbin{\stackrel{\rm def}{=}}}

\newtheorem{theorem}{Theorem}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\theoremstyle{remark}
\newtheorem*{example}{Example}
\newtheorem*{remark}{Remark}

\begin{document}

\twocolumn[
\icmltitle{Bayesian and Active Learning for Graph Inference}

% It is OKAY to include author information, even for blind
% submissions: the style file will automatically remove it for you
% unless you've provided the [accepted] option to the icml2015
% package.
\icmlauthor{Thibaut Horel}{thorel@seas.harvard.edu}
\icmladdress{Harvard University}
\icmlauthor{Jean Pouget-Abadie}{jeanpougetabadie@g.harvard.edu}
\icmladdress{Harvard University}

% You may provide any keywords that you
% find helpful for describing your paper; these are used to populate
% the "keywords" metadata in the PDF but will not be shown in the document
\icmlkeywords{Sparse Recovery, Cascade Models, Graph Inference, Networks,
Diffusion Processes}

\vskip 0.3in
]

\begin{abstract}
    The Network Inference Problem (NIP) is the machine learning challenge of
    recovering the edges and edge weights of an unknown weighted graph from the
    observations of a random contagion process propagating over this graph.
    While previous work has focused on provable convergence guarantees for
    Maximum-Likelihood Estimation of the edge weights, a Bayesian treatment of
    the problem is still lacking. In this work, we propose a scalable Bayesian
    framework for the unified NIP formulation of \cite{pouget}. Furthermore, we
    show how this Bayesian framework leads to intuitive and effective active
    learning heuristics which greatly speed up learning.
\end{abstract}

\section{Introduction}
\input{sections/intro.tex}

\section{Model}
\label{sec:model}
\input{sections/model.tex}

\section{Bayesian Inference}
\label{sec:bayes}
\input{sections/bayesian.tex}

\section{Active Learning}
\label{sec:active}
\input{sections/active.tex}

\section{Experiments}
\input{sections/experiments.tex}

\section{Discussion}
\input{sections/discussion.tex}

\paragraph{Acknowledgments.} The authors would like to thank the Harvard CS281
staff and in particular Professor Finale Doshi-Velez for creating the context
in which this project was born. The authors are also grateful to Edoardo
Airoldi and Scott Linderman for fruitful discussions.

\bibliographystyle{icml2015}
\bibliography{sparse}
\newpage

\vspace*{10em}

\newpage


\section{Appendix}
\label{sec:appendix}
\input{sections/appendix.tex}

\end{document}