Diffstat (limited to 'poster_abstract')
-rw-r--r--  poster_abstract/main.tex | 19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/poster_abstract/main.tex b/poster_abstract/main.tex
index 19c9de8..d3ce670 100644
--- a/poster_abstract/main.tex
+++ b/poster_abstract/main.tex
@@ -68,9 +68,9 @@ graph's edges with high probability and ${\cal O}(s\log m)$ measurements where
$s$ is the maximum degree of the graph and $m$ is the number of nodes.
Furthermore, we show that our algorithm also recovers the edge weights (the
parameters of the diffusion process) and is robust in the context of
-approximate sparsity. Finally we prove an almost matching lower bound of
-$\Omega(s\log\frac{m}{s})$ and validate our approach empirically on synthetic
-graphs.
+approximate sparsity. Finally
+%we prove an almost matching lower bound of $\Omega(s\log\frac{m}{s})$ and
+we validate our approach empirically on synthetic graphs.
\end{abstract}
\category{I.2.6}{Artificial Intelligence}{Learning}[Parameter Learning]
@@ -109,8 +109,13 @@ comparing its performance to prior algorithms on synthetic data.
\section{Model}
\begin{figure}
+ \begin{center}
\includegraphics[scale=.4]{../presentation/images/drawing.pdf}
-\caption{Illustration of the sparse recovery framework. $\theta_j$ is the unknown weight vector. $b_j$ is the result of the sparse product $\theta_j \cdot X^t$. We observe bernoulli variables {\cal B}($f(b)$). }
+\caption{Illustration of the sparse recovery framework. $\theta_j$ is the
+unknown weight vector, $b_j$ is the result of the sparse product $\theta_j
+\cdot X^t$. We observe Bernoulli variables ${\cal B}(f(b))$.}
+\end{center}
+\vspace{-2em}
\end{figure}
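The new caption describes the observation model: $b_j = \theta_j \cdot X^t$, with Bernoulli observations ${\cal B}(f(b))$. A minimal simulation sketch of that model follows, assuming a link $f(b) = 1 - e^{-b}$, binary activation states, and arbitrary dimensions; all of these choices are illustrative assumptions, not taken from the paper.

import numpy as np

# Sketch of the observation model in the figure caption (assumptions noted above):
# b_j = theta_j . X^t, and we observe Bernoulli variables with success probability f(b_j).
rng = np.random.default_rng(0)
m, s, n = 100, 5, 200                # nodes, sparsity (max degree), time steps -- assumed values

# Unknown s-sparse weight vector theta_j for a fixed node j.
theta_j = np.zeros(m)
support = rng.choice(m, size=s, replace=False)
theta_j[support] = rng.uniform(0.5, 1.0, size=s)

# X[t] holds the binary activation states of the m nodes at time step t.
X = rng.integers(0, 2, size=(n, m))

def f(b):
    # Assumed link function mapping b_j to an activation probability in [0, 1).
    return 1.0 - np.exp(-b)

b = X @ theta_j                      # sparse products theta_j . X^t, one per time step
y = rng.binomial(1, f(b))            # observed Bernoulli variables B(f(b_j))
print(y[:10])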
@@ -196,7 +201,7 @@ We will need the following assumptions:
\forall X \in {\cal C(S)}, X^T \nabla^2\mathcal{L}_i(\theta_i^*)X
\geq \gamma \|X\|_2^2
\end{equation*}
-for some $\gamma>0$.
+for some $\gamma>0$. The constant $\gamma$ is called the \emph{restricted eigenvalue}.
\end{enumerate}
Adapting a result from \cite{Negahban:2009}, we obtain the following finite-sample guarantee:
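As a rough numerical counterpart to the restricted eigenvalue assumption above, the sketch below estimates the smallest eigenvalue of a loss Hessian restricted to the true support on synthetic data. It assumes a logistic loss, a random binary design, and restriction to the exact support $S$ rather than the full cone ${\cal C}(S)$; all of these are simplifying assumptions for illustration only.

import numpy as np

# Rough empirical proxy for the restricted eigenvalue gamma (assumptions noted above):
# smallest eigenvalue of X_S^T diag(p(1-p)) X_S / n, i.e. the Hessian of an averaged
# logistic negative log-likelihood at theta*, restricted to the support S.
rng = np.random.default_rng(1)
m, s, n = 100, 5, 2000               # assumed dimensions
support = rng.choice(m, size=s, replace=False)
theta_star = np.zeros(m)
theta_star[support] = rng.uniform(0.5, 1.0, size=s)

X = rng.integers(0, 2, size=(n, m)).astype(float)
p = 1.0 / (1.0 + np.exp(-X @ theta_star))        # assumed logistic link

H = (X * (p * (1.0 - p))[:, None]).T @ X / n     # Hessian of the averaged loss at theta*
H_S = H[np.ix_(support, support)]                # restrict to the support S
gamma_hat = np.linalg.eigvalsh(H_S).min()
print(f"empirical restricted eigenvalue proxy: {gamma_hat:.4f}")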
@@ -223,7 +228,9 @@ assumption:
a smallest ``restricted'' eigenvalue bounded from below by $\gamma>0$, where $X$ is
the $n\times m$ design matrix whose $k$-th row is $X^k$.
\end{enumerate}
-supposing that either $\Omega(s^2 \log m)$ cumulative time steps are observed or $\Omega(s \log m \log^3 (s \log m))$ distinct instances of the diffusion process.
+provided that either $\Omega(s^2 \log m)$ cumulative time steps or
+$\Omega(s \log m \log^3 (s \log m))$ distinct instances of the diffusion
+process (cascades) are observed.
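To give a concrete sense of the two regimes in the sentence above, the snippet below evaluates both growth rates for example values of $s$ and $m$; the constants hidden by the $\Omega$ notation are ignored, the example values are assumptions, and the two quantities count different things (cumulative time steps versus distinct cascades), so the numbers are only indicative.

import math

# Illustrative evaluation of the two growth rates (constants hidden by the Omega
# notation are ignored; s and m are assumed example values).
s, m = 5, 1000
cumulative_steps = s**2 * math.log(m)
cascades = s * math.log(m) * math.log(s * math.log(m))**3
print(f"s^2 log m              ~ {cumulative_steps:.0f} cumulative time steps")
print(f"s log m log^3(s log m) ~ {cascades:.0f} cascades")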
\section{Experiments}