 paper/sections/results.tex | 9 +++++++++
 src/make_plots.py          | 2 +-
 2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/paper/sections/results.tex b/paper/sections/results.tex
index ed6ebe6..31e478c 100644
--- a/paper/sections/results.tex
+++ b/paper/sections/results.tex
@@ -31,10 +31,19 @@ The following theorem is a consequence of Theorem 1 in \cite{Negahban:2009} and
\label{thm:neghaban}
Suppose the true vector $\theta^*$ has support $S$ of size $s$ and that the {\bf (RE)} assumption holds for the Hessian $\nabla^2 {\cal L}(\theta)$ with $\gamma > 0$. Then for any $\delta > 0$, solving \eqref{eq:pre-mle} with $\lambda \defeq 2\sqrt{\frac{\log m}{n^{1 - \delta} p_{\min}}}$ yields, with probability $1-\exp(-n^\delta \log m)$:
\begin{equation}
+\label{eq:sparse}
\|\hat \theta - \theta^* \|_2 \leq \frac{3}{\gamma} \sqrt{\frac{s \log m}{p_{\min} n^{1-\delta}}}
\end{equation}
\end{theorem}
+\begin{proof}
+Adopting the notation of \cite{Negahban:2009}:
+\begin{lemma}
+Let ${\cal C}({\cal M}, \bar {\cal M}^\perp, \theta^*) \defeq \{ \Delta \in \mathbb{R}^p \; | \; {\cal R}(\Delta_{\bar {\cal M}^\perp}) \leq 3 {\cal R}(\Delta_{\bar {\cal M}}) + 4 {\cal R}(\theta^*_{{\cal M}^\perp}) \}$, where $\cal R$ is a \emph{decomposable} regularizer with respect to $({\cal M}, \bar {\cal M}^\perp)$, and $({\cal M}, \bar {\cal M})$ are two subspaces such that ${\cal M} \subseteq \bar {\cal M}$. Suppose that $\exists \kappa_{\cal L} > 0, \; \exists \tau_{\cal L}, \; \forall \Delta \in {\cal C}, \; {\cal L}(\theta^* + \Delta) - {\cal L}(\theta^*) - \langle \nabla {\cal L}(\theta^*), \Delta \rangle \geq \kappa_{\cal L} \|\Delta\|^2 - \tau_{\cal L}^2(\theta^*)$. Let $\Psi({\cal M}) \defeq \sup_{u \in {\cal M} \backslash \{0\}} \frac{{\cal R}(u)}{\|u\|}$. Finally, suppose that $\lambda \geq 2 {\cal R}^*(\nabla {\cal L}(\theta^*))$, where ${\cal R}^*$ is the conjugate of ${\cal R}$. Then: $$\|\hat \theta_\lambda - \theta^* \|^2 \leq 9 \frac{\lambda^2}{\kappa_{\cal L}^2}\Psi^2(\bar {\cal M}) + \frac{\lambda}{\kappa_{\cal L}}\left\{2 \tau^2_{\cal L}(\theta^*) + 4 {\cal R}(\theta^*_{{\cal M}^\perp})\right\}$$
+\end{lemma}
+In the context of the Graph Inference problem, ${\cal M} \defeq \bar {\cal M} \defeq \{i \, : \, \theta_i \neq 0\}$, i.e.\ the subspace of vectors supported on the set of parents. ${\cal R}$ is chosen to be the $\ell_1$-norm, which is decomposable with respect to $({\cal M}, \bar {\cal M}^\perp)$. It follows that $\Psi(\bar {\cal M}) = \sqrt{s}$ and ${\cal R}^* = \|\cdot\|_{\infty}$. From Lemma~\ref{lem:icc_lambda_upper_bound}, we can choose $\lambda = 2 \sqrt{\frac{\log m}{n^{1-\delta}p_{\min}}}$. The {\bf (RE)} assumption ensures the existence of $\kappa_{\cal L} = \gamma > 0$ with $\tau_{\cal L} = 0$. Finally, we observe that ${\cal R}(\theta^*_{{\cal M}^\perp}) = 0$ by definition of ${\cal M}^\perp$, yielding Eq.~\eqref{eq:sparse}.
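+Concretely, substituting $\kappa_{\cal L} = \gamma$, $\tau_{\cal L} = 0$, $\Psi^2(\bar {\cal M}) = s$ and ${\cal R}(\theta^*_{{\cal M}^\perp}) = 0$ into the bound of the lemma gives
+$$\|\hat \theta - \theta^* \|_2^2 \leq 9 \frac{\lambda^2}{\gamma^2} s = \frac{36}{\gamma^2} \frac{s \log m}{p_{\min} n^{1-\delta}},$$
+which, up to the absolute constant, is the square of the bound in Eq.~\eqref{eq:sparse}.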
+\end{proof}
+
The proof is included in the Appendix. Note that as $n$ goes to infinity, the Hessian $\nabla^2 {\cal L}(\theta)$ converges to the \emph{Fisher Information Matrix} of the cascade model as a function of $\theta$. The various assumptions of Theorem~\ref{thm:neghaban} are discussed in Section~\ref{sec:assumptions}. The following corollary follows easily and gives the first ${\cal O}(s \log m)$ sample-complexity algorithm for graph reconstruction on general graphs.
\begin{corollary}
diff --git a/src/make_plots.py b/src/make_plots.py
index 947bf57..cb78034 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -45,5 +45,5 @@ def watts_strogatz(n_cascades, lbda, passed_function):
if __name__=="__main__":
- watts_strogatz(n_cascades=500, lbda=.002, passed_function=
+ watts_strogatz(n_cascades=2000, lbda=.002, passed_function=
 convex_optimization.sparse_recovery)
\ No newline at end of file
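
The implementation of convex_optimization.sparse_recovery is not shown in this diff, but the estimator analyzed in the theorem above is an $\ell_1$-regularized convex program. Below is a minimal, hypothetical sketch of such a sparse recovery step, using proximal gradient descent (ISTA) on a squared loss; the loss, step size, and all names here are illustrative assumptions, not the repository's implementation.

# Hypothetical sketch only: the actual convex_optimization.sparse_recovery
# used by make_plots.py is not shown in this diff. This illustrates a generic
# l1-regularized recovery step, minimized by proximal gradient descent (ISTA).
import numpy as np


def soft_threshold(x, t):
    # Proximal operator of t * ||.||_1: element-wise soft-thresholding.
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)


def sparse_recovery_sketch(X, y, lbda, n_iter=500):
    # Minimize (1/2n) * ||y - X @ theta||^2 + lbda * ||theta||_1.
    n, p = X.shape
    # Step size: inverse Lipschitz constant of the smooth part's gradient.
    step = n / (np.linalg.norm(X, 2) ** 2)
    theta = np.zeros(p)
    for _ in range(n_iter):
        grad = X.T @ (X @ theta - y) / n
        theta = soft_threshold(theta - step * grad, step * lbda)
    return theta


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n, p, s = 200, 50, 5           # n samples, p parameters, s-sparse truth
    theta_true = np.zeros(p)
    theta_true[:s] = rng.normal(size=s)
    X = rng.normal(size=(n, p))
    y = X @ theta_true + 0.1 * rng.normal(size=n)
    theta_hat = sparse_recovery_sketch(X, y, lbda=0.05)
    print("l2 recovery error:", np.linalg.norm(theta_hat - theta_true))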