 paper/sections/experiments.tex | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/paper/sections/experiments.tex b/paper/sections/experiments.tex
index ccd82ce..e2cc2e1 100644
--- a/paper/sections/experiments.tex
+++ b/paper/sections/experiments.tex
@@ -11,9 +11,9 @@
 & \includegraphics[scale=.21]{figures/watts_strogatz.pdf}
 & \includegraphics[scale=.23]{figures/kronecker_l2_norm.pdf}
 & \includegraphics[scale=.23]{figures/kronecker_l2_norm_nonsparse.pdf}\\
-(a) & (b) & (c) & (d)
+(a) Barabasi-Albert & (b) Watts-Strogatz & (c) sparse Kronecker & (d) non-sparse Kronecker
 \end{tabular}
-\captionof{figure}{blabla}
+\captionof{figure}{Figures (a) and (b) report the $F_1$-score on a $\log$ scale for two graphs: (a) a Barabasi-Albert graph with $300$ nodes and $16200$ edges; (b) a Watts-Strogatz graph with $300$ nodes and $4500$ edges. Figures (c) and (d) report the $\ell_2$-norm error $\|\hat \Theta - \Theta\|_2$ for a Kronecker graph that is (c) exactly sparse and (d) only approximately sparse.}
 \end{table*}
 
 In this section, we empirically validate the results and assumptions of Section~\ref{sec:results} for different initializations of the parameters ($n$, $m$, $\lambda$) and for varying levels of sparsity. We compare our algorithm to two state-of-the-art algorithms, \textsc{greedy} and \textsc{mle} from \cite{Netrapalli:2012}. As an extra benchmark, we also introduce a new algorithm, \textsc{lasso}, which approximates our \textsc{sparse mle} algorithm. We find empirically that \textsc{lasso} is highly robust and can be computed more efficiently than both \textsc{mle} and \textsc{sparse mle} without sacrificing performance.
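For reference, the two metrics named in the new caption can be written out explicitly. A minimal worked form, assuming the $F_1$-score is computed over the recovered edge set $\hat{E}$ against the true edge set $E$ (the standard harmonic mean of precision and recall):

    F_1 = \frac{2PR}{P + R}, \qquad
    P = \frac{|\hat{E} \cap E|}{|\hat{E}|}, \qquad
    R = \frac{|\hat{E} \cap E|}{|E|}

Panels (c) and (d) instead report the estimation error $\|\hat\Theta - \Theta\|_2$ directly, which measures parameter accuracy rather than support recovery.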

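As additional context for the \textsc{lasso} benchmark described above: a minimal sketch of how a per-node $\ell_1$-penalized regression can stand in for a sparse maximum-likelihood estimate. The data layout, the use of scikit-learn's Lasso, and the recovery threshold are all illustrative assumptions here, not the paper's actual implementation.

# Hypothetical sketch: approximating a sparse-MLE graph estimate with a
# per-column lasso regression. Data shapes, the choice of scikit-learn's
# Lasso, and the edge-recovery threshold are illustrative assumptions.
import numpy as np
from sklearn.linear_model import Lasso

def lasso_graph_estimate(X, lam=0.1, tol=1e-3):
    """Estimate a sparse parameter matrix Theta from samples X (m x n).

    Each column j of Theta is fit by regressing node j's observations on
    the remaining nodes with an l1 penalty, a common convex surrogate for
    a sparse maximum-likelihood objective.
    """
    m, n = X.shape
    theta_hat = np.zeros((n, n))
    for j in range(n):
        mask = np.arange(n) != j                  # all nodes except j
        model = Lasso(alpha=lam, max_iter=10_000)
        model.fit(X[:, mask], X[:, j])            # l1-penalized least squares
        theta_hat[mask, j] = model.coef_
    # Recovered edge set: entries whose magnitude clears a small threshold.
    edges = np.abs(theta_hat) > tol
    return theta_hat, edges

Comparing the recovered edge mask against a ground-truth adjacency matrix would then yield an $F_1$-score of the kind plotted in panels (a) and (b).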