From 86208a1bd7e6a7735340b318be4b40ced8448f1a Mon Sep 17 00:00:00 2001
From: jeanpouget-abadie
Date: Thu, 5 Feb 2015 12:42:12 -0500
Subject: adding four figures

---
 paper/sections/experiments.tex | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/paper/sections/experiments.tex b/paper/sections/experiments.tex
index 600cac3..ccd82ce 100644
--- a/paper/sections/experiments.tex
+++ b/paper/sections/experiments.tex
@@ -3,15 +3,18 @@
 \caption{Precision-recall curve for the Holme-Kim model. 200 nodes, 16200 edges.}
 \end{figure}
-\begin{figure}
-\includegraphics[scale=.4]{figures/watts_strogatz.pdf}
-\caption{Watts-Strogatz Model. 200 nodes, 20000 edges.}
-\end{figure}
-
-\begin{figure}
-\includegraphics[scale=.4]{figures/barabasi_albert.pdf}
-\caption{Barabasi Model.}
-\end{figure}
+\begin{table*}[t]
+\centering
+\begin{tabular}{c c c c}
+
+\includegraphics[scale=.21]{figures/barabasi_albert.pdf}
+& \includegraphics[scale=.21]{figures/watts_strogatz.pdf}
+& \includegraphics[scale=.23]{figures/kronecker_l2_norm.pdf}
+& \includegraphics[scale=.23]{figures/kronecker_l2_norm_nonsparse.pdf}\\
+(a) & (b) & (c) & (d)
+\end{tabular}
+\captionof{figure}{(a) Barab\'asi--Albert model; (b) Watts--Strogatz model (200 nodes, 20000 edges); (c) $\ell_2$ norm, Kronecker model; (d) $\ell_2$ norm, non-sparse Kronecker model.}
+\end{table*}
 
 In this section, we empirically validate the results and assumptions of Section~\ref{sec:results} for different initializations of the parameters ($n$, $m$, $\lambda$) and for varying levels of sparsity.
 We compare our algorithm to two state-of-the-art algorithms from \cite{Netrapalli:2012}: \textsc{greedy} and \textsc{mle}. As an additional benchmark, we also introduce a new algorithm, \textsc{lasso}, which approximates our \textsc{sparse mle} algorithm.
 We find empirically that \textsc{lasso} is highly robust and can be computed more efficiently than both \textsc{mle} and \textsc{sparse mle}, without sacrificing performance.
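
A note on the new hunk: \captionof{figure}{...} only works if the preamble loads a package that defines it. As a minimal sketch, assuming a preamble this patch does not show, the following lines would support the table* block above:

% Minimal preamble sketch (an assumption -- the patch does not touch the
% preamble). \captionof is defined by the caption package (the lighter
% capt-of package also provides it), and \includegraphics needs graphicx.
% table* lets the four-panel figure span both columns in a two-column layout.
\usepackage{graphicx}  % \includegraphics
\usepackage{caption}   % \captionof{figure}{...} outside a figure environment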
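
For the closing paragraph's claim that \textsc{lasso} approximates \textsc{sparse mle}, here is a sketch of the usual relationship between the two objectives, assuming a generic cascade log-likelihood $\mathcal{L}$ and a least-squares surrogate with response $y$ and design matrix $X$; none of these are spelled out in the patch, so this is illustration, not the paper's exact formulation:

% Sketch only -- generic objectives, not necessarily the paper's exact
% formulation. Both use an l1 penalty with weight lambda to promote a
% sparse parameter vector theta; lasso swaps the log-likelihood for a
% quadratic surrogate that is cheaper to optimize. (Requires amsmath.)
\begin{align*}
\hat{\theta}_{\textsc{sparse mle}} &\in \operatorname*{arg\,max}_{\theta}\;
  \mathcal{L}(\theta) - \lambda\,\lVert\theta\rVert_1
  && \text{(penalized cascade log-likelihood)}\\
\hat{\theta}_{\textsc{lasso}} &\in \operatorname*{arg\,min}_{\theta}\;
  \tfrac{1}{2}\,\lVert y - X\theta\rVert_2^2 + \lambda\,\lVert\theta\rVert_1
  && \text{(quadratic surrogate of } \mathcal{L}\text{)}
\end{align*}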