From e41204334108b4ad40246f3d81435a40ff484837 Mon Sep 17 00:00:00 2001
From: jeanpouget-abadie
Date: Mon, 18 May 2015 19:19:35 +0200
Subject: added plots to appendix

---
 src/make_plots.py | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 76 insertions(+), 1 deletion(-)

(limited to 'src/make_plots.py')

diff --git a/src/make_plots.py b/src/make_plots.py
index 8970cdf..4cbef4a 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -3,9 +3,72 @@ import numpy as np
 import cascade_creation
 import convex_optimization
 import algorithms
+import timeout
 import rip_condition
 
 
+def theoreticalGuarantees(graph_name, n_cascades, min_proba, max_proba,
+        sparse_edges, p_init, passed_function, *args, **kwargs):
+
+    #creating original graph
+    G = cascade_creation.InfluenceGraph(max_proba = max_proba,
+                                        min_proba = min_proba,
+                                        sparse_edges = sparse_edges
+                                        )
+    G.import_from_file(graph_name)
+
+    #creating dummy graph
+    G_hat = cascade_creation.InfluenceGraph(max_proba=None)
+    G_hat.add_nodes_from(G.nodes())
+
+    #generating cascades
+    A = cascade_creation.generate_cascades(G, p_init=p_init,
+                                           n_cascades = n_cascades)
+
+    #Value of restrictedEigenvalueList
+    restrictedEigenvalueList = []
+
+    for node in G.nodes():
+        print(node)
+        try:
+            M, _ = cascade_creation.icc_matrixvector_for_node(A, node)
+            M_val = np.delete(M, node, axis=1)
+            parents = np.nonzero(G.mat[:,node])[0]
+            restrictedEigenvalueList.append(
+                convex_optimization.restrictedEigenvalue(M, S=parents,
+                    gramMatrix=True, node=node))
+            print(restrictedEigenvalueList[-1])
+        except timeout.TimeoutError:
+            print("TimeoutError, skipping to next node")
+
+    print(restrictedEigenvalueList)
+    alpha = 1
+    theoreticalGuarantee = sum(
+        np.sqrt(len(parents)*np.log(G.number_of_nodes())/len(M))/(
+            alpha*gamma) for gamma in restrictedEigenvalueList if gamma >=
+        1000)
+    print(theoreticalGuarantee)
+
+
+def cascades_vs_measurements(graph_name, n_cascades, min_proba, max_proba,
+        sparse_edges=False, p_init=0.05):
+    """
+    Compares the number of measurements within a cascade for different types of
+    graphs
+    """
+    G = cascade_creation.InfluenceGraph(max_proba = max_proba,
+                                        min_proba = min_proba,
+                                        sparse_edges = sparse_edges
+                                        )
+    G.import_from_file(graph_name)
+    A = cascade_creation.generate_cascades(G, p_init=p_init,
+                                           n_cascades = n_cascades)
+
+    L = [len(cascade) for cascade in A]
+    print(L)
+    print("mean: {}\nstandard deviation: {}".format(np.mean(L), np.std(L)))
+
+
 def compare_greedy_and_lagrange_cs284r():
     """
     Compares the performance of the greedy algorithm on the
@@ -36,7 +99,8 @@ def compute_graph(graph_name, n_cascades, lbda, passed_function, min_proba,
                                          min_proba=min_proba,
                                          sparse_edges=sparse_edges)
     G.import_from_file(graph_name)
-    A = cascade_creation.generate_cascades(G, p_init=p_init, n_cascades=n_cascades)
+    A = cascade_creation.generate_cascades(G, p_init=p_init,
+                                           n_cascades=n_cascades)
 
     if passed_function==algorithms.greedy_prediction:
         G_hat = algorithms.greedy_prediction(G, A)
@@ -232,6 +296,17 @@ def plot_p_init_watts_strogatz():
 if __name__=="__main__":
     if 1:
+        theoreticalGuarantees(graph_name =
+            "../datasets/powerlaw_200_30_point3.txt",
+            n_cascades=100, min_proba=.2, max_proba=.7,
+            p_init=.05, sparse_edges=False,
+            passed_function=convex_optimization.sparse_recovery,
+            lbda=.001)
+
+    if 0:
+        cascades_vs_measurements("../datasets/watts_strogatz_p_init.txt",
+            n_cascades=1000, min_proba=.2, max_proba=.7)
+
     if 0:
         compute_graph("../datasets/watts_strogatz_300_30_point3.txt",
                       n_cascades=300, lbda=.013382, min_proba=.2,
                       max_proba=.7, passed_function=
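
Note on the patch above: in theoreticalGuarantees, the final sum reuses `parents` and `M` from whichever node the loop visited last, and filters on `gamma >= 1000`. Below is a minimal standalone sketch of the same guarantee computation that instead keeps each node's own sparsity and measurement count. It assumes the project modules behave exactly as they are used in the patch (InfluenceGraph, generate_cascades, icc_matrixvector_for_node, restrictedEigenvalue, timeout.TimeoutError); the function name per_node_guarantee, the per-node bookkeeping, and the `gamma > 0` guard are assumptions of this sketch, not part of the committed code.

    import numpy as np

    import cascade_creation
    import convex_optimization
    import timeout


    def per_node_guarantee(graph_name, n_cascades, min_proba, max_proba,
                           p_init, sparse_edges=False, alpha=1.0):
        """Sketch: accumulate the guarantee term node by node.

        Unlike the patched theoreticalGuarantees, each node contributes its
        own sparsity len(parents) and measurement count len(M), rather than
        reusing the values left over from the last loop iteration.
        """
        G = cascade_creation.InfluenceGraph(max_proba=max_proba,
                                            min_proba=min_proba,
                                            sparse_edges=sparse_edges)
        G.import_from_file(graph_name)
        A = cascade_creation.generate_cascades(G, p_init=p_init,
                                               n_cascades=n_cascades)

        terms = []  # one (sparsity, n_measurements, restricted eigenvalue) per node
        for node in G.nodes():
            try:
                M, _ = cascade_creation.icc_matrixvector_for_node(A, node)
                parents = np.nonzero(G.mat[:, node])[0]
                gamma = convex_optimization.restrictedEigenvalue(
                    M, S=parents, gramMatrix=True, node=node)
                terms.append((len(parents), len(M), gamma))
            except timeout.TimeoutError:
                print("TimeoutError, skipping to next node")

        n = G.number_of_nodes()
        # Same form of bound as in the patch, summed over the per-node terms;
        # the gamma > 0 guard stands in for the patch's `gamma >= 1000`
        # filter and is only a placeholder threshold.
        return sum(np.sqrt(s * np.log(n) / m) / (alpha * gamma)
                   for s, m, gamma in terms if gamma > 0)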