import matplotlib.pyplot as plt
import numpy as np

import cascade_creation
import convex_optimization
import algorithms
import rip_condition


def plot_rip_numberofnodes(max_proba, n_min, n_max, p_init, n_cascades, K_max):
    """
    Plots the K_max-th RIP constant for a varying number of nodes (n_max included)
    """
    x = np.arange(n_min, n_max+1)
    y = []
    for n_nodes in x:
        print(n_nodes)
        G = cascade_creation.InfluenceGraph(max_proba=max_proba)
        G.erdos_init(n=n_nodes, p=.1)  # TODO: handle different inits!
        cascades = cascade_creation.generate_cascades(G, p_init=p_init,
                                                      n_cascades=n_cascades)
        # Build the measurement matrix from the cascades and normalize its columns
        M, __ = cascade_creation.icc_matrixvector_for_node(cascades, None)
        M = cascade_creation.normalize_matrix(M)
        y.append(rip_condition.find_kth_rip_constants(M, K_max))
    # print y
    plt.clf()
    plt.plot(x, y)
    #plt.show()
    return x, y


def compare_greedy_and_lagrange_cs284r():
    """
    Compares the performance of the greedy algorithm against the Lagrangian
    sparse-recovery objective on the Facebook dataset for the CS284r project
    """
    G = cascade_creation.InfluenceGraph(max_proba=.8)
    G.import_from_file("../datasets/subset_facebook_SNAPnormalize.txt")
    A = cascade_creation.generate_cascades(G, p_init=.05, n_cascades=2000)

    # Greedy recovery
    G_hat = algorithms.greedy_prediction(G, A)
    algorithms.correctness_measure(G, G_hat, print_values=True)

    # Lagrangian objective (l1 objective with l2 penalization)
    G_hat = algorithms.recovery_l1obj_l2constraint(
        G, A,
        passed_function=convex_optimization.l1obj_l2penalization,
        floor_cstt=.1,
        lbda=10)
    algorithms.correctness_measure(G, G_hat, print_values=True)


def test():
    """
    unit test
    """
    if 0:
        plot_rip_numberofnodes(max_proba=.3, n_min=30, n_max=30, p_init=.01,
                               n_cascades=100, K_max=4)
    if 1:
        compare_greedy_and_lagrange_cs284r()


if __name__ == "__main__":
    test()
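

# ----------------------------------------------------------------------------
# Hedged sketch: rip_condition.find_kth_rip_constants is defined elsewhere and
# its implementation is not shown in this file.  The helper below is an
# illustrative, brute-force computation of the order-k restricted isometry
# constant delta_k of a column-normalized matrix M (the quantity plotted in
# plot_rip_numberofnodes), under the assumption that this is what that module
# computes.  It enumerates every k-column submatrix, so it is only practical
# for small test matrices.  It is not called anywhere in this script.
def _bruteforce_rip_constant_sketch(M, k):
    """Return delta_k = max over all k-column submatrices M_S of
    max(lambda_max(M_S^T M_S) - 1, 1 - lambda_min(M_S^T M_S))."""
    from itertools import combinations
    delta = 0.0
    for S in combinations(range(M.shape[1]), k):
        M_S = M[:, list(S)]
        gram = M_S.T.dot(M_S)
        eigvals = np.linalg.eigvalsh(gram)  # eigenvalues in ascending order
        delta = max(delta, eigvals[-1] - 1.0, 1.0 - eigvals[0])
    return delta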