| author | jeanpouget-abadie <jean.pougetabadie@gmail.com> | 2015-02-01 13:43:12 -0500 |
|---|---|---|
| committer | jeanpouget-abadie <jean.pougetabadie@gmail.com> | 2015-02-01 13:43:12 -0500 |
| commit | 0991a13214af4023259465f132f09e6c66f3895c | |
| tree | 4c3f89721285eb07a004e8efae9375f3dbd4919e | |
| parent | 6446b4bc986be1e4ff4bd1914efc4a2e6d0b7d12 | |
| download | cascades-0991a13214af4023259465f132f09e6c66f3895c.tar.gz | |
remodeling results section
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/algorithms.py | 2 |
| -rw-r--r-- | src/cascade_creation.py | 4 |
| -rw-r--r-- | src/convex_optimization.py | 72 |
| -rw-r--r-- | src/make_plots.py | 6 |
4 files changed, 19 insertions, 65 deletions
```diff
diff --git a/src/algorithms.py b/src/algorithms.py
index 39bcbb2..5b07f78 100644
--- a/src/algorithms.py
+++ b/src/algorithms.py
@@ -10,7 +10,7 @@ import timeout
 def greedy_prediction(G, cascades):
     """
     Returns estimated graph from Greedy algorithm in "Learning Epidemic ..."
-    Only words for independent cascade model!
+    Only works for independent cascade model!
     """
     G_hat = cascade_creation.InfluenceGraph(max_proba=None)
     G_hat.add_nodes_from(G.nodes())
diff --git a/src/cascade_creation.py b/src/cascade_creation.py
index 1a71285..1d77bd2 100644
--- a/src/cascade_creation.py
+++ b/src/cascade_creation.py
@@ -87,7 +87,7 @@ def icc_cascade(G, p_init):
         active = active & susceptible
         susceptible = susceptible & np.logical_not(active)
 
     if not cascade:
-        print "Empty cascade, consider changing p_init or n_nodes. Retrying."
+        print("Empty cascade, consider changing p_init or n_nodes. Retrying.")
         return icc_cascade(G, p_init)
     return cascade
@@ -156,7 +156,7 @@ def test():
     A = generate_cascades(G, .1, 2)
     M, w = icc_matrixvector_for_node(A, 0)
     t1 = time.time()
-    print t1 - t0
+    print(t1 - t0)
 
 if __name__ == "__main__":
     test()
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 96dc26f..2661e25 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -6,63 +6,9 @@
 import timeout
 import cvxopt
 
-@timeout.timeout(20)
-def l1obj_l2constraint(M_val, w_val):
-    """
-    Solves:
-        min - sum_j theta_j
-        s.t theta_j <= 0
-            |e^{M*theta} - (1 - w)|_2 <= 1
-    """
-    assert len(M_val) == len(w_val)
-
-    if M_val.dtype == bool:
-        M_val = M_val.astype('float32')
-
-    m, n = M_val.shape
-    c = cvxopt.matrix(-1.0, (n,1))
-
-    theta = tensor.row().T
-    z = tensor.row().T
-    theta_ = theta.flatten()
-    z_ = z.flatten()
-    M = theano.shared(M_val.astype(theano.config.floatX))
-    w = theano.shared(w_val.astype(theano.config.floatX))
-    y = (tensor.exp(M.dot(theta_)) - (1 - w)).norm(2) - 1
-    y_diff = tensor.grad(y, theta_)
-    y_hess = z[0] * theano.gradient.hessian(y, theta_)
-    f_x = theano.function([theta], [y, y_diff], allow_input_downcast=True)
-    f_xz = theano.function([theta, z], [y, y_diff, y_hess],
-                           allow_input_downcast=True)
-
-    def F(x=None, z=None):
-        if x is None:
-            return 1, cvxopt.matrix(.0001, (n,1))
-        elif z is None:
-            y, y_diff = f_x(x)
-            return cvxopt.matrix(float(y), (1, 1)),\
-                   cvxopt.matrix(y_diff.astype("float64")).T
-        else:
-            y, y_diff, y_hess = f_xz(x, z)
-            return cvxopt.matrix(float(y), (1, 1)), \
-                   cvxopt.matrix(y_diff.astype("float64")).T, \
-                   cvxopt.matrix(y_hess.astype("float64"))
-
-    G = cvxopt.spdiag([1 for i in xrange(n)])
-    h = cvxopt.matrix(0.0, (n,1))
-
-    cvxopt.solvers.options['show_progress'] = False
-    try:
-        theta = cvxopt.solvers.cpl(c,F, G, h)['x']
-    except ArithmeticError:
-        print "ArithmeticError thrown, change initial point"+\
-              " given to the solver"
-
-    return 1 - np.exp(theta), theta
-
 
 @timeout.timeout(20)
-def l1obj_l2penalization(M_val, w_val, lbda):
+def sparse_recovery(M_val, w_val, lbda):
     """
     Solves:
         min - sum_j theta_j + lbda*|e^{M*theta} - (1 - w)|_2
@@ -76,17 +22,25 @@
     if M_val.dtype == bool:
         M_val = M_val.astype('float32')
     if type(lbda) == int:
         lbda = np.array(lbda)
-    m, n = M_val.shape
     theta = tensor.row().T
-    z = tensor.row().T
     theta_ = theta.flatten()
-    z_ = z.flatten()
+
     M = theano.shared(M_val.astype(theano.config.floatX))
     w = theano.shared(w_val.astype(theano.config.floatX))
     lbda = theano.shared(lbda.astype(theano.config.floatX))
+
     y = (theta_).norm(1) + lbda * (
         tensor.exp(M.dot(theta_)) - (1 - w)).norm(2)
+
+    return diff_and_opt(theta, theta_, M, w,
+                        lbda, y)
+
+
+def diff_and_opt(theta, theta_, M, w, lbda, y):
+    z = tensor.row().T
+    z_ = z.flatten()
+
+    m, n = M.get_value().shape  # M_val is not in scope here; read the shape off the shared variable
     y_diff = tensor.grad(y, theta_)
     y_hess = z[0] * theano.gradient.hessian(y, theta_)
     f_x = theano.function([theta], [y, y_diff], allow_input_downcast=True)
diff --git a/src/make_plots.py b/src/make_plots.py
index dfe9c86..1951576 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -14,7 +14,7 @@ def plot_rip_numberofnodes(max_proba, n_min, n_max, p_init, n_cascades, K_max):
 
     y = []
     for n_nodes in x:
-        print n_nodes
+        print(n_nodes)
         G = cascade_creation.InfluenceGraph(max_proba=.3)
         G.erdos_init(n=n_nodes, p=.1)  # TODO: handle different inits!
         cascades = cascade_creation.generate_cascades(G, p_init=p_init,
@@ -23,7 +23,7 @@ def plot_rip_numberofnodes(max_proba, n_min, n_max, p_init, n_cascades, K_max):
         M = cascade_creation.normalize_matrix(M)
         y.append(rip_condition.find_kth_rip_constants(M, 4))
         #
-        print y
+        print(y)
 
     plt.clf()
     plt.plot(x, y)
@@ -48,7 +48,7 @@ def compare_greedy_and_lagrange_cs284r():
 
     #Lagrange Objective
     G_hat = algorithms.recovery_l1obj_l2constraint(G, A,
-                passed_function=convex_optimization.l1obj_l2penalization,
+                passed_function=convex_optimization.sparse_recovery,
                 floor_cstt=.1, lbda=10)
     algorithms.correctness_measure(G, G_hat, print_values=True)
```
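For readers following the refactor: `sparse_recovery` is the old `l1obj_l2penalization` under a clearer name, with the differentiation and solver setup split out into `diff_and_opt`. Below is a minimal usage sketch, not part of this commit, assuming the repo's `src/` modules are importable, that the per-node `(M, w)` system is built as in `cascade_creation.test()`, and that the return value still matches the `(1 - exp(theta), theta)` pair the removed `l1obj_l2constraint` produced. The graph size and cascade count are illustrative.

```python
# Illustrative sketch (not from this commit): estimate the incoming edge
# probabilities of node 0 from simulated independent-cascade data.
import cascade_creation
import convex_optimization

# Build a random influence graph; n, p, and max_proba are placeholder values.
G = cascade_creation.InfluenceGraph(max_proba=.3)
G.erdos_init(n=50, p=.1)

# Simulate cascades and assemble the (M, w) system for node 0,
# mirroring cascade_creation.test().
A = cascade_creation.generate_cascades(G, .1, 200)
M, w = cascade_creation.icc_matrixvector_for_node(A, 0)

# Assumed to return (1 - exp(theta), theta), as the removed solver did.
p_hat, theta = convex_optimization.sparse_recovery(M, w, lbda=10)
print(p_hat)
```

`lbda=10` mirrors the value `compare_greedy_and_lagrange_cs284r` passes in `make_plots.py`; since the objective is `|theta|_1 + lbda * |exp(M.theta) - (1 - w)|_2`, a larger `lbda` weights data fit over sparsity.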
