Diffstat (limited to 'jpa_test')
 jpa_test/algorithms.py          | 14
 jpa_test/convex_optimization.py | 65
2 files changed, 23 insertions, 56 deletions
diff --git a/jpa_test/algorithms.py b/jpa_test/algorithms.py
index be9caba..2ed015a 100644
--- a/jpa_test/algorithms.py
+++ b/jpa_test/algorithms.py
@@ -9,8 +9,8 @@ import convex_optimization
 def greedy_prediction(G, cascades):
     """
     Returns estimated graph from Greedy algorithm in "Learning Epidemic ..."
-    TODO: write cleaner code?
     """
+    #TODO: This function is deprecated!
     G_hat = cascade_creation.InfluenceGraph(max_proba=None)
     G_hat.add_nodes_from(G.nodes())
     for node in G_hat.nodes():
@@ -44,9 +44,7 @@ def recovery_l1obj_l2constraint(G, cascades):
     G_hat.add_nodes_from(G.nodes())
     for node in G_hat.nodes():
         M, w = cascade_creation.icc_matrixvector_for_node(cascades, node)
-        M = M.astype("float64")
-        edges_node = convex_optimization.l1obj_l2constraint(M,w)
-        print edges_node
+        p_node, __ = convex_optimization.l1obj_l2constraint(M,w)
 
 
 def correctness_measure(G, G_hat):
@@ -73,13 +71,5 @@ def test():
     recovery_l1obj_l2constraint(G, A)
 
-    G_hat = greedy_prediction(G, A)
-    fp, fn, gp = correctness_measure(G, G_hat)
-    print "False Positive: {}".format(len(fp))
-    print "False Negative: {}".format(len(fn))
-    print "Good Positives: {}".format(len(gp))
-    t1 = time.time()
-    print t1 - t0
-
 if __name__=="__main__":
     test()
diff --git a/jpa_test/convex_optimization.py b/jpa_test/convex_optimization.py
index dd93b05..ef54892 100644
--- a/jpa_test/convex_optimization.py
+++ b/jpa_test/convex_optimization.py
@@ -1,4 +1,5 @@
 import theano
+import cascade_creation
 from theano import tensor, function
 import numpy as np
 import cvxopt
@@ -12,47 +13,11 @@ def l1obj_l2constraint(M_val, w_val):
         s.t theta_j <= 0
         |e^{M*theta} - (1 - w)|_2 <= 1
     """
-    m, n = M_val.shape
-    c = cvxopt.matrix(-1.0, (n,1))
+    assert len(M_val) == len(w_val)
 
-    theta = tensor.row().T
-    z = tensor.row().T
-    theta_ = theta.flatten()
-    z_ = z.flatten()
-    M = theano.shared(M_val.astype(theano.config.floatX))
-    w = theano.shared(w_val.astype(theano.config.floatX))
-    y = (tensor.exp(M.dot(theta_)) - (1 - w)).norm(2) - 1
-    y_diff = tensor.grad(y, theta_)
-    y_hess = z[0] * theano.gradient.hessian(y, theta_)
-    f_x = theano.function([theta], [y, y_diff], allow_input_downcast=True)
-    f_xz = theano.function([theta, z], [y, y_diff, y_hess], allow_input_downcast=True)
+    if M_val.dtype == bool:
+        M_val = M_val.astype('float32')
 
-    def F(x=None, z=None):
-        if x is None:
-            return 1, cvxopt.matrix(1.0, (n,1))
-        elif z is None:
-            y, y_diff = f_x(x)
-            return cvxopt.matrix(float(y), (1, 1)),\
-                cvxopt.matrix(y_diff.astype("float64")).T
-        else:
-            y, y_diff, y_hess = f_xz(x, z)
-            return cvxopt.matrix(float(y), (1, 1)), \
-                cvxopt.matrix(y_diff.astype("float64")).T, \
-                cvxopt.matrix(y_hess.astype("float64"))
-
-    G = cvxopt.spdiag([1 for i in xrange(n)])
-    h = cvxopt.matrix(0.0, (n,1))
-
-    return cvxopt.solvers.cpl(c,F, G, h)['x']
-
-
-def l1obj_l2regl(M_val, w_val, lbda):
-    """
-    Solves:
-        min - sum_j theta_j + lbda * |e^{M*theta} - (1 - w)|_2
-        s.t theta_j <= 0
-    """
-    #TODO!!!!!!!
     m, n = M_val.shape
     c = cvxopt.matrix(-1.0, (n,1))
@@ -70,7 +35,7 @@ def l1obj_l2regl(M_val, w_val, lbda):
 
     def F(x=None, z=None):
         if x is None:
-            return 1, cvxopt.matrix(1.0, (n,1))
+            return 1, cvxopt.matrix(.0001, (n,1))
         elif z is None:
             y, y_diff = f_x(x)
             return cvxopt.matrix(float(y), (1, 1)),\
@@ -84,16 +49,28 @@
     G = cvxopt.spdiag([1 for i in xrange(n)])
     h = cvxopt.matrix(0.0, (n,1))
 
-    return cvxopt.solvers.cpl(c,F, G, h)['x']
+    cvxopt.solvers.options['show_progress'] = False
+    try:
+        theta = cvxopt.solvers.cpl(c,F, G, h)['x']
+    except ArithmeticError:
+        print "ArithmeticError thrown, change initial point"+\
+            " given to the solver"
+
+    return 1 - np.exp(theta), theta
 
 
 def test():
     """
     unit test
     """
-    M_val = np.random.rand(100, 20)
-    w_val = np.random.rand(100)
-    print l1obj_l2regl(M_val, w_val, 1)
+    G = cascade_creation.InfluenceGraph(max_proba=.5)
+    G.erdos_init(n=100, p = .8)
+    A = cascade_creation.generate_cascades(G, .1, 20)
+    M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)
+    assert len(M_val) == len(w_val)
+    print np.linalg.matrix_rank(M_val)
+    p_vec, theta = l1obj_l2constraint(M_val, w_val)
+    print len(p_vec)
 
 if __name__=="__main__":
     test()
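
For context, below is a minimal, self-contained sketch of the cvxopt.solvers.cpl calling convention that the rewritten l1obj_l2constraint relies on, with the gradient and Hessian of the l2 constraint worked out by hand in NumPy instead of through Theano. The helper name solve_sketch, the target vector b, and the starting point are illustrative assumptions, not part of the repository; as in the patch, the solver output theta is converted to probabilities via 1 - exp(theta).

import numpy as np
import cvxopt

def solve_sketch(b):
    """Illustrative only: min -sum(x)  s.t.  x <= 0,  ||exp(x) - b||_2 <= 1."""
    n = len(b)
    c = cvxopt.matrix(-1.0, (n, 1))                 # linear objective -sum(x)

    def F(x=None, z=None):
        if x is None:
            # one nonlinear constraint, starting point inside the domain
            return 1, cvxopt.matrix(-0.1, (n, 1))
        x_np = np.array(x).flatten()
        u = np.exp(x_np) - b
        r = np.linalg.norm(u)
        f = cvxopt.matrix(float(r) - 1.0, (1, 1))   # constraint value
        v = u * np.exp(x_np)                        # chain-rule term
        Df = cvxopt.matrix((v / r).reshape(1, n))   # gradient as a 1 x n row
        if z is None:
            return f, Df
        H = np.diag(np.exp(2 * x_np) + v) / r - np.outer(v, v) / r ** 3
        return f, Df, cvxopt.matrix(z[0] * H)       # weighted Hessian

    G = cvxopt.spdiag([1.0] * n)                    # encodes x_j <= 0
    h = cvxopt.matrix(0.0, (n, 1))
    cvxopt.solvers.options['show_progress'] = False
    theta = cvxopt.solvers.cpl(c, F, G, h)['x']
    return 1.0 - np.exp(np.array(theta).flatten()), theta

if __name__ == "__main__":
    p_vec, theta = solve_sketch(np.full(5, 0.7))
    print(p_vec)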
