 src/convex_optimization.py | 39
 src/make_plots.py          |  8
 2 files changed, 23 insertions(+), 24 deletions(-)
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 7432e91..f9be47d 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -6,12 +6,18 @@
 import timeout
 import cvxopt
 
-@timeout.timeout(20)
+@timeout.timeout(10)
 def sparse_recovery(M_val, w_val, lbda):
     """
     Solves:
-        min lbda * |theta|_1 - sum b^i log(exp(-<M[i], theta_>) -1) - M*theta
+        min lbda * |theta|_1 - 1/n_cascades*(
+            sum w log(e^[M*theta]) + (1-w) log(1-e^[M*theta])
+            )
     s.t theta_j <= 0
+
+    The objective must be re-normalized:
+        -> log(e^x - 1) = (1-k)x + log(e^[kx] - 1)
+    with k chosen as: n_nodes*n_cascades
     """
     assert len(M_val) == len(w_val)
@@ -30,19 +36,11 @@ def sparse_recovery(M_val, w_val, lbda):
     w = theano.shared(w_val.astype(theano.config.floatX))
     lbda = theano.shared(lbda.astype(theano.config.floatX))
 
-    # y_tmp = lbda * (theta_).norm(1) - tensor.dot(1 -w, tensor.log(1-tensor.exp(
-    #     M.dot(theta_)))) - tensor.dot(w, tensor.dot(M, theta_))
-    # y_diff = tensor.grad(y_tmp, theta_)
-    # y_hess = theano.gradient.hessian(y_tmp, theta_)
-    # f = function([theta_], y_hess)
-    #print(f(-1*np.random.rand(M_val.shape[1]).astype("float32")))
-
-    #Note that the objective must be re-normalized:
-    #log(e^x - 1) = (1-k)x + log(e^[kx] - 1)
-    y = lbda * (theta_).norm(1) \
-        - 1./n * tensor.dot(1 -w, tensor.log(1-tensor.exp(M.dot(theta_ *1./m)))) \
-        - 1./n * (1 - 1./m) * tensor.dot(1 - w, tensor.dot(M, theta_)) \
-        - 1./n * tensor.dot(w, tensor.dot(M, theta_))
+    #Objective
+    y = lbda * (theta_).norm(1) - 1./m*(
+        tensor.dot(1-w, tensor.log(1-tensor.exp(M.dot(theta_ *1./(n*m)))))\
+        + (1 - 1./(n*m)) * tensor.dot(1 - w, tensor.dot(M, theta_)) \
+        + tensor.dot(w, tensor.dot(M, theta_)))
 
     return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
 
@@ -89,7 +87,7 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
 
     def F(x=None, z=None):
         if x is None:
-            return 0, cvxopt.matrix(-.0001, (n,1))
+            return 0, cvxopt.matrix(-.001, (n,1))
         elif z is None:
             y, y_diff = f_x(x)
             return cvxopt.matrix(float(y), (1, 1)),\
@@ -104,8 +102,8 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
     h = cvxopt.matrix(0.0, (n,1))
 
     #Relaxing precision constraints
-    cvxopt.solvers.options['feastol'] = 1e-5
-    cvxopt.solvers.options['abstol'] = 1e-5
+    cvxopt.solvers.options['feastol'] = 2e-5
+    cvxopt.solvers.options['abstol'] = 2e-5
     #cvxopt.solvers.options['maxiters'] = 100
     cvxopt.solvers.options['show_progress'] = True
     try:
@@ -113,6 +111,7 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
     except ArithmeticError:
         print("ArithmeticError thrown, change initial point"+\
             " given to the solver")
+    except Exception: raise
 
     return 1 - np.exp(theta), theta
 
@@ -122,9 +121,9 @@ def test():
     unit test
     """
     lbda = 1
-    G = cascade_creation.InfluenceGraph(max_proba=.5)
+    G = cascade_creation.InfluenceGraph(max_proba=.8)
     G.erdos_init(n=100, p = .1)
-    A = cascade_creation.generate_cascades(G, .1, 500)
+    A = cascade_creation.generate_cascades(G, .1, 100)
     M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)
 
     #Type lasso
diff --git a/src/make_plots.py b/src/make_plots.py
index 18d0874..57c5caa 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -31,12 +31,12 @@ def test():
     """
     unit test
     """
-    G = cascade_creation.InfluenceGraph(max_proba=.3)
-    G.erdos_init(n=10, p=.2)
-    A = cascade_creation.generate_cascades(G, p_init=.2, n_cascades=100)
+    G = cascade_creation.InfluenceGraph(max_proba=.8)
+    G.erdos_init(n=100, p=.1)
+    A = cascade_creation.generate_cascades(G, p_init=.02, n_cascades=1000)
     G_hat = algorithms.recovery_l1obj_l2constraint(G, A,
             passed_function=convex_optimization.sparse_recovery,
-            floor_cstt=.1, lbda=10)
+            floor_cstt=.1, lbda=1)
 
     algorithms.correctness_measure(G, G_hat, print_values=True)
 
 if __name__=="__main__":
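For context on the F callable whose initial point is changed above: it follows the oracle convention of cvxopt's nonlinear convex solver, cvxopt.solvers.cp. F() returns (m, x0), the number of nonlinear constraints and a strictly feasible start; F(x) returns the objective value and gradient; F(x, z) additionally returns z[0] times the Hessian. A minimal, self-contained sketch of that convention on a toy objective follows; the function f(x) = exp(x0) + x0^2 + x1^2 and all names are illustrative, not part of the patch, though the G, h pair encodes the same x <= 0 constraint the patch uses for theta.

    import numpy as np
    import cvxopt

    n = 2

    def F(x=None, z=None):
        if x is None:
            # F(): 0 nonlinear constraints, strictly feasible starting point
            return 0, cvxopt.matrix(-0.001, (n, 1))
        # Objective value and gradient at x (toy objective, not the patch's)
        f = float(np.exp(x[0]) + x[0]**2 + x[1]**2)
        Df = cvxopt.matrix([float(np.exp(x[0]) + 2*x[0]), float(2*x[1])], (1, n))
        if z is None:
            return cvxopt.matrix(f, (1, 1)), Df
        # F(x, z): also return z[0] times the Hessian of the objective
        H = z[0] * cvxopt.matrix(np.diag([np.exp(x[0]) + 2.0, 2.0]))
        return cvxopt.matrix(f, (1, 1)), Df, H

    G = cvxopt.matrix(np.eye(n))     # x <= 0, mirroring theta_j <= 0
    h = cvxopt.matrix(0.0, (n, 1))
    sol = cvxopt.solvers.cp(F, G=G, h=h)
    print(sol['x'])

With this convention, moving the start from -.0001 to -.001 as the patch does simply picks a point further inside the feasible region x < 0, which can help the interior-point iterations get going.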
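The patch also deletes the commented-out Theano differentiation lines. For reference, a sketch of how a value-and-gradient helper like f_x (which the patch's F calls) and a Hessian counterpart can be compiled, assuming Theano is installed; the quadratic objective here is a stand-in, not the patch's actual likelihood.

    import numpy as np
    import theano
    import theano.tensor as tensor

    theta_ = tensor.vector('theta_')
    y = (theta_ ** 2).sum()                      # stand-in scalar objective
    y_diff = tensor.grad(y, theta_)              # symbolic gradient
    y_hess = theano.gradient.hessian(y, theta_)  # symbolic Hessian

    f_x = theano.function([theta_], [y, y_diff])
    f_hess = theano.function([theta_], y_hess)

    val, grad = f_x(np.zeros(3, dtype=theano.config.floatX))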
