path: root/src/convex_optimization.py
author     jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-02-02 12:56:43 -0500
committer  jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-02-02 12:56:43 -0500
commit     0480b2f097b60240d29a0de58efac649a8de959f (patch)
tree       fb3fb995f32824f55d1c8e33358a3839677de688 /src/convex_optimization.py
parent     ba03705f9840f45386837cf20c72d3222b90e83a (diff)
download   cascades-0480b2f097b60240d29a0de58efac649a8de959f.tar.gz
better normalization cvxopt
Diffstat (limited to 'src/convex_optimization.py')
-rw-r--r--  src/convex_optimization.py  39
1 file changed, 19 insertions, 20 deletions
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 7432e91..f9be47d 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -6,12 +6,18 @@ import timeout
import cvxopt
-@timeout.timeout(20)
+@timeout.timeout(10)
def sparse_recovery(M_val, w_val, lbda):
"""
Solves:
- min lbda * |theta|_1 - sum b^i log(exp(-<M[i], theta_>) -1) - M*theta
+ min lbda * |theta|_1 - 1/n_cascades*(
+ sum w log(e^[M*theta]) + (1-w) log(1-e^[M*theta])
+ )
s.t theta_j <= 0
+
+ The objective must be re-normalized:
+ -> log(e^x - 1) = (1-k)x + log(e^[kx] - 1)
+    with k chosen as n_nodes * n_cascades
"""
assert len(M_val) == len(w_val)
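
The new docstring above pins down the penalized likelihood being minimized. For reference, a minimal plain-numpy sketch of that objective in its un-rescaled form; the function name and the assumption that w is a 0/1 vector with one entry per row of M are illustrative (not from the repo), and it assumes M.dot(theta) < 0 so the log stays finite:

    import numpy as np

    def penalized_neg_loglik(theta, M, w, lbda, n_cascades):
        # lbda*|theta|_1 - 1/n_cascades * sum_i [ w_i*<M[i],theta> + (1-w_i)*log(1 - e^<M[i],theta>) ]
        Mt = M.dot(theta)                      # <M[i], theta> for every row i
        loglik = np.dot(w, Mt) + np.dot(1 - w, np.log(1 - np.exp(Mt)))
        return lbda * np.abs(theta).sum() - loglik / n_cascades
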
@@ -30,19 +36,11 @@ def sparse_recovery(M_val, w_val, lbda):
w = theano.shared(w_val.astype(theano.config.floatX))
lbda = theano.shared(lbda.astype(theano.config.floatX))
- # y_tmp = lbda * (theta_).norm(1) - tensor.dot(1 -w, tensor.log(1-tensor.exp(
- # M.dot(theta_)))) - tensor.dot(w, tensor.dot(M, theta_))
- # y_diff = tensor.grad(y_tmp, theta_)
- # y_hess = theano.gradient.hessian(y_tmp, theta_)
- # f = function([theta_], y_hess)
- #print(f(-1*np.random.rand(M_val.shape[1]).astype("float32")))
-
- #Note that the objective must be re-normalized:
- #log(e^x - 1) = (1-k)x + log(e^[kx] - 1)
- y = lbda * (theta_).norm(1) \
- - 1./n * tensor.dot(1 -w, tensor.log(1-tensor.exp(M.dot(theta_ *1./m)))) \
- - 1./n * (1 - 1./m) * tensor.dot(1 - w, tensor.dot(M, theta_)) \
- - 1./n * tensor.dot(w, tensor.dot(M, theta_))
+ #Objective
+ y = lbda * (theta_).norm(1) - 1./m*(
+ tensor.dot(1-w, tensor.log(1-tensor.exp(M.dot(theta_ *1./(n*m)))))\
+ + (1 - 1./(n*m)) * tensor.dot(1 - w, tensor.dot(M, theta_)) \
+ + tensor.dot(w, tensor.dot(M, theta_)))
return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
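
The commented-out lines deleted above already hint at how the symbolic objective gets differentiated before it reaches the solver. A hedged sketch of that step, assuming diff_and_opt compiles y the same way (tensor.grad, theano.gradient.hessian and theano's function are the calls visible in the deleted comments; the exact wiring lives outside this hunk):

    y_diff = tensor.grad(y, theta_)                 # symbolic gradient
    y_hess = theano.gradient.hessian(y, theta_)     # symbolic Hessian
    f_x = function([theta_], [y, y_diff])           # used as y, y_diff = f_x(x) below
    f_xx = function([theta_], y_hess)               # Hessian for the z branch of F
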
@@ -89,7 +87,7 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
def F(x=None, z=None):
if x is None:
- return 0, cvxopt.matrix(-.0001, (n,1))
+ return 0, cvxopt.matrix(-.001, (n,1))
elif z is None:
y, y_diff = f_x(x)
return cvxopt.matrix(float(y), (1, 1)),\
@@ -104,8 +102,8 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
h = cvxopt.matrix(0.0, (n,1))
#Relaxing precision constraints
- cvxopt.solvers.options['feastol'] = 1e-5
- cvxopt.solvers.options['abstol'] = 1e-5
+ cvxopt.solvers.options['feastol'] = 2e-5
+ cvxopt.solvers.options['abstol'] = 2e-5
#cvxopt.solvers.options['maxiters'] = 100
cvxopt.solvers.options['show_progress'] = True
try:
@@ -113,6 +111,7 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
except ArithmeticError:
print("ArithmeticError thrown, change initial point"+\
" given to the solver")
+    except Exception as e: print("Solver failed: " + str(e))
return 1 - np.exp(theta), theta
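
The ArithmeticError branch above only prints a hint; the message itself says the fix is to hand the solver a different starting point. One hedged way to make that retry explicit (a hypothetical helper, not in the repo, assuming the try block calls cvxopt.solvers.cp with the F callback and h built above plus whatever G encodes theta <= 0):

    def solve_with_restarts(make_F, G, h, starts=(-0.001, -0.01, -0.1)):
        # make_F(x0) is assumed to return an F callback whose no-argument call
        # reports x0 as the initial point; try progressively different starts.
        for x0 in starts:
            try:
                return cvxopt.solvers.cp(make_F(x0), G=G, h=h)
            except ArithmeticError:
                print("solver failed from start %g, retrying" % x0)
        return None
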
@@ -122,9 +121,9 @@ def test():
unit test
"""
lbda = 1
- G = cascade_creation.InfluenceGraph(max_proba=.5)
+ G = cascade_creation.InfluenceGraph(max_proba=.8)
G.erdos_init(n=100, p = .1)
- A = cascade_creation.generate_cascades(G, .1, 500)
+ A = cascade_creation.generate_cascades(G, .1, 100)
M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)
#Type lasso
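
The test is cut off by the diff context right after the "#Type lasso" comment. A hedged sketch of how it presumably continues, recalling that sparse_recovery returns (1 - exp(theta), theta) per the hunk above; the threshold and variable names are illustrative only, and lbda is passed as a numpy scalar because sparse_recovery calls .astype on it:

    probas, theta = sparse_recovery(M_val, w_val, np.float32(lbda))
    recovered = np.where(probas > 0.05)[0]    # entries with non-negligible recovered weight
    print("recovered edges for node 0:", recovered)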