author     jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-02-02 12:20:44 -0500
committer  jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-02-02 12:20:44 -0500
commit     ba03705f9840f45386837cf20c72d3222b90e83a (patch)
tree       69f9136be0bb8eadb6d78ee4808df88664d11766 /src/convex_optimization.py
parent     8fd43ceef109ef8482a0bb28ee4fe771cc1cb9f7 (diff)
download   cascades-ba03705f9840f45386837cf20c72d3222b90e83a.tar.gz
renormalization of cvxopt program
Diffstat (limited to 'src/convex_optimization.py')
-rw-r--r--  src/convex_optimization.py  49
1 file changed, 33 insertions, 16 deletions
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 1d84db2..7432e91 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -21,6 +21,8 @@ def sparse_recovery(M_val, w_val, lbda):
     if type(lbda) == int:
         lbda = np.array(lbda)
+    m, n = M_val.shape
+
     theta = tensor.row().T
     theta_ = theta.flatten()
@@ -28,16 +30,19 @@ def sparse_recovery(M_val, w_val, lbda):
     w = theano.shared(w_val.astype(theano.config.floatX))
     lbda = theano.shared(lbda.astype(theano.config.floatX))
-    y_tmp = lbda * (theta_).norm(1) - tensor.sum(tensor.dot(M, theta_
-        )) + w.dot(tensor.log(tensor.exp(-M.dot(theta_))-1))
-    y_diff = tensor.grad(y_tmp, theta_)
-    y_hess = theano.gradient.hessian(y_tmp, theta_)
-    f = function([theta_], y_hess)
+    # y_tmp = lbda * (theta_).norm(1) - tensor.dot(1 -w, tensor.log(1-tensor.exp(
+    #     M.dot(theta_)))) - tensor.dot(w, tensor.dot(M, theta_))
+    # y_diff = tensor.grad(y_tmp, theta_)
+    # y_hess = theano.gradient.hessian(y_tmp, theta_)
+    # f = function([theta_], y_hess)
+    #print(f(-1*np.random.rand(M_val.shape[1]).astype("float32")))
-    print(f(-1*np.random.rand(M_val.shape[1]).astype("float32")))
-
-    y = lbda * (theta_).norm(1) - tensor.sum(tensor.dot(M, theta_
-        )) - w.dot(tensor.log(tensor.exp(-M.dot(theta_))-1))
+    #Note that the objective must be re-normalized:
+    #log(e^x - 1) = (1-k)x + log(e^[kx] - 1)
+    y = lbda * (theta_).norm(1) \
+        - 1./n * tensor.dot(1 -w, tensor.log(1-tensor.exp(M.dot(theta_ *1./m)))) \
+        - 1./n * (1 - 1./m) * tensor.dot(1 - w, tensor.dot(M, theta_)) \
+        - 1./n * tensor.dot(w, tensor.dot(M, theta_))
     return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
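
A note on the hunk above: the re-normalized objective is easy to sanity-check outside of Theano. The snippet below is a plain-NumPy transcription of the same expression, a sketch under assumptions (made-up strictly positive M_val, w_val in [0, 1], strictly negative theta so the log term stays defined); in the repository M_val and w_val come from cascade_creation.icc_matrixvector_for_node, and the helper name renormalized_objective is illustrative, not part of the patch.

    import numpy as np

    def renormalized_objective(theta, M_val, w_val, lbda):
        # Mirrors the Theano expression added in this commit:
        #   lbda*||theta||_1
        #   - 1/n * (1-w) . log(1 - exp(M.theta / m))
        #   - 1/n * (1 - 1/m) * (1-w) . (M.theta)
        #   - 1/n * w . (M.theta)
        m, n = M_val.shape
        Mtheta = M_val.dot(theta)
        return (lbda * np.abs(theta).sum()
                - 1. / n * (1 - w_val).dot(np.log(1 - np.exp(Mtheta / m)))
                - 1. / n * (1 - 1. / m) * (1 - w_val).dot(Mtheta)
                - 1. / n * w_val.dot(Mtheta))

    # quick numerical check with made-up data
    rng = np.random.RandomState(0)
    M_val = rng.rand(20, 5)           # strictly positive entries, so M.dot(theta) < 0
    w_val = rng.rand(20)
    theta = -rng.rand(5) - .01        # strictly negative, consistent with the x <= 0 constraint
    print(renormalized_objective(theta, M_val, w_val, 1.0))
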
@@ -64,7 +69,7 @@ def type_lasso(M_val, w_val, lbda):
     w = theano.shared(w_val.astype(theano.config.floatX))
     lbda = theano.shared(lbda.astype(theano.config.floatX))
-    y = (theta_).norm(1) + lbda * (
+    y = lbda * (theta_).norm(1) + (
         tensor.exp(M.dot(theta_)) - (1 - w)).norm(2)
     return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
@@ -84,7 +89,7 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
     def F(x=None, z=None):
         if x is None:
-            return 0, cvxopt.matrix(.0001, (n,1))
+            return 0, cvxopt.matrix(-.0001, (n,1))
         elif z is None:
             y, y_diff = f_x(x)
             return cvxopt.matrix(float(y), (1, 1)),\
@@ -98,6 +103,10 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
     G = cvxopt.spdiag([1 for i in range(n)])
     h = cvxopt.matrix(0.0, (n,1))
+    #Relaxing precision constraints
+    cvxopt.solvers.options['feastol'] = 1e-5
+    cvxopt.solvers.options['abstol'] = 1e-5
+    #cvxopt.solvers.options['maxiters'] = 100
     cvxopt.solvers.options['show_progress'] = True
     try:
         theta = cvxopt.solvers.cp(F, G, h)['x']
@@ -112,13 +121,21 @@ def test():
     """
     unit test
     """
-    lbda = 10
-    G = cascade_creation.InfluenceGraph(max_proba=.8)
+    lbda = 1
+    G = cascade_creation.InfluenceGraph(max_proba=.5)
     G.erdos_init(n=100, p = .1)
-    A = cascade_creation.generate_cascades(G, .1, 2000)
+    A = cascade_creation.generate_cascades(G, .1, 500)
     M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)
-    p_vec, theta = l1obj_l2penalization(M_val, w_val, lbda)
-    print(p_vec)
+
+    #Type lasso
+    if 0:
+        p_vec, theta = type_lasso(M_val, w_val, lbda)
+        print(p_vec)
+
+    #Sparse recovery
+    if 1:
+        p_vec, theta = sparse_recovery(M_val, w_val, lbda)
+        print(p_vec)
 if __name__=="__main__":
     test()
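
For context on the two diff_and_opt hunks, here is a minimal, self-contained sketch of the cvxopt.solvers.cp calling convention they rely on: F() returning (0, x0) with a slightly negative starting point, F(x) returning objective value and gradient, F(x, z) additionally returning z[0] times the Hessian, the x <= 0 constraint expressed through G and h, and the relaxed feastol/abstol options this commit introduces. The toy objective sum(exp(x_i)) - a.x and every name below are illustrative assumptions, not code from the repository.

    import numpy as np
    import cvxopt

    def toy_cp(n=4, seed=0):
        rng = np.random.RandomState(seed)
        a = rng.rand(n)                      # unconstrained minimizer is x_i = log(a_i) <= 0

        def F(x=None, z=None):
            if x is None:
                # zero nonlinear constraints; start just inside {x <= 0}, as in the patch
                return 0, cvxopt.matrix(-.0001, (n, 1))
            xv = np.array(list(x))
            f = float(np.sum(np.exp(xv)) - a.dot(xv))
            Df = (np.exp(xv) - a).reshape(1, n)      # gradient as a 1 x n row
            if z is None:
                return cvxopt.matrix(f, (1, 1)), cvxopt.matrix(Df)
            H = z[0] * np.diag(np.exp(xv))           # z[0] * Hessian of the objective
            return cvxopt.matrix(f, (1, 1)), cvxopt.matrix(Df), cvxopt.matrix(H)

        G = cvxopt.spdiag([1.0 for _ in range(n)])   # G x <= h  <=>  x <= 0
        h = cvxopt.matrix(0.0, (n, 1))
        cvxopt.solvers.options['feastol'] = 1e-5     # relaxed tolerances, as in this commit
        cvxopt.solvers.options['abstol'] = 1e-5
        cvxopt.solvers.options['show_progress'] = False
        return cvxopt.solvers.cp(F, G, h)['x']

    print(toy_cp())
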