path: root/src/convex_optimization.py
author     jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-05-18 19:19:35 +0200
committer  jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-05-18 19:19:35 +0200
commit     e41204334108b4ad40246f3d81435a40ff484837 (patch)
tree       5a391fd927fe64b41a145bbca6089d05b96b7be9 /src/convex_optimization.py
parent     3d818b2861faf0ccdbcb2eeb6ac90a07f4e4374a (diff)
download   cascades-e41204334108b4ad40246f3d81435a40ff484837.tar.gz
added plots to appendix
Diffstat (limited to 'src/convex_optimization.py')
-rw-r--r--  src/convex_optimization.py | 73
1 file changed, 67 insertions(+), 6 deletions(-)
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 000143f..fbfeb29 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -4,7 +4,7 @@ from theano import tensor, function
import numpy as np
import timeout
import cvxopt
-
+import scipy.sparse.linalg
@timeout.timeout(20)
def sparse_recovery(lbda, n_cascades):
@@ -124,28 +124,89 @@ def diff_and_opt(M_val, w_val, f_x, f_xz):
    return 1 - np.exp(theta), theta
+def restrictedEigenvalue(M_val, w_val, theta_val, gramMatrix=False):
+    """
+    Returns the smallest restricted eigenvalue of the matrix on the set S (the support of theta_val)
+    gramMatrix (bool) : if True, use the Gram matrix instead of the Hessian
+    theta_val : true vector of parameters
+    """
+    m, n = M_val.shape
+
+    if gramMatrix:
+        H = 1. / m * np.dot(M_val.T, M_val)
+    else:
+        # compute the Hessian (earlier row-vector attempt, kept commented out)
+        #theta = tensor.row().T
+        #theta_ = theta.flatten()
+
+        #M = tensor.matrix(name='M', dtype=theano.config.floatX)
+        #w = tensor.vector(name='w', dtype=theano.config.floatX)
+
+        #y = - 1./n*( (w).dot(tensor.log(1-tensor.exp(M.dot(theta_)))) +
+        #            (1-w).dot(tensor.dot(M, theta_)))
+
+        #f = theano.function([theta, M, w], [theano.gradient.hessian(y,
+        #                    theta_)], allow_input_downcast=True)
+
+        #print(theta_val)
+        #H = f(np.atleast_2d(theta_val).T, M_val.astype('float64'), w_val)
+
+        #print(H)
+
+        #evals_small, evecs_small = scipy.sparse.linalg.eigsh(H.astype('float64'), 1,
+        #                                                     which="LM")
+        #print(evals_small)
+
+        theta = tensor.vector()
+
+        M = tensor.matrix(name='M', dtype=theano.config.floatX)
+        w = tensor.vector(name='w', dtype=theano.config.floatX)
+
+        # normalized negative log-likelihood as a function of theta
+        y = - 1./n*((w).dot(tensor.log(1-tensor.exp(M.dot(theta)))) +
+                    (1-w).dot(tensor.dot(M, theta)))
+
+        # single output, so f(...) returns the Hessian array directly rather than a list
+        f = theano.function([theta, M, w], theano.gradient.hessian(y, theta),
+                            allow_input_downcast=True)
+
+        H = f(theta_val, M_val.astype('float32'), w_val)
+
+    print(H)
+
+    # smallest eigenvalue of H ("SA" = smallest algebraic)
+    evals_small, evecs_small = scipy.sparse.linalg.eigsh(H, 1, which="SA")
+    print(evals_small)
+    return evals_small[0]
+
+
def test():
"""
unit test
"""
lbda = .0001
- G = cascade_creation.InfluenceGraph(max_proba=.9)
+ G = cascade_creation.InfluenceGraph(max_proba=.9, min_proba=.2)
G.erdos_init(n=20, p = .3)
A = cascade_creation.generate_cascades(G, .1, 500)
- M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 2)
- print(len(M_val))
+ M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)
+ print(M_val.shape)
+ print(w_val.shape)
#Type lasso
if 0:
- f_x, f_xz = type_lasso(lbda)
+ f_x, f_xz = type_lasso(lbda, 500)
p_vec, _ = diff_and_opt(M_val, w_val, f_x, f_xz)
print(G.mat[2])
#Sparse recovery
- if 1:
+ if 0:
f_x, f_xz = sparse_recovery(lbda, 500)
p_vec, _ = diff_and_opt(M_val, w_val, f_x, f_xz)
print(G.mat[2])
+    #Restricted Eigenvalue
+    if 1:
+        node = 0
+        # support set S of the true parameter vector (not yet used)
+        parents = np.nonzero(G.mat[:, node])[0]
+        theta_val = G.logmat[:, node]
+        restrictedEigenvalue(M_val, w_val, theta_val)
+
if __name__=="__main__":
    test()
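
For reference, and not part of the commit above: a minimal self-contained sketch of the Gram-matrix branch of restrictedEigenvalue(), restricted to a support set S as the docstring describes. It assumes the convention H = M^T.M / m used in the gramMatrix branch; the helper name gram_restricted_eigenvalue and the toy data are illustrative only, standing in for the output of icc_matrixvector_for_node and the `parents` indices extracted in test().

import numpy as np

def gram_restricted_eigenvalue(M_val, support):
    """Smallest eigenvalue of H = M^T.M / m restricted to the index set `support`."""
    m, _ = M_val.shape
    H = np.dot(M_val.T, M_val) / m       # Gram matrix, as in the gramMatrix branch
    H_S = H[np.ix_(support, support)]    # principal submatrix on the support S
    return np.linalg.eigvalsh(H_S)[0]    # eigvalsh returns eigenvalues in ascending order

# toy usage with random data
M_toy = np.random.rand(500, 20)
S_toy = np.array([1, 4, 7])              # hypothetical support set
print(gram_restricted_eigenvalue(M_toy, S_toy))

Taking the smallest eigenvalue of the principal submatrix on S is one common surrogate for the restricted eigenvalue; the full restricted-eigenvalue condition from sparse recovery is stated over a cone of approximately sparse vectors rather than exactly supported ones.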