author     jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-02-03 10:19:14 -0500
committer  jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-02-03 10:19:14 -0500
commit     c650550dab33eac8e0eb86d9843a942ae00cc9c9 (patch)
tree       b790a1512fec77887a7f334e607409ad55c0cd27
parent     12e52fe9e9e1ac286c1b64342a54d7eccd6d9332 (diff)
download   cascades-c650550dab33eac8e0eb86d9843a942ae00cc9c9.tar.gz
small changes to opt
-rw-r--r--   src/convex_optimization.py   37
-rw-r--r--   src/make_plots.py             6
2 files changed, 25 insertions, 18 deletions
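
This commit inverts sparse_recovery's interface: instead of taking one node's data (M_val, w_val) and solving immediately, it now compiles the symbolic objective once per lbda and returns Theano functions that diff_and_opt applies to each node's data. A minimal sketch of the resulting call pattern, mirroring the type_lasso path in test() below (M_val and w_val prepared elsewhere):

    # compile the objective, gradient and Hessian once for a given lbda ...
    f_x, f_xz = sparse_recovery(lbda=.001, n_cascades=1000)
    # ... then reuse the compiled functions on each node's cascade data
    p_vec, _ = diff_and_opt(M_val, w_val, f_x, f_xz)
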
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 0fda456..f283d6b 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -7,7 +7,7 @@ import cvxopt
@timeout.timeout(20)
-def sparse_recovery(M_val, w_val, lbda):
+def sparse_recovery(lbda, n_cascades):
"""
Solves:
min lbda * |theta|_1 - 1/n_cascades*(
@@ -19,29 +19,33 @@ def sparse_recovery(M_val, w_val, lbda):
    -> log(e^x - 1) = (1-k)x + log(e^[kx] - 1)
    with k chosen as : n_nodes*n_cascades
    """
-    assert len(M_val) == len(w_val)
-
-    if M_val.dtype == bool:
-        M_val = M_val.astype('float32')
-
    if type(lbda) == int or type(lbda) == float:
        lbda = np.array(lbda)
-    m, n = M_val.shape
+    lbda = theano.shared(lbda.astype(theano.config.floatX))
    theta = tensor.row().T
    theta_ = theta.flatten()
-    M = theano.shared(M_val.astype(theano.config.floatX))
-    w = theano.shared(w_val.astype(theano.config.floatX))
-    lbda = theano.shared(lbda.astype(theano.config.floatX))
+    M = tensor.matrix(name='M', dtype=theano.config.floatX)
+    w = tensor.vector(name='w', dtype=theano.config.floatX)
-    y = lbda * theta_.norm(1) - 1./m*(
+    y = lbda * theta_.norm(1) - 1./n_cascades*(
        (w).dot(tensor.log(1-tensor.exp(M.dot(theta_))))
        + (1-w).dot(tensor.dot(M, theta_))
        )
-    return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
+    z = tensor.row().T
+    z_ = z.flatten()
+
+    y_diff = tensor.grad(y, theta_)
+    y_hess = z[0] * theano.gradient.hessian(y, theta_)
+    f_x = theano.function([theta, M, w], [y, y_diff],
+                          allow_input_downcast=True)
+    f_xz = theano.function([theta, z, M, w], [y, y_diff, y_hess],
+                           allow_input_downcast=True)
+
+    return f_x, f_xz

def type_lasso(lbda):
@@ -79,6 +83,9 @@ def type_lasso(lbda):
@timeout.timeout(8)
def diff_and_opt(M_val, w_val, f_x, f_xz):
+    if M_val.dtype == bool:
+        M_val = M_val.astype(theano.config.floatX)
+
    m, n = M_val.shape

    def F(x=None, z=None):
@@ -125,14 +132,14 @@ def test():
    M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 2)

    #Type lasso
-    if 1:
+    if 0:
        f_x, f_xz = type_lasso(lbda)
        p_vec, _ = diff_and_opt(M_val, w_val, f_x, f_xz)
        print(G.mat[2])

    #Sparse recovery
-    if 0:
-        p_vec, theta = sparse_recovery(M_val, w_val, lbda)
+    if 1:
+        p_vec, theta = sparse_recovery(lbda, 1000)
        print(p_vec)
        print(G.mat)
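
For context on the z[0]-scaled Hessian compiled above: the inner F(x=None, z=None) in diff_and_opt follows the cvxopt.solvers.cp calling convention, where F() returns the number of nonlinear constraints and a starting point, F(x) returns the objective value and gradient, and F(x, z) additionally returns the z-weighted Hessian. A self-contained toy sketch of that convention; the quadratic objective and the x >= 0 constraint are placeholders, not the cascade likelihood:

    import cvxopt
    import numpy as np

    def make_F(n):
        def F(x=None, z=None):
            if x is None:
                # m = 0 nonlinear constraints, plus a point in the domain
                return 0, cvxopt.matrix(1.0, (n, 1))
            f = cvxopt.matrix(sum(xi**2 - xi for xi in x))      # f_0(x)
            Df = cvxopt.matrix([2*xi - 1 for xi in x], (1, n))  # one gradient row per f_k
            if z is None:
                return f, Df
            H = z[0] * cvxopt.matrix(2*np.eye(n))  # z[0]*Hess(f_0), as in y_hess
            return f, Df, H
        return F

    n = 4
    G = cvxopt.matrix(-np.eye(n))   # encode x >= 0 as G x <= h
    h = cvxopt.matrix(0.0, (n, 1))
    sol = cvxopt.solvers.cp(make_F(n), G=G, h=h)
    print(sol['x'])                 # roughly 0.5 in every coordinate
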
diff --git a/src/make_plots.py b/src/make_plots.py
index 4a952d1..bfe78f8 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -35,10 +35,10 @@ def test():
    G.erdos_init(n=50, p=.2)
    A = cascade_creation.generate_cascades(G, p_init=.1, n_cascades=1000)
    G_hat = algorithms.recovery_passed_function(G, A,
-                   passed_function=convex_optimization.type_lasso,
-                   floor_cstt=.1, lbda=.001)
+                   passed_function=convex_optimization.sparse_recovery,
+                   floor_cstt=.1, lbda=.001, n_cascades=1000)
    algorithms.correctness_measure(G, G_hat, print_values=True)
-    print(G.mat)
+

if __name__=="__main__":
    test()
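
A standalone sketch of the Theano pattern the new sparse_recovery builds on, simplified to plain vector inputs (the diff uses tensor.row().T plus flatten()); lbda=.001 and n_cascades=1000 are taken from test() above, while M_val and w_val are made-up stand-ins for one node's cascade data:

    import numpy as np
    import theano
    import theano.tensor as tensor

    theta = tensor.vector('theta')
    M = tensor.matrix('M')
    w = tensor.vector('w')
    z = tensor.vector('z')

    # lbda*|theta|_1 minus the averaged cascade log-likelihood terms
    y = .001 * theta.norm(1) - 1./1000 * (
        w.dot(tensor.log(1 - tensor.exp(M.dot(theta))))
        + (1 - w).dot(M.dot(theta)))

    y_diff = tensor.grad(y, theta)                     # gradient, for F(x)
    y_hess = z[0] * theano.gradient.hessian(y, theta)  # z-weighted Hessian, for F(x, z)

    f_x = theano.function([theta, M, w], [y, y_diff], allow_input_downcast=True)
    f_xz = theano.function([theta, z, M, w], [y, y_diff, y_hess],
                           allow_input_downcast=True)

    # made-up data; negative theta keeps M.dot(theta) < 0 so the log is defined
    M_val = np.random.rand(20, 5)
    w_val = (np.random.rand(20) < .5).astype('float64')
    val, grad = f_x(-np.ones(5), M_val, w_val)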