| author | jeanpouget-abadie <jean.pougetabadie@gmail.com> | 2015-02-03 10:19:14 -0500 |
|---|---|---|
| committer | jeanpouget-abadie <jean.pougetabadie@gmail.com> | 2015-02-03 10:19:14 -0500 |
| commit | c650550dab33eac8e0eb86d9843a942ae00cc9c9 (patch) | |
| tree | b790a1512fec77887a7f334e607409ad55c0cd27 /src/convex_optimization.py | |
| parent | 12e52fe9e9e1ac286c1b64342a54d7eccd6d9332 (diff) | |
| download | cascades-c650550dab33eac8e0eb86d9843a942ae00cc9c9.tar.gz | |
small changes to opt
Diffstat (limited to 'src/convex_optimization.py')
| -rw-r--r-- | src/convex_optimization.py | 37 |
1 files changed, 22 insertions, 15 deletions
```diff
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 0fda456..f283d6b 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -7,7 +7,7 @@ import cvxopt
 
 
 @timeout.timeout(20)
-def sparse_recovery(M_val, w_val, lbda):
+def sparse_recovery(lbda, n_cascades):
     """
     Solves: min lbda * |theta|_1 - 1/n_cascades*(
@@ -19,29 +19,33 @@ def sparse_recovery(M_val, w_val, lbda):
     -> log(e^x - 1) = (1-k)x + log(e^[kx] - 1)
     with k chosen as : n_nodes*n_cascades
     """
-    assert len(M_val) == len(w_val)
-
-    if M_val.dtype == bool:
-        M_val = M_val.astype('float32')
-
     if type(lbda) == int or type(lbda) == float:
         lbda = np.array(lbda)
-    m, n = M_val.shape
+    lbda = theano.shared(lbda.astype(theano.config.floatX))
 
     theta = tensor.row().T
     theta_ = theta.flatten()
-    M = theano.shared(M_val.astype(theano.config.floatX))
-    w = theano.shared(w_val.astype(theano.config.floatX))
-    lbda = theano.shared(lbda.astype(theano.config.floatX))
+    M = tensor.matrix(name='M', dtype=theano.config.floatX)
+    w = tensor.vector(name='w', dtype=theano.config.floatX)
 
-    y = lbda * theta_.norm(1) - 1./m*(
+    y = lbda * theta_.norm(1) - 1./n_cascades*(
         (w).dot(tensor.log(1-tensor.exp(M.dot(theta_))))
         + (1-w).dot(tensor.dot(M, theta_))
     )
 
-    return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
+    z = tensor.row().T
+    z_ = z.flatten()
+
+    y_diff = tensor.grad(y, theta_)
+    y_hess = z[0] * theano.gradient.hessian(y, theta_)
+    f_x = theano.function([theta, M, w], [y, y_diff],
+                          allow_input_downcast=True)
+    f_xz = theano.function([theta, z, M, w], [y, y_diff, y_hess],
+                           allow_input_downcast=True)
+
+    return f_x, f_xz
 
 
 def type_lasso(lbda):
@@ -79,6 +83,9 @@ def type_lasso(lbda):
 
 @timeout.timeout(8)
 def diff_and_opt(M_val, w_val, f_x, f_xz):
+    if M_val.dtype == bool:
+        M_val = M_val.astype(theano.config.floatX)
+
     m, n = M_val.shape
 
     def F(x=None, z=None):
@@ -125,14 +132,14 @@ def test():
     M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 2)
 
     #Type lasso
-    if 1:
+    if 0:
         f_x, f_xz = type_lasso(lbda)
         p_vec, _ = diff_and_opt(M_val, w_val, f_x, f_xz)
         print(G.mat[2])
 
     #Sparse recovery
-    if 0:
-        p_vec, theta = sparse_recovery(M_val, w_val, lbda)
+    if 1:
+        p_vec, theta = sparse_recovery(lbda, 1000)
         print(p_vec)
         print(G.mat)
```
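With this commit, `sparse_recovery` no longer takes the data (`M_val`, `w_val`) directly: it compiles the Theano objective over symbolic `M` and `w` and returns the function pair `(f_x, f_xz)`, while the data handling (the boolean cast and the `m, n` shape) moves into `diff_and_opt`. The pair matches the `F(x=None, z=None)` oracle convention of CVXOPT's nonlinear solver: `f_x` evaluates the objective and its gradient, and `f_xz` additionally returns `z[0]` times the Hessian. Note that `test()` still unpacks `sparse_recovery(lbda, 1000)` as `(p_vec, theta)`, even though the function now returns `(f_x, f_xz)`. A minimal usage sketch under the new signatures; the import path, example values, and random data below are illustrative assumptions, not part of the commit:

```python
# Hedged usage sketch; assumes src/ is on the Python path and the
# post-refactor signatures shown in the diff above.
import numpy as np
from convex_optimization import sparse_recovery, diff_and_opt

lbda = 0.1          # l1 regularization weight (example value)
n_cascades = 1000   # normalization constant, as in test() above

# M_val is an (m, n) boolean activation matrix and w_val a length-m 0/1
# vector; in this repository they come from
# cascade_creation.icc_matrixvector_for_node. Random stand-ins here:
M_val = np.random.rand(50, 10) < 0.2
w_val = (np.random.rand(50) < 0.5).astype('float32')

# Compile the symbolic objective once; since M and w are now symbolic
# inputs rather than shared variables, the compiled functions can be
# reused across nodes without rebuilding the Theano graph.
f_x, f_xz = sparse_recovery(lbda, n_cascades)

# diff_and_opt casts the boolean M_val to floatX and wraps (f_x, f_xz)
# in the F(x=None, z=None) oracle that cvxopt's solver expects.
p_vec, _ = diff_and_opt(M_val, w_val, f_x, f_xz)
print(p_vec)
```

Making `M` and `w` plain inputs with `allow_input_downcast=True`, rather than baking the data into shared variables, appears to be the point of the refactor: the expensive graph compilation happens once per `lbda` instead of once per cascade matrix.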
