| -rw-r--r-- | jpa_test/convex_optimization.py | 16 |
1 file changed, 10 insertions, 6 deletions
diff --git a/jpa_test/convex_optimization.py b/jpa_test/convex_optimization.py
index ebd68e3..fdd321e 100644
--- a/jpa_test/convex_optimization.py
+++ b/jpa_test/convex_optimization.py
@@ -8,8 +8,8 @@ import cvxopt
 def l1regls(M_val, w_val):
     """
     Solves:
-        min - sum theta
-        s.t theta <= 0
+        min - sum_j theta_j
+        s.t theta_j <= 0
             |e^{M*theta} - (1 - w)|_2 <= 1
     """
     m, n = M_val.shape
@@ -25,23 +25,27 @@ def l1regls(M_val, w_val):
     y_diff = tensor.grad(y, theta_)
     y_hess = z[0] * theano.gradient.hessian(y, theta_)
     f_x = theano.function([theta], [y, y_diff], allow_input_downcast=True)
-    f_xz = theano.function([theta, z], [y, y_diff, y_hess])
+    f_xz = theano.function([theta, z], [y, y_diff, y_hess], allow_input_downcast=True)
 
     def F(x=None, z=None):
         if x is None:
             return 1, cvxopt.matrix(1.0, (n,1))
         elif z is None:
             y, y_diff = f_x(x)
-            return cvxopt.matrix(float(y), (1, 1)), cvxopt.matrix(y_diff.astype("float64")).T
+            return cvxopt.matrix(float(y), (1, 1)),\
+                   cvxopt.matrix(y_diff.astype("float64")).T
         else:
             y, y_diff, y_hess = f_xz(x, z)
             return cvxopt.matrix(float(y), (1, 1)), \
                    cvxopt.matrix(y_diff.astype("float64")).T, \
                    cvxopt.matrix(y_hess.astype("float64"))
 
-    return cvxopt.solvers.cpl(c,F)['x']
+    G = cvxopt.spdiag([1 for i in xrange(n)])
+    h = cvxopt.matrix(0.0, (n,1))
+
+    return cvxopt.solvers.cpl(c,F, G, h)['x']
 
 if __name__=="__main__":
     M_val = np.random.rand(100, 20)
     w_val = np.random.rand(100)
-    l1regls(M_val, w_val)
+    print l1regls(M_val, w_val)
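For context, cvxopt.solvers.cpl minimizes c'x subject to f_k(x) <= 0 and G x <= h: the callback F supplies the nonlinear constraint values and derivatives, while the G and h arguments added by this patch encode the linear bound theta <= 0 from the docstring. Below is a minimal, self-contained sketch of that calling convention on a toy problem; the problem itself and the names n, c, F, G, h are illustrative assumptions, not code from this repository.

    # Sketch of the solvers.cpl interface the patch relies on (toy problem):
    #   minimize    c'x
    #   subject to  x'x - 1 <= 0   (one nonlinear constraint, handled by F)
    #               -x <= 0        (linear inequalities G x <= h)
    from cvxopt import matrix, spdiag, solvers

    n = 3
    c = matrix([-1.0, -0.5, -0.25])

    def F(x=None, z=None):
        if x is None:
            # (number of nonlinear constraints, a point in the domain of f)
            return 1, matrix(0.0, (n, 1))
        val = sum(x[i] ** 2 for i in range(n)) - 1.0   # f_0(x) = x'x - 1
        f = matrix(val, (1, 1))
        Df = 2.0 * x.T                                  # gradient of f_0 as a 1 x n row
        if z is None:
            return f, Df
        H = 2.0 * z[0] * spdiag([1.0] * n)              # z_0 * Hessian of f_0
        return f, Df, H

    # Linear inequalities x >= 0 written as G x <= h, the same pattern the
    # patch uses to enforce theta <= 0 (there with G = I, h = 0).
    G = spdiag([-1.0] * n)
    h = matrix(0.0, (n, 1))

    solvers.options['show_progress'] = False
    sol = solvers.cpl(c, F, G, h)
    print(sol['x'])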
