import theano
from theano import tensor
import numpy as np
import cvxopt

import cascade_creation
import timeout


@timeout.timeout(20)
def sparse_recovery(M_val, w_val, lbda):
    """
    Solves:
        min  lbda * |theta|_1
             - 1/n * sum_i (1 - w_i) * log(1 - exp(<M_i, theta>))
             - 1/n * sum_i w_i * <M_i, theta>
        s.t. theta_j <= 0
    """
    assert len(M_val) == len(w_val)
    if M_val.dtype == bool:
        M_val = M_val.astype('float32')
    if isinstance(lbda, (int, float)):
        lbda = np.array(lbda)
    m, n = M_val.shape

    # Symbolic column vector of parameters, flattened for the objective.
    theta = tensor.col()
    theta_ = theta.flatten()
    M = theano.shared(M_val.astype(theano.config.floatX))
    w = theano.shared(w_val.astype(theano.config.floatX))
    lbda = theano.shared(lbda.astype(theano.config.floatX))

    # The objective must be re-normalized to keep exp() from overflowing:
    # log(e^x - 1) ~ (1 - k) * x + log(e^(k*x) - 1), exact as x -> infinity.
    # Here k = 1/m rescales the argument of exp() before it is evaluated.
    y = lbda * theta_.norm(1) \
        - 1. / n * tensor.dot(1 - w,
                              tensor.log(1 - tensor.exp(M.dot(theta_ * 1. / m)))) \
        - 1. / n * (1 - 1. / m) * tensor.dot(1 - w, tensor.dot(M, theta_)) \
        - 1. / n * tensor.dot(w, tensor.dot(M, theta_))
    return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)


@timeout.timeout(20)
def type_lasso(M_val, w_val, lbda):
    """
    Solves:
        min  lbda * |theta|_1 + |exp(M * theta) - (1 - w)|_2
        s.t. theta_j <= 0

    (Under the constraint theta_j <= 0, |theta|_1 = -sum_j theta_j.)
    """
    assert len(M_val) == len(w_val)
    if M_val.dtype == bool:
        M_val = M_val.astype('float32')
    if isinstance(lbda, (int, float)):
        lbda = np.array(lbda)

    theta = tensor.col()
    theta_ = theta.flatten()
    M = theano.shared(M_val.astype(theano.config.floatX))
    w = theano.shared(w_val.astype(theano.config.floatX))
    lbda = theano.shared(lbda.astype(theano.config.floatX))

    y = lbda * theta_.norm(1) + (tensor.exp(M.dot(theta_)) - (1 - w)).norm(2)
    return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)


def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
    """
    Compiles the objective, its gradient, and its (scaled) Hessian, then
    minimizes y over {theta : theta_j <= 0} with cvxopt's nonlinear solver.
    """
    z = tensor.col()
    m, n = M_val.shape

    y_diff = tensor.grad(y, theta_)
    y_hess = z[0] * theano.gradient.hessian(y, theta_)

    f_x = theano.function([theta], [y, y_diff], allow_input_downcast=True)
    f_xz = theano.function([theta, z], [y, y_diff, y_hess],
                           allow_input_downcast=True)

    def F(x=None, z=None):
        # Oracle following the cvxopt.solvers.cp calling convention.
        if x is None:
            # 0 nonlinear constraints, strictly feasible starting point.
            return 0, cvxopt.matrix(-.0001, (n, 1))
        elif z is None:
            y, y_diff = f_x(x)
            return cvxopt.matrix(float(y), (1, 1)), \
                cvxopt.matrix(y_diff.astype("float64")).T
        else:
            y, y_diff, y_hess = f_xz(x, z)
            return cvxopt.matrix(float(y), (1, 1)), \
                cvxopt.matrix(y_diff.astype("float64")).T, \
                cvxopt.matrix(y_hess.astype("float64"))

    # theta_j <= 0, encoded as G * theta <= h with G = I, h = 0.
    G = cvxopt.spdiag([1.0 for i in range(n)])
    h = cvxopt.matrix(0.0, (n, 1))

    # Relaxing precision constraints
    cvxopt.solvers.options['feastol'] = 1e-5
    cvxopt.solvers.options['abstol'] = 1e-5
    #cvxopt.solvers.options['maxiters'] = 100
    cvxopt.solvers.options['show_progress'] = True
    try:
        theta = np.array(cvxopt.solvers.cp(F, G, h)['x']).flatten()
    except ArithmeticError:
        print("ArithmeticError thrown, change initial point"
              " given to the solver")
        return None, None
    return 1 - np.exp(theta), theta


def test():
    """
    Unit test.
    """
    lbda = 1
    G = cascade_creation.InfluenceGraph(max_proba=.5)
    G.erdos_init(n=100, p=.1)
    A = cascade_creation.generate_cascades(G, .1, 500)
    M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)

    # Type lasso
    if 0:
        p_vec, theta = type_lasso(M_val, w_val, lbda)
        print(p_vec)
    # Sparse recovery
    if 1:
        p_vec, theta = sparse_recovery(M_val, w_val, lbda)
        print(p_vec)


if __name__ == "__main__":
    test()
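

# ---------------------------------------------------------------------------
# Minimal sketch (illustrative only, not part of the pipeline above) of the
# cvxopt.solvers.cp oracle convention that diff_and_opt's F() follows:
# F() returns (number of nonlinear constraints, starting point), F(x) returns
# (value, gradient), and F(x, z) additionally returns z[0] times the Hessian.
# The toy problem -- min exp(x0) + exp(x1) - x0 - 2*x1 s.t. x <= 0 -- is a
# made-up example whose optimum is x = (0, 0).
def _cp_convention_demo():
    n = 2

    def F(x=None, z=None):
        if x is None:
            # 0 nonlinear constraints, starting point in the domain.
            return 0, cvxopt.matrix(-1.0, (n, 1))
        val = float(np.exp(x[0]) + np.exp(x[1]) - x[0] - 2 * x[1])
        grad = cvxopt.matrix([float(np.exp(x[0]) - 1.0),
                              float(np.exp(x[1]) - 2.0)], (1, n))
        if z is None:
            return cvxopt.matrix(val, (1, 1)), grad
        # cp expects H = z[0] * (Hessian of the objective).
        hess = cvxopt.spdiag([float(z[0] * np.exp(x[0])),
                              float(z[0] * np.exp(x[1]))])
        return cvxopt.matrix(val, (1, 1)), grad, hess

    G = cvxopt.spdiag([1.0 for i in range(n)])  # x <= 0 as G*x <= h
    h = cvxopt.matrix(0.0, (n, 1))
    # Returns a point close to the constrained optimum (0, 0).
    return cvxopt.solvers.cp(F, G, h)['x']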