author     jeanpouget-abadie <jean.pougetabadie@gmail.com>   2015-02-02 22:45:07 -0500
committer  jeanpouget-abadie <jean.pougetabadie@gmail.com>   2015-02-02 22:45:07 -0500
commit     da8c267a9d10deb593e98d06c26ad09afd2af3b1 (patch)
tree       fe09aa8099841c209597c913b20a97eed136e3c0 /src
parent     683dbbdff11b7aa3b99641c82c3413c9c1bd1d9b (diff)
download   cascades-da8c267a9d10deb593e98d06c26ad09afd2af3b1.tar.gz
model section
Diffstat (limited to 'src')
-rw-r--r--  src/algorithms.py          | 16
-rw-r--r--  src/convex_optimization.py | 48
-rw-r--r--  src/make_plots.py          | 10
3 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/src/algorithms.py b/src/algorithms.py
index 926d9bc..9ebccf1 100644
--- a/src/algorithms.py
+++ b/src/algorithms.py
@@ -31,7 +31,7 @@ def greedy_prediction(G, cascades):
return G_hat
-def recovery_l1obj_l2constraint(G, cascades, floor_cstt, passed_function,
+def recovery_passed_function(G, cascades, floor_cstt, passed_function,
*args, **kwargs):
"""
Returns estimated graph from convex program specified by passed_function
@@ -40,11 +40,13 @@ def recovery_l1obj_l2constraint(G, cascades, floor_cstt, passed_function,
G_hat = cascade_creation.InfluenceGraph(max_proba=None)
G_hat.add_nodes_from(G.nodes())
+ f_x, f_xz = passed_function(*args, **kwargs)
+
for node in G_hat.nodes():
print(node)
try:
M, w = cascade_creation.icc_matrixvector_for_node(cascades, node)
- p_node, __ = passed_function(M,w, *args, **kwargs)
+ p_node, _ = convex_optimization.diff_and_opt(M, w, f_x, f_xz)
G_hat = cascade_creation.add_edges_from_proba_vector(G=G_hat,
p_node=p_node, node=node, floor_cstt=floor_cstt)
except timeout.TimeoutError:
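The except clause above depends on the project's timeout module, which this diff does not show. A minimal sketch of what such a decorator could look like, assuming a Unix signal-based implementation; only the names timeout.timeout(seconds) and timeout.TimeoutError come from the diff, the rest is an assumption:

    import functools
    import signal

    class TimeoutError(Exception):
        pass

    def timeout(seconds):
        # raise TimeoutError if the wrapped call exceeds `seconds`
        def decorator(func):
            def _handler(signum, frame):
                raise TimeoutError()
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                old = signal.signal(signal.SIGALRM, _handler)
                signal.alarm(seconds)
                try:
                    return func(*args, **kwargs)
                finally:
                    signal.alarm(0)          # cancel the pending alarm
                    signal.signal(signal.SIGALRM, old)
            return wrapper
        return decorator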
@@ -86,15 +88,15 @@ def test():
unit test
"""
G = cascade_creation.InfluenceGraph(max_proba = .8)
- G.erdos_init(n = 10, p = .2)
+ G.erdos_init(n = 100, p = .2)
import time
t0 = time.time()
A = cascade_creation.generate_cascades(G, .2, 1000)
- if 1:
- G_hat = greedy_prediction(G, A)
if 0:
- G_hat = recovery_l1obj_l2constraint(G, A,
- passed_function=convex_optimization.l1obj_l2penalization,
+ G_hat = greedy_prediction(G, A)
+ if 1:
+ G_hat = recovery_passed_function(G, A,
+ passed_function=convex_optimization.type_lasso,
floor_cstt=.1, lbda=10)
correctness_measure(G, G_hat, print_values=True)
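The point of the rename shows in the loop above: the old recovery_l1obj_l2constraint called passed_function(M, w, ...) for every node, recompiling the Theano graph each time, while recovery_passed_function compiles once up front and only re-solves per node. A minimal sketch of the two phases, using names from this diff and assuming G and cascades are built as in test():

    # phase 1: compile the objective/gradient/Hessian functions once
    f_x, f_xz = convex_optimization.type_lasso(lbda=10)

    # phase 2: reuse the compiled functions on every node's (M, w) system
    for node in G.nodes():
        M, w = cascade_creation.icc_matrixvector_for_node(cascades, node)
        p_node, _ = convex_optimization.diff_and_opt(M, w, f_x, f_xz)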
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 1f34508..163b6d5 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -44,55 +44,52 @@ def sparse_recovery(M_val, w_val, lbda):
return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
-@timeout.timeout(20)
-def type_lasso(M_val, w_val, lbda):
+def type_lasso(lbda):
"""
Solves:
    min lbda*|theta|_1 + |e^{M*theta} - (1 - w)|_2
    s.t. theta_j <= 0   (with theta_j <= 0, |theta|_1 = -sum_j theta_j)
"""
- assert len(M_val) == len(w_val)
-
- if M_val.dtype == bool:
- M_val = M_val.astype('float32')
-
if type(lbda) == int or type(lbda) == float:
lbda = np.array(lbda)
+ lbda = theano.shared(lbda.astype(theano.config.floatX))
+
theta = tensor.row().T
theta_ = theta.flatten()
- M = theano.shared(M_val.astype(theano.config.floatX))
- w = theano.shared(w_val.astype(theano.config.floatX))
- lbda = theano.shared(lbda.astype(theano.config.floatX))
+ M = tensor.matrix(name='M', dtype=theano.config.floatX)
+ w = tensor.vector(name='w', dtype=theano.config.floatX)
y = lbda * (theta_).norm(1) + (
tensor.exp(M.dot(theta_)) - (1 - w)).norm(2)
- return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
-
-
-def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
z = tensor.row().T
z_ = z.flatten()
- m, n = M_val.shape
-
y_diff = tensor.grad(y, theta_)
y_hess = z[0] * theano.gradient.hessian(y, theta_)
- f_x = theano.function([theta], [y, y_diff], allow_input_downcast=True)
- f_xz = theano.function([theta, z], [y, y_diff, y_hess],
- allow_input_downcast=True)
+ f_x = theano.function([theta, M, w], [y, y_diff],
+ allow_input_downcast=True)
+ f_xz = theano.function([theta, z, M, w], [y, y_diff, y_hess],
+ allow_input_downcast=True)
+
+ return f_x, f_xz
+
+@timeout.timeout(8)
+def diff_and_opt(M_val, w_val, f_x, f_xz):
+
+ m, n = M_val.shape
def F(x=None, z=None):
if x is None:
return 0, cvxopt.matrix(-.001, (n,1))
elif z is None:
- y, y_diff = f_x(x)
+ y, y_diff = f_x(x, M_val, w_val)
return cvxopt.matrix(float(y), (1, 1)),\
cvxopt.matrix(y_diff.astype("float64")).T
else:
- y, y_diff, y_hess = f_xz(x, z)
+ y, y_diff, y_hess = f_xz(x, z, M_val, w_val)
return cvxopt.matrix(float(y), (1, 1)), \
cvxopt.matrix(y_diff.astype("float64")).T, \
cvxopt.matrix(y_hess.astype("float64"))
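diff_and_opt now owns only the cvxopt side. Its nested F follows the calling convention of cvxopt.solvers.cp: F() returns the number of nonlinear constraints and a starting point, F(x) the objective value and gradient, and F(x, z) those plus the Hessian scaled by z[0]. A self-contained toy example of that convention; the objective here is illustrative, not the one from this commit:

    import numpy as np
    import cvxopt

    # minimize exp(x0) + exp(x1) + x0^2 + x1^2  subject to  x <= 1
    n = 2

    def F(x=None, z=None):
        if x is None:
            return 0, cvxopt.matrix(0.0, (n, 1))   # 0 nonlinear constraints, start at 0
        x_np = np.array(x).flatten()
        f = float(np.exp(x_np).sum() + (x_np ** 2).sum())
        Df = np.exp(x_np) + 2 * x_np                # gradient
        if z is None:
            return cvxopt.matrix(f, (1, 1)), cvxopt.matrix(Df.astype("float64")).T
        H = z[0] * cvxopt.matrix(np.diag(np.exp(x_np) + 2))   # scaled Hessian
        return cvxopt.matrix(f, (1, 1)), \
            cvxopt.matrix(Df.astype("float64")).T, H

    G = cvxopt.matrix(np.eye(n))                    # linear constraint G x <= h
    h = cvxopt.matrix(1.0, (n, 1))
    print(np.array(cvxopt.solvers.cp(F, G, h)['x']).flatten())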
@@ -104,7 +101,7 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
#cvxopt.solvers.options['feastol'] = 2e-5
#cvxopt.solvers.options['abstol'] = 2e-5
#cvxopt.solvers.options['maxiters'] = 100
- cvxopt.solvers.options['show_progress'] = False
+ cvxopt.solvers.options['show_progress'] = True
try:
theta = cvxopt.solvers.cp(F, G, h)['x']
except ArithmeticError:
@@ -128,13 +125,14 @@ def test():
M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)
#Type lasso
- if 0:
- p_vec, theta = type_lasso(M_val, w_val, lbda)
+ if 1:
+ f_x, f_xz = type_lasso(lbda)
+ p_vec, _ = diff_and_opt(M_val, w_val, f_x, f_xz)
print(p_vec)
print(G.mat)
#Sparse recovery
- if 1:
+ if 0:
p_vec, theta = sparse_recovery(M_val, w_val, lbda)
print(p_vec)
print(G.mat)
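The substance of the convex_optimization.py change: M and w used to be baked into the compiled functions as theano.shared values, forcing a fresh compilation for every node, and they are now symbolic inputs to f_x and f_xz, so compilation happens once in type_lasso and diff_and_opt just feeds in data. Note that sparse_recovery, untouched at the top of this file, still calls diff_and_opt with the old (theta, theta_, M, M_val, w, lbda, y) signature, which no longer matches. A minimal sketch of the compile-once pattern, assuming Theano's API as used in the diff:

    import numpy as np
    import theano
    import theano.tensor as tensor

    theta = tensor.vector('theta')
    M = tensor.matrix('M')                 # symbolic input, not theano.shared
    w = tensor.vector('w')
    y = (tensor.exp(M.dot(theta)) - (1 - w)).norm(2)
    f = theano.function([theta, M, w], y, allow_input_downcast=True)

    # one compiled function serves every node's data
    print(f(np.zeros(3), np.eye(3), np.full(3, .5)))
    print(f(np.zeros(3), 2 * np.eye(3), np.full(3, .2)))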
diff --git a/src/make_plots.py b/src/make_plots.py
index 3ac8362..c1cafe7 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -23,7 +23,7 @@ def compare_greedy_and_lagrange_cs284r():
#Lagrange Objective
G_hat = algorithms.recovery_l1obj_l2constraint(G, A,
passed_function=convex_optimization.type_lasso,
- floor_cstt=.1, lbda=10)
+ floor_cstt=.05, lbda=10)
algorithms.correctness_measure(G, G_hat, print_values=True)
@@ -32,11 +32,11 @@ def test():
unit test
"""
G = cascade_creation.InfluenceGraph(max_proba=.8)
- G.erdos_init(n=20, p=.2)
+ G.erdos_init(n=30, p=.2)
A = cascade_creation.generate_cascades(G, p_init=.1, n_cascades=1000)
- G_hat = algorithms.recovery_l1obj_l2constraint(G, A,
- passed_function=convex_optimization.sparse_recovery,
- floor_cstt=.1, lbda=1)
+ G_hat = algorithms.recovery_passed_function(G, A,
+ passed_function=convex_optimization.type_lasso,
+ floor_cstt=.001, lbda=.0001)
algorithms.correctness_measure(G, G_hat, print_values=True)
if __name__=="__main__":
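The make_plots.py changes are threshold tweaks: floor_cstt drops from .1 to .05 in the cs284r comparison and to .001 (with lbda=.0001) in test; note also that compare_greedy_and_lagrange_cs284r still refers to the old name recovery_l1obj_l2constraint. A hypothetical illustration of the threshold's effect, assuming add_edges_from_proba_vector keeps an edge into a node exactly when its recovered probability exceeds floor_cstt:

    import numpy as np

    p_node = np.array([0.002, 0.15, 0.0005, 0.4])   # made-up recovered probabilities
    for floor_cstt in (.1, .05, .001):
        kept = np.nonzero(p_node > floor_cstt)[0]
        print(floor_cstt, kept)   # lower floors keep more (possibly spurious) edges
    # .1   -> [1 3]
    # .05  -> [1 3]
    # .001 -> [0 1 3]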