-rw-r--r--  src/algorithms.py           |  6
-rw-r--r--  src/convex_optimization.py  | 21
-rw-r--r--  src/make_plots.py           |  3
3 files changed, 13 insertions, 17 deletions
diff --git a/src/algorithms.py b/src/algorithms.py
index 29de661..926d9bc 100644
--- a/src/algorithms.py
+++ b/src/algorithms.py
@@ -38,11 +38,7 @@ def recovery_l1obj_l2constraint(G, cascades, floor_cstt, passed_function,
     passed_function should have similar structure to ones in convex_optimation
     """
     G_hat = cascade_creation.InfluenceGraph(max_proba=None)
-    G_hat.add_nodes_from(G.nodes())
-
-    ####
-    print(G.nodes())
-    ####
+    G_hat.add_nodes_from(G.nodes())
 
     for node in G_hat.nodes():
         print(node)
diff --git a/src/convex_optimization.py b/src/convex_optimization.py
index 6a43705..1f34508 100644
--- a/src/convex_optimization.py
+++ b/src/convex_optimization.py
@@ -6,7 +6,7 @@ import timeout
 import cvxopt
 
 
-@timeout.timeout(10)
+@timeout.timeout(20)
 def sparse_recovery(M_val, w_val, lbda):
     """
     Solves:
@@ -24,7 +24,7 @@ def sparse_recovery(M_val, w_val, lbda):
 
     if M_val.dtype == bool:
         M_val = M_val.astype('float32')
-    if type(lbda) == int:
+    if type(lbda) == int or type(lbda) == float:
         lbda = np.array(lbda)
 
     m, n = M_val.shape
@@ -37,8 +37,8 @@ def sparse_recovery(M_val, w_val, lbda):
 
     lbda = theano.shared(lbda.astype(theano.config.floatX))
     y = lbda * theta_.norm(1) - 1./m*(
-            tensor.dot(1-w, tensor.log(1-tensor.exp(M.dot(theta_))))
-            + tensor.dot(w, tensor.dot(M, theta_))
+            (w).dot(tensor.log(1-tensor.exp(M.dot(theta_))))
+            + (1-w).dot(tensor.dot(M, theta_))
             )
 
     return diff_and_opt(theta, theta_, M, M_val, w, lbda, y)
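
Reading off the reordered Theano expression above (and assuming, as the variable names suggest, that w_i = 1 marks cascades in which the target node became active and that theta_j stands for log(1 - p_j) for candidate parent j, so exp(M_i theta) is the probability the node stays inactive in cascade i), the quantity handed to the solver is the L1-penalized negative log-likelihood

    \min_{\theta}\; \lambda\,\lVert\theta\rVert_1
        \;-\; \frac{1}{m}\Big[\sum_{i:\,w_i=1} \log\big(1 - e^{M_i\theta}\big)
        \;+\; \sum_{i:\,w_i=0} M_i\theta\Big]

The previous expression paired the indicator vectors the other way around, crediting the log(1 - e^{M_i theta}) term to the cascades in which the node did not activate.
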
@@ -56,7 +56,7 @@ def type_lasso(M_val, w_val, lbda):
 
     if M_val.dtype == bool:
         M_val = M_val.astype('float32')
-    if type(lbda) == int:
+    if type(lbda) == int or type(lbda) == float:
         lbda = np.array(lbda)
 
     theta = tensor.row().T
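
Both lbda-coercion hunks above (in sparse_recovery and in type_lasso) now accept plain Python ints and floats. Not part of the patch, but for reference, the usual idiom for that check is isinstance with a tuple of types:

    import numpy as np

    lbda = .001
    # Same effect as `type(lbda) == int or type(lbda) == float`,
    # written with the conventional isinstance check.
    if isinstance(lbda, (int, float)):
        lbda = np.array(lbda)
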
@@ -104,11 +104,11 @@ def diff_and_opt(theta, theta_, M, M_val, w, lbda, y):
     #cvxopt.solvers.options['feastol'] = 2e-5
     #cvxopt.solvers.options['abstol'] = 2e-5
     #cvxopt.solvers.options['maxiters'] = 100
-    cvxopt.solvers.options['show_progress'] = True
+    cvxopt.solvers.options['show_progress'] = False
     try:
         theta = cvxopt.solvers.cp(F, G, h)['x']
     except ArithmeticError:
-        print("ArithmeticError thrown, change initial point"+\
+        print("ArithmeticError thrown, change initial point"
               " given to the solver")
 
     if cvxopt.solvers.options['show_progress']:
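
The error-message hunk above drops the explicit `+` with a line-continuation backslash in favour of two adjacent string literals, which Python concatenates at compile time as long as they sit inside the call's parentheses. A quick standalone check of that behaviour:

    # Adjacent string literals are joined into one string at compile time,
    # so no `+` or trailing backslash is needed inside the parentheses.
    msg = ("ArithmeticError thrown, change initial point"
           " given to the solver")
    print(msg)  # -> ArithmeticError thrown, change initial point given to the solver
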
@@ -121,9 +121,9 @@ def test():
"""
unit test
"""
- lbda = 1
+ lbda = .001
G = cascade_creation.InfluenceGraph(max_proba=.9)
- G.erdos_init(n=10, p = .3)
+ G.erdos_init(n=100, p = .3)
A = cascade_creation.generate_cascades(G, .1, 1000)
M_val, w_val = cascade_creation.icc_matrixvector_for_node(A, 0)
@@ -131,12 +131,13 @@ def test():
     if 0:
         p_vec, theta = type_lasso(M_val, w_val, lbda)
         print(p_vec)
+        print(G.mat)
 
     #Sparse recovery
     if 1:
         p_vec, theta = sparse_recovery(M_val, w_val, lbda)
-        print(G.mat[0])
         print(p_vec)
+        print(G.mat)
 
 if __name__=="__main__":
     test()
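
On the first hunk in this file: the decorator budget on sparse_recovery is raised from 10 to 20 seconds. The project's timeout module is not part of this diff, so purely as an illustration of what a decorator with the signature @timeout.timeout(seconds) could look like, here is a minimal SIGALRM-based sketch (Unix-only; the real module may differ):

    import functools
    import signal

    class TimeoutExpired(Exception):
        # Raised when the wrapped call runs past its time budget.
        pass

    def timeout(seconds):
        def decorator(func):
            def _handler(signum, frame):
                raise TimeoutExpired("call exceeded %d seconds" % seconds)
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                previous = signal.signal(signal.SIGALRM, _handler)
                signal.alarm(seconds)            # arm the timer
                try:
                    return func(*args, **kwargs)
                finally:
                    signal.alarm(0)              # disarm the timer
                    signal.signal(signal.SIGALRM, previous)
            return wrapper
        return decorator
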
diff --git a/src/make_plots.py b/src/make_plots.py
index c69673c..3ac8362 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -33,8 +33,7 @@ def test():
"""
G = cascade_creation.InfluenceGraph(max_proba=.8)
G.erdos_init(n=20, p=.2)
- print(G.mat)
- A = cascade_creation.generate_cascades(G, p_init=.1, n_cascades=100)
+ A = cascade_creation.generate_cascades(G, p_init=.1, n_cascades=1000)
G_hat = algorithms.recovery_l1obj_l2constraint(G, A,
passed_function=convex_optimization.sparse_recovery,
floor_cstt=.1, lbda=1)