path: root/jpa_test/algorithms.py
Diffstat (limited to 'jpa_test/algorithms.py')
-rw-r--r--  jpa_test/algorithms.py  14
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/jpa_test/algorithms.py b/jpa_test/algorithms.py
index be9caba..2ed015a 100644
--- a/jpa_test/algorithms.py
+++ b/jpa_test/algorithms.py
@@ -9,8 +9,8 @@ import convex_optimization
 def greedy_prediction(G, cascades):
     """
     Returns estimated graph from Greedy algorithm in "Learning Epidemic ..."
-    TODO: write cleaner code?
     """
+    #TODO: This function is deprecated!
     G_hat = cascade_creation.InfluenceGraph(max_proba=None)
     G_hat.add_nodes_from(G.nodes())
     for node in G_hat.nodes():
@@ -44,9 +44,7 @@ def recovery_l1obj_l2constraint(G, cascades):
     G_hat.add_nodes_from(G.nodes())
     for node in G_hat.nodes():
         M, w = cascade_creation.icc_matrixvector_for_node(cascades, node)
-        M = M.astype("float64")
-        edges_node = convex_optimization.l1obj_l2constraint(M,w)
-        print edges_node
+        p_node, __ = convex_optimization.l1obj_l2constraint(M,w)


 def correctness_measure(G, G_hat):
@@ -73,13 +71,5 @@ def test():
     recovery_l1obj_l2constraint(G, A)
-    G_hat = greedy_prediction(G, A)
-    fp, fn, gp = correctness_measure(G, G_hat)
-    print "False Positive: {}".format(len(fp))
-    print "False Negative: {}".format(len(fn))
-    print "Good Positives: {}".format(len(gp))
-    t1 = time.time()
-    print t1 - t0
-
 if __name__=="__main__":
     test()
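
After this change, recovery_l1obj_l2constraint unpacks two return values from convex_optimization.l1obj_l2constraint(M, w). That solver is not shown in this diff; the sketch below is only a plausible stand-in, assuming a cvxpy-based formulation that minimizes the l1 norm of the per-node edge vector subject to an l2 bound on the residual M x - w. The eps tolerance, the cvxpy dependency, the Python 3 syntax, and the (vector, status) return pair are assumptions, not the repository's actual implementation.

# Hypothetical sketch of an L1-objective / L2-constraint recovery step.
# Assumptions: cvxpy is installed, M is an (m x n) array castable to float64,
# w has length m, and eps bounds the residual norm. The real
# convex_optimization.l1obj_l2constraint may differ in signature and formulation.
import numpy as np
import cvxpy as cp

def l1obj_l2constraint_sketch(M, w, eps=1e-3):
    M = np.asarray(M, dtype="float64")
    w = np.asarray(w, dtype="float64")
    x = cp.Variable(M.shape[1])
    objective = cp.Minimize(cp.norm(x, 1))        # encourage a sparse edge vector
    constraints = [cp.norm(M @ x - w, 2) <= eps]  # data-fidelity constraint
    problem = cp.Problem(objective, constraints)
    problem.solve()
    # Return the recovered vector plus the solver status, mirroring the
    # two-value unpacking at the new call site (the second value is an assumption).
    return x.value, problem.status

Under those assumptions, the call site would read: p_node, __ = l1obj_l2constraint_sketch(M, w).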