-rw-r--r--  datasets/subset_facebook_SNAPnormalize.txt  2
-rw-r--r--  notes/reportYaron.tex  5
-rw-r--r--  src/algorithms.py  2
-rw-r--r--  src/cascade_creation.py  2
-rw-r--r--  src/make_plots.py  2
5 files changed, 6 insertions, 7 deletions
diff --git a/datasets/subset_facebook_SNAPnormalize.txt b/datasets/subset_facebook_SNAPnormalize.txt
index 049260c..30851e3 100644
--- a/datasets/subset_facebook_SNAPnormalize.txt
+++ b/datasets/subset_facebook_SNAPnormalize.txt
@@ -5035,4 +5035,4 @@
242 107
317 306
331 249
-204 255
\ No newline at end of file
+204 255
diff --git a/notes/reportYaron.tex b/notes/reportYaron.tex
index d5822ed..acdfaea 100644
--- a/notes/reportYaron.tex
+++ b/notes/reportYaron.tex
@@ -19,9 +19,7 @@ Given a set of observed cascades, the \textbf{graph reconstruction problem} cons
\section{Related Work}
-There have been several works tackling the graph reconstruction problem in variants of the independent cascade. We briefly summarize their results and approaches below.
-
-
+In previous work, this problem has been formulated in several ways, including as a convex optimization problem and as a maximum likelihood problem. However, no known algorithm reconstructs the graph with theoretical guarantees while requiring a reasonable number of samples.
\section{The Voter Model}
@@ -280,6 +278,7 @@ $\delta_4 = .54$ & $\delta_4 = .37$ & $\delta_4 = .43$ & $\delta_4 = .23$ \\
Our results on a very small social network (a subset of the famous Karate club) show that the RIP constants decrease as the number of cascades increases, and that they also decrease when $p_\text{init}$ is small. Finally, the constants we obtain are either below or close to the $.25$ mark set by the authors of \cite{candes}.
+
\subsection{Testing our algorithm}
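
The report above compares measured RIP constants against the .25 threshold of \cite{candes}. As a rough illustration of how such constants can be estimated, here is a minimal Monte-Carlo sketch; it is not from this repository, the function name is ours, and it assumes the cascade matrix has unit-norm columns. Because the maximum over all column subsets is intractable, sampling only lower-bounds the true delta_s.

import numpy as np

def estimate_rip_constant(A, s, n_samples=1000, seed=0):
    # Monte-Carlo lower bound on delta_s for a matrix A with
    # (assumed) unit-norm columns: for every column subset S of size
    # s, delta_s >= max(sigma_max(A_S)^2 - 1, 1 - sigma_min(A_S)^2).
    rng = np.random.RandomState(seed)
    delta = 0.0
    for _ in range(n_samples):
        cols = rng.choice(A.shape[1], size=s, replace=False)
        sv = np.linalg.svd(A[:, cols], compute_uv=False)
        delta = max(delta, sv[0] ** 2 - 1.0, 1.0 - sv[-1] ** 2)
    return delta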
diff --git a/src/algorithms.py b/src/algorithms.py
index 39bcbb2..0e240c9 100644
--- a/src/algorithms.py
+++ b/src/algorithms.py
@@ -60,7 +60,7 @@ def correctness_measure(G, G_hat, print_values=False):
edges_hat = set(G_hat.edges())
fp = len(edges_hat - edges)
fn = len(edges - edges_hat)
- tp = len(edges | edges_hat)
+ tp = len(edges & edges_hat)
tn = G.number_of_nodes() ** 2 - fp - fn - tp
#Other metrics
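
With tp restored to the intersection, the derived metrics follow directly. A minimal self-contained sketch of the same bookkeeping; the precision/recall shown here are our addition, and correctness_measure may report different quantities:

def edge_recovery_metrics(G, G_hat):
    # True positives are edges present in BOTH graphs, hence the
    # intersection (&); the union (|) would also count every error.
    edges, edges_hat = set(G.edges()), set(G_hat.edges())
    tp = len(edges & edges_hat)   # correctly recovered edges
    fp = len(edges_hat - edges)   # predicted edges that do not exist
    fn = len(edges - edges_hat)   # true edges that were missed
    precision = tp / float(tp + fp) if tp + fp else 0.0
    recall = tp / float(tp + fn) if tp + fn else 0.0
    return precision, recall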
diff --git a/src/cascade_creation.py b/src/cascade_creation.py
index 1a71285..9a26c03 100644
--- a/src/cascade_creation.py
+++ b/src/cascade_creation.py
@@ -4,7 +4,7 @@ import collections
from itertools import izip
from sklearn.preprocessing import normalize
-class InfluenceGraph(nx.DiGraph):
+class InfluenceGraph(nx.Graph):
"""
networkX graph with mat and logmat attributes
"""
diff --git a/src/make_plots.py b/src/make_plots.py
index 905c731..7c8bebb 100644
--- a/src/make_plots.py
+++ b/src/make_plots.py
@@ -40,7 +40,7 @@ def compare_greedy_and_lagrange_cs284r():
"""
G = cascade_creation.InfluenceGraph(max_proba = .8)
G.import_from_file("../datasets/subset_facebook_SNAPnormalize.txt")
- A = cascade_creation.generate_cascades(G, p_init=.05, n_cascades=100)
+ A = cascade_creation.generate_cascades(G, p_init=.05, n_cascades=50)
#Greedy
G_hat = algorithms.greedy_prediction(G, A)
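
Since the cascade count in this plot dropped from 100 to 50, a natural follow-up is to sweep it. A short sketch using only the calls already present in this file; the helper name and the count values are ours:

import algorithms
import cascade_creation

def recovery_vs_n_cascades(path, counts=(25, 50, 100)):
    # Re-run the greedy reconstruction at several sample sizes to see
    # how edge recovery improves with the number of observed cascades.
    for n in counts:
        G = cascade_creation.InfluenceGraph(max_proba=.8)
        G.import_from_file(path)
        A = cascade_creation.generate_cascades(G, p_init=.05, n_cascades=n)
        G_hat = algorithms.greedy_prediction(G, A)
        algorithms.correctness_measure(G, G_hat, print_values=True)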