-rw-r--r--  hawkes_experiments/cause.py   24
-rw-r--r--  hawkes_experiments/main.py    34
-rw-r--r--  hawkes_experiments/refine.py   6
3 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/hawkes_experiments/cause.py b/hawkes_experiments/cause.py
index 800f699..711e34a 100644
--- a/hawkes_experiments/cause.py
+++ b/hawkes_experiments/cause.py
@@ -30,12 +30,13 @@ def cause(lamb, alpha, mu):
fatal_droots += 1
continue
background_rate = lamb * (1 + 0.43 * sin(0.0172 * t1 + 4.36))
- parents = sorted([(n2, t2, alpha / d * mu * exp(-mu * (t1 - t2)), d)
+ parents = sorted([(n2, t2, alpha / d ** 2 * mu * exp(-mu * (t1 - t2)), d)
for (n2, t2, d) in s], reverse=True,
key=lambda x: x[2])
parent_rate = sum(e[2] for e in parents)
- # if sum(e[2] for e in prl[:1]) > br:
- # G.add_edge((n1, t1), tuple(prl[0][:2]))
+ # if parents[0][2] > background_rate:
+ # G.add_edge(tuple(parents[0][:2]), (n1, t1),
+ # weight=parents[0][3])
if background_rate > parent_rate:
roots += 1
if (n1, t1) in fatals:
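
For reference, the rate comparison above splits the Hawkes intensity into a seasonal background term and a sum of distance-damped excitation kernels. A minimal standalone sketch of the arithmetic (toy parameter values, not the fitted ones):

    from math import exp, sin

    def intensity_terms(lamb, alpha, mu, t1, parents):
        # Seasonal background: 0.0172 ~ 2*pi/365, i.e. an annual cycle.
        background = lamb * (1 + 0.43 * sin(0.0172 * t1 + 4.36))
        # Each earlier event (n2, t2) at graph distance d contributes an
        # exponentially decaying excitation, now damped by d squared.
        excitations = [alpha / d ** 2 * mu * exp(-mu * (t1 - t2))
                       for (n2, t2, d) in parents]
        return background, excitations

    # Toy case: one candidate parent 10 time units back at distance 2.
    bg, exc = intensity_terms(1.19e-5, 7.8e-3, 3.7e-3, t1=100.0,
                              parents=[("p", 90.0, 2)])
    print(bg > sum(exc))  # True: this event would be counted as a root
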
@@ -56,9 +57,9 @@ def analyze_graph(G):
print "cascades: {0}, min: {1}, max: {2}, mean: {3}, median: {4}".format(
len(csizes), np.min(csizes), np.max(csizes), np.mean(csizes),
np.median(csizes))
- # counts = Counter(l)
- # w = writer(open("components_dist.csv", "w"))
- # w.writerows(counts.most_common())
+ counts = Counter(csizes)
+ w = writer(open("components_dist.csv", "w"))
+ w.writerows(counts.most_common())
edges = list(G.edges_iter(data=True))
print "edges: {0}".format(len(edges))
times = [e[1][1] - e[0][1] for e in edges]
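
The re-enabled dump writes the cascade-size histogram straight from the component sizes. The same pattern in isolation (placeholder sizes; the real csizes comes from the attribution graph):

    from collections import Counter
    from csv import writer

    csizes = [1, 1, 2, 5, 1, 2]  # placeholder cascade sizes
    with open("components_dist.csv", "w") as fh:
        # One (size, count) row per distinct size, most common first.
        writer(fh).writerows(Counter(csizes).most_common())
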
@@ -68,14 +69,15 @@ def analyze_graph(G):
print "distances, min: {0}, max: {1}, mean: {2}, median: {3}".format(
np.min(distances), np.max(distances), np.mean(distances),
np.median(distances))
- # e = writer(open("edges.csv", "w"))
- # e.writerows(edges)
+ e = writer(open("edges.csv", "w"))
+ e.writerows((edge[0][0], edge[0][1], edge[1][0], edge[1][1],
+              edge[2]["weight"]) for edge in edges)
if __name__ == "__main__":
- nodes, edges, events, event_edges = load(open("data-dist1.pickle", "rb"))
- lamb, alpha, mu = 1.86602117779e-05, 0.0433473674726, 0.00109325510695
- # lamb, alpha, mu = 1.87717287808e-05, 5.12006113875e+14, 4.20918377797e-20
+ nodes, edges, events, event_edges = load(open("data-all.pickle", "rb"))
+ lamb, alpha, mu = 1.18909761267e-05, 0.00781529533133, 0.00373882477787
+ print "mu: {0}, alpha: {1}, beta: {2}".format(lamb, alpha, mu)
(droots, roots, infections, fatal_droots,
fatal_roots, fatal_infections, G) = cause(lamb, alpha, mu)
r = "events: {0}, droots: {1}, roots: {2}, infections: {3}, "\
diff --git a/hawkes_experiments/main.py b/hawkes_experiments/main.py
index 8f30dcf..9ed4205 100644
--- a/hawkes_experiments/main.py
+++ b/hawkes_experiments/main.py
@@ -43,19 +43,18 @@ def approx(x):
def ll(lamb, alpha, mu):
r1 = sum(log(lamb * (1 + 0.43 * sin(0.0172 * t1 + 4.36))
- + sum(alpha / d * mu * exp(-mu * (t1 - t2))
+ + sum(alpha / d ** 2 * mu * exp(-mu * (t1 - t2))
for (n2, t2, d) in s))
for ((n1, t1), s) in event_edges.iteritems())
- r2 = sum(sum(alpha / d * approx(mu * (nodes[n2][0] - t1))
+ r2 = sum(sum(alpha / d ** 2 * approx(mu * (nodes[n2][0] - t1))
for n2, d in edges[n1].iteritems()
if nodes[n2][0] > t1)
for (n1, t1) in iter_events(events))
r3 = lamb * sum(node[1] for node in nodes.itervalues())
- print r1, r2, r3
return -(r1 - r2 - r3)
-def sa(x, y, z, sigma=0.5, niter=1000, fc=None):
+def sa(x, y, z, sigma=0.5, niter=70, fc=None):
T = 0.1
e = 1.1
if fc:
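
Stepping back to the ll change above: the alpha / d -> alpha / d ** 2 edit applies the squared-distance damping consistently in both terms of the standard point-process log-likelihood,

    \ell = \sum_i \log \lambda(t_i) - \int_0^T \lambda(t)\, dt,
    \qquad
    \lambda(t) = \lambda_0 \bigl(1 + 0.43 \sin(0.0172\, t + 4.36)\bigr)
               + \sum_{t_j < t} \frac{\alpha}{d_j^{2}}\, \mu\, e^{-\mu (t - t_j)}

where r1 is the log-intensity sum and r2 + r3 approximate the integral: r2 covers the excitation kernels (via the truncated-exponential helper approx) and r3 the background, assuming node[1] holds each node's observation length (over long windows the seasonal factor integrates to roughly zero). The function returns the negated value so that sa and gss can minimize it.
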
@@ -82,7 +81,7 @@ def sa(x, y, z, sigma=0.5, niter=1000, fc=None):
return y, z, fo
-def optimize_with_sa(x, y, z, niter=10):
+def optimize_with_sa(x, y, z, niter=4):
def f(x):
return ll(x, y, z)
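
The sa body lies mostly outside this hunk; for orientation, here is a generic simulated-annealing minimizer of the same flavor (perturb, accept uphill moves with a temperature-dependent probability), not the repository's implementation. Multiplicative log-normal steps are used in this sketch because the three parameters span many orders of magnitude:

    import random
    from math import exp

    def sa_sketch(f, x, sigma=0.5, niter=70, T=0.1):
        fx = f(x)
        best, fbest = x, fx
        for _ in range(niter):
            # Log-scale Gaussian perturbation of every coordinate.
            cand = [xi * exp(random.gauss(0, sigma)) for xi in x]
            fc = f(cand)
            if fc < fx or random.random() < exp((fx - fc) / T):
                x, fx = cand, fc
                if fx < fbest:
                    best, fbest = x, fx
            T *= 0.95  # geometric cooling
        return best, fbest

    print(sa_sketch(lambda p: sum((v - 1.0) ** 2 for v in p), [0.5, 2.0, 1.5]))
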
@@ -108,8 +107,8 @@ def optimize_with_gss(x, y, z, niter=5):
return ll(x, y, z)
for _ in xrange(niter):
- y, fc = gss(g, 0, 1e50, tol=1e-10)
- z, fc = gss(h, 0, 1e50, tol=1e-10)
+ y, fc = gss(g, 0, 1, tol=1e-10)
+ z, fc = gss(h, 0, 1, tol=1e-10)
x, fc = gss(f, 0, 1e-3, tol=1e-10)
print x, y, z, fc
sys.stdout.flush()
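
gss itself is defined elsewhere in the module; from the call sites it takes a function and a bracket and returns the minimizer together with its objective value. A minimal golden-section search matching that contract (hypothetical, the actual implementation may differ); note that shrinking the alpha and mu brackets from [0, 1e50] to [0, 1] matters because the method assumes the objective is unimodal on the bracket:

    def gss(f, a, b, tol=1e-10):
        # Golden-section search for a minimum of a unimodal f on [a, b].
        invphi = (5 ** 0.5 - 1) / 2  # 1/phi ~ 0.618
        c, d = b - invphi * (b - a), a + invphi * (b - a)
        fc, fd = f(c), f(d)
        while abs(b - a) > tol:
            if fc < fd:  # minimum lies in [a, d]
                b, d, fd = d, c, fc
                c = b - invphi * (b - a)
                fc = f(c)
            else:        # minimum lies in [c, b]
                a, c, fc = c, d, fd
                d = a + invphi * (b - a)
                fd = f(d)
        x = (a + b) / 2.0
        return x, f(x)

    print(gss(lambda x: (x - 0.3) ** 2, 0.0, 1.0))  # ~ (0.3, 0.0)
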
@@ -121,7 +120,7 @@ def coarse_search():
v = map(float, line.strip().split())
d[tuple(v[:3])] = v[3]
p = Pool(5)
- lamb = [20. ** i for i in range(-10, 0)]
+ lamb = [5e-6, 1e-5, 1.5e-5, 2e-5, 2.5e-5]
alpha = [20. ** i for i in range(-15, 15)]
mu = [20. ** i for i in range(-15, 15)]
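
The lamb axis moves from a coarse log grid to hand-picked values around the earlier optimum. The surrounding pattern (not fully shown in the hunk) is a parallel grid evaluation over the cartesian product of the three axes; a self-contained sketch with a stand-in objective:

    from itertools import product
    from multiprocessing import Pool

    def evaluate(params):
        # Stand-in objective; the real code evaluates the negative
        # log-likelihood ll(lamb, alpha, mu) at each grid point.
        lamb, alpha, mu = params
        return params, (lamb - 1.5e-5) ** 2 + alpha ** 2 + mu ** 2

    if __name__ == "__main__":
        lamb = [5e-6, 1e-5, 1.5e-5, 2e-5, 2.5e-5]
        alpha = [20. ** i for i in range(-3, 3)]
        mu = [20. ** i for i in range(-3, 3)]
        grid = list(product(lamb, alpha, mu))
        for params, value in Pool(5).map(evaluate, grid):
            print((params, value))

The dictionary d built from the results file above presumably lets the real search skip grid points that were already evaluated.
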
@@ -136,16 +135,15 @@ def coarse_search():
if __name__ == "__main__":
- nodes, edges, events, event_edges = load(open("data-dist1.pickle", "rb"))
- x = 1.25e-5
- y = 1.2e16
- z = 1.5e-20
- sa(x, y, z)
-
- # with open(sys.argv[1]) as fh:
- # l = [map(float, line.strip().split()[:3]) for line in fh]
- # for e in l:
- # optimize_with_gss(*e)
+ nodes, edges, events, event_edges = load(open("data-all.pickle", "rb"))
+ x = 1.875e-5
+ y = 6.1e14
+ z = 3e-20
+ # optimize_with_sa(1.25e-05, 2.048e+13, 9.1552734375e-20)
+ with open(sys.argv[1]) as fh:
+ l = [map(float, line.strip().split()[:3]) for line in fh]
+ for e in l:
+ optimize_with_gss(*e)
# print ll(x, y, z)
# coarse_search()
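
With this change the script is driven by a results file passed as the first argument (e.g. python main.py candidates.txt, filename hypothetical): each line is whitespace-separated, the first three fields are read as a (lamb, alpha, mu) starting triple, and any further fields, such as a stored objective value, are dropped by the [:3] slice. A plausible input, reusing the triples that appear above:

    1.875e-05 6.1e+14 3e-20
    1.25e-05 2.048e+13 9.1552734375e-20
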
diff --git a/hawkes_experiments/refine.py b/hawkes_experiments/refine.py
index 02bbe07..6acb382 100644
--- a/hawkes_experiments/refine.py
+++ b/hawkes_experiments/refine.py
@@ -24,10 +24,10 @@ def approx(x):
def ll(lamb, alpha, mu):
r1 = sum(log(lamb * (1 + 0.43 * sin(0.0172 * t1 + 4.36))
- + sum(alpha / d * mu * exp(-mu * (t1 - t2))
+ + sum(alpha / d ** 2 * mu * exp(-mu * (t1 - t2))
for (n2, t2, d) in s))
for ((n1, t1), s) in event_edges.iteritems())
- r2 = sum(sum(alpha / d * approx(mu * (nodes[n2][0] - t1))
+ r2 = sum(sum(alpha / d ** 2 * approx(mu * (nodes[n2][0] - t1))
for n2, d in edges[n1].iteritems()
if nodes[n2][0] > t1)
for (n1, t1) in iter_events(events))
@@ -45,7 +45,7 @@ def get_values():
for line in open("refine.txt"):
v = map(float, line.strip().split())
d[tuple(v[:3])] = v[3]
- for a, _ in l[:100]:
+ for a, _ in l[:20]:
t = [1. / i for i in range(2, 4)] + [float(i) for i in range(1, 4)]
for b in product(t, repeat=3):
l, al, m = inprod(a, b)
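
inprod is not shown in this hunk; from its use it reads as an elementwise product, so each of the top 20 coarse points is perturbed by every combination of per-coordinate multipliers {1/3, 1/2, 1, 2, 3}. A sketch of that assumption:

    from itertools import product

    def inprod(a, b):
        # Elementwise product of two parameter triples (assumed behaviour).
        return tuple(x * y for x, y in zip(a, b))

    seed = (1.875e-5, 6.1e14, 3e-20)
    t = [1. / i for i in range(2, 4)] + [float(i) for i in range(1, 4)]
    neighbours = [inprod(seed, b) for b in product(t, repeat=3)]
    print(len(neighbours))  # 5 ** 3 = 125 candidate triples per seed

Trimming l[:100] to l[:20] cuts that from 12,500 to 2,500 likelihood evaluations per refinement pass.
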