| field | value | date |
|---|---|---|
| author | Ben Green <ben@SEASITs-MacBook-Pro.local> | 2015-07-08 00:41:29 -0400 |
| committer | Ben Green <ben@SEASITs-MacBook-Pro.local> | 2015-07-08 00:41:32 -0400 |
| commit | 034c71328460d460deef74bf591c8d6cd28d5f93 (patch) | |
| tree | 242235bdbee29d84f8249b19f40a6f4efdc526e7 /experiments/ml.pyx | |
| parent | b91ab29e267fb58b8ba161c1f77da5919b0e62d9 (diff) | |
| download | criminal_cascades-034c71328460d460deef74bf591c8d6cd28d5f93.tar.gz | |
updated temporal model for different time scales, ran some tests
Diffstat (limited to 'experiments/ml.pyx')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | experiments/ml.pyx | 45 |

1 file changed, 26 insertions, 19 deletions
```diff
diff --git a/experiments/ml.pyx b/experiments/ml.pyx
index 0d72c44..7eca30a 100644
--- a/experiments/ml.pyx
+++ b/experiments/ml.pyx
@@ -6,32 +6,38 @@ from libc.math cimport log, exp
 DTYPE = np.float64
 ctypedef np.float_t DTYPE_t
 
-cdef DTYPE_t weight_success(int dist, int dt, DTYPE_t alpha, DTYPE_t delta,
-                            DTYPE_t w1, DTYPE_t w2, DTYPE_t w3):
+cdef DTYPE_t weight_success(int dist, int dt, DTYPE_t alpha, DTYPE_t delta, DTYPE_t lmbda,
+                            DTYPE_t t_scale, DTYPE_t w1, DTYPE_t w2, DTYPE_t w3):
     """weight for successful infection, exponential time model"""
     cdef DTYPE_t structural, temporal, result
     structural = delta ** dist
-    temporal = log(exp(alpha)-1.) - alpha*dt/1.
+    # structural = delta/(1. + 1./(w1*lmbda) + 1./(w2*lmbda) + 1./(w3*lmbda))
+    temporal = log(exp(alpha/t_scale)-1.) - alpha*dt/t_scale
+    # temporal = 1. / (1. + (dt - 1.)/alpha)**0.01 - 1. / (1. + dt/alpha)**0.01
     result = log(structural) + temporal
     return result
 
-cdef DTYPE_t weight_failure(int dist, int dt, DTYPE_t alpha, DTYPE_t delta,
-                            DTYPE_t w1, DTYPE_t w2, DTYPE_t w3):
+cdef DTYPE_t weight_failure(int dist, int dt, DTYPE_t alpha, DTYPE_t delta, DTYPE_t lmbda,
+                            DTYPE_t t_scale, DTYPE_t w1, DTYPE_t w2, DTYPE_t w3):
     """weight for failed infection, exponential time model"""
     cdef DTYPE_t structural, temporal, result
     structural = delta ** dist
-    temporal = exp(-alpha * dt/1.)
+    # structural = delta/(1. + 1./(w1*lmbda) + 1./(w2*lmbda) + 1./(w3*lmbda))
+    temporal = exp(-alpha * dt/t_scale)
+    # temporal = 1. - 1. / (1. + dt/alpha)**0.01
     result = log(1. - structural + structural * temporal)
     return result
 
-def ml(dict root_victims, dict victims, dict non_victims, DTYPE_t age,
-       DTYPE_t alpha, DTYPE_t delta):
+def ml(dict root_victims, dict victims, dict non_victims,
+       DTYPE_t alpha, DTYPE_t delta, DTYPE_t lmbda):
     cdef:
-        int n_roots, n_victims, roots, i, dist, dt, t, l
+        int n_roots, n_victims, roots, i, dist, dt, t, l, n_days
         DTYPE_t beta, ll, beta_add, max_beta, max_beta_add
         list parents, failures, successes
     n_roots, n_victims = len(root_victims), len(victims)
-    n_nodes = 5000
+    n_nodes = 148152
+    n_days = 3012
+    t_scale = 1.
     cdef:
         np.ndarray[DTYPE_t] probs = np.zeros(n_victims, dtype=DTYPE)
         np.ndarray[DTYPE_t] probs_fail = np.zeros(n_victims, dtype=DTYPE)
@@ -44,15 +50,14 @@ def ml(dict root_victims, dict victims, dict non_victims, DTYPE_t age,
         # for each victim node i, compute the probability that all its parents
         # fail to infect it, also computes the probability that its most
         # likely parent infects it
-        failures = [weight_failure(dist, dt, alpha, delta, w1, w2, w3)
+        failures = [weight_failure(dist, dt, alpha, delta, lmbda, t_scale, w1, w2, w3)
                     for (prnt, dist, dt, w1, w2, w3) in parents]
         probs_fail[i] = sum(failures)
-        successes = [weight_success(dist, dt, alpha, delta, w1, w2, w3)
+        successes = [weight_success(dist, dt, alpha, delta, lmbda, t_scale, w1, w2, w3)
                      for (prnt, dist, dt, w1, w2, w3) in parents]
         dists = [dist for (prnt, dist, dt, w1, w2, w3) in parents]
         dts = [dt for (prnt, dist, dt, w1, w2, w3) in parents]
         # find parent that maximizes log(p) - log(\tilde{p})
-        # probs[i] = max(s - failures[l] for l, s in enumerate(successes))
         probs[i] = float("-inf")
         for l, s in enumerate(successes):
             prob = s - failures[l]
@@ -60,13 +65,12 @@ def ml(dict root_victims, dict victims, dict non_victims, DTYPE_t age,
                 probs[i] = prob
                 parent_dists[i] = dists[l]
                 parent_dts[i] = dts[l]
-        # probs_fail[i] = failures[l]
 
     # loop through non-victims
     for i, parents in enumerate(non_victims.itervalues()):
         # for each non victim node, compute the probability that all its
         # parents fail to infect it
-        failures = [weight_failure(dist, dt, alpha, delta, w1, w2, w3)
+        failures = [weight_failure(dist, dt, alpha, delta, lmbda, t_scale, w1, w2, w3)
                     for (prnt, dist, dt, w1, w2, w3) in parents]
         probs_nv[i] = sum(failures)
 
@@ -76,8 +80,8 @@ def ml(dict root_victims, dict victims, dict non_victims, DTYPE_t age,
     max_beta_add = float('-inf')
 
     # iterate over all victim nodes to find the optimal threshold
-    for beta in np.arange(0.01, 1., .01):
-        thresh = log(beta/(1000.*(1.-beta)))
+    for beta in np.arange(0.01, 1., 0.01):
+        thresh = log(beta/(n_days*(1.-beta)))
         seeds = probs<thresh
         non_seeds = probs>=thresh
         roots = n_roots + sum(seeds)
@@ -86,16 +90,19 @@ def ml(dict root_victims, dict victims, dict non_victims, DTYPE_t age,
         # add probability for realized edges and subtract probability these edges fail
         beta_add += (probs[non_seeds]).sum()
         # add probability for the seeds and non-seeds
-        beta_add += roots * log(beta/1000.) + (n_nodes-roots) * log(1. - beta)
+        beta_add += roots * log(beta/n_days) + (n_nodes-roots) * log(1. - beta)
         if beta_add > max_beta_add:
             max_beta = beta
             max_roots = roots
             max_beta_add = beta_add
+            pdists = (parent_dists[non_seeds]).mean()
+            pdts = (parent_dts[non_seeds]).mean()
     # print 'beta:', max_beta, 'add:', max_beta_add, 'roots:', max_roots
     ll += max_beta_add
     roots = max_roots
     beta = max_beta
-    # print n_nodes, n_roots, n_victims, max_i, roots
+    print 'dist:', pdists
+    print 'dt:', pdts
 
     return (beta, roots, ll)
```
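The core of this commit is the exponential time model rescaled by `t_scale`. As a reading aid only, here is a plain-Python sketch of the two weight functions as they stand after the change; it is not code from the repository. The `lmbda`, `w1`, `w2`, and `w3` arguments are omitted because they only feed the commented-out alternative models, and the example values at the bottom are made up for illustration.

```python
from math import exp, log

def weight_success(dist, dt, alpha, delta, t_scale):
    """Log-weight that a parent at graph distance `dist` infects the node
    after a delay of `dt`, under the exponential time model."""
    structural = delta ** dist
    temporal = log(exp(alpha / t_scale) - 1.) - alpha * dt / t_scale
    return log(structural) + temporal

def weight_failure(dist, dt, alpha, delta, t_scale):
    """Log-weight that a parent at graph distance `dist` fails to infect
    the node within `dt` time steps."""
    structural = delta ** dist
    temporal = exp(-alpha * dt / t_scale)
    return log(1. - structural + structural * temporal)

# Made-up example values, purely illustrative:
print(weight_success(dist=2, dt=30, alpha=0.05, delta=0.4, t_scale=1.))
print(weight_failure(dist=2, dt=30, alpha=0.05, delta=0.4, t_scale=1.))
```

With `t_scale = 1.` the formulas reduce to the pre-commit behaviour, which matches the hard-coded `t_scale = 1.` in the new `ml` body.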
