author    jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-11-29 18:50:34 -0500
committer jeanpouget-abadie <jean.pougetabadie@gmail.com>  2015-11-29 18:50:34 -0500
commit    582ea9dade68859e3d863d80a3aeddcb10a4c368 (patch)
tree      af8e010329d9f31a220ba8ffc765f686c769ed37
parent    7322c00eafcde38dadbf9d4f05a1572d627355bf (diff)
download  cascades-582ea9dade68859e3d863d80a3aeddcb10a4c368.tar.gz
simple heuristic + star graph + rmse computation
-rw-r--r--  simulation/active_blocks.py  31
-rw-r--r--  simulation/main.py           10
-rw-r--r--  simulation/mle_blocks.py     95
-rw-r--r--  simulation/vi_blocks.py       4
4 files changed, 34 insertions(+), 106 deletions(-)
diff --git a/simulation/active_blocks.py b/simulation/active_blocks.py
index 47fce2f..e3924c6 100644
--- a/simulation/active_blocks.py
+++ b/simulation/active_blocks.py
@@ -54,7 +54,9 @@ class ActiveLearning(blocks.extensions.SimpleExtension):
self.dataset = dataset
def do(self, which_callback, *args):
- pass
+ out_degree = np.sum(self.dataset.graph, axis=1)
+ self.dataset.node_p = out_degree / np.sum(out_degree)
+ print(self.dataset.node_p)
class ShuffledBatchesScheme(fuel.schemes.ShuffledScheme):
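
The new do() body implements the commit's "simple heuristic": seed-selection probabilities proportional to each node's (weighted) out-degree. A minimal NumPy sketch of the same computation outside the Blocks extension (the function name is ours; graph is the (n_nodes, n_nodes) weight matrix held by the dataset):

    import numpy as np

    def out_degree_seed_probs(graph):
        """Seed-selection distribution proportional to weighted out-degree."""
        out_degree = np.sum(graph, axis=1)      # row sums = weighted out-degree
        return out_degree / np.sum(out_degree)  # normalize to a probability vector
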
@@ -77,10 +79,13 @@ class ShuffledBatchesScheme(fuel.schemes.ShuffledScheme):
return iter(batches[np.random.permutation(len(batches))])
-def create_mle_model(n_nodes):
+def create_mle_model(graph):
"""
return cascade likelihood theano computation graph
"""
+ n_nodes = len(graph)
+ g_shared = theano.shared(value=graph, name='graph')
+
x = tsr.matrix(name='x', dtype='int8')
s = tsr.matrix(name='s', dtype='int8')
params = theano.shared(
@@ -94,7 +99,13 @@ def create_mle_model(n_nodes):
lkl_neg = tsr.sum(-y[0:-1] * (~x[1:] & s[1:]))
lkl_mle = lkl_pos + lkl_neg
lkl_mle.name = 'cost'
- return x, s, params, lkl_mle
+
+ diff = (g_shared - params) ** 2
+ subarray = tsr.arange(g_shared.shape[0])
+ diff = tsr.set_subtensor(diff[subarray, subarray], 0)  # assign back: set_subtensor is not in-place
+ rmse = tsr.sqrt(tsr.sum(diff) / (n_nodes ** 2))  # take the root so the name 'rmse' is accurate
+ rmse.name = 'rmse'
+ return x, s, params, lkl_mle, rmse
def create_fixed_data_stream(n_cascades, graph, batch_size, shuffle=True):
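
The new 'rmse' channel above measures the distance between the learned params and the true weights, with the diagonal zeroed out because self-edges carry no signal in the cascades. A NumPy equivalent for reference (the function name is ours; dividing by n_nodes ** 2 rather than by the number of off-diagonal entries follows the diff):

    import numpy as np

    def offdiag_rmse(graph, params):
        """Root mean squared error over off-diagonal entries."""
        diff = (graph - params) ** 2
        np.fill_diagonal(diff, 0)                # ignore self-edges
        return np.sqrt(diff.sum() / graph.size)  # graph.size == n_nodes ** 2
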
@@ -126,25 +137,27 @@ def create_learned_data_stream(graph, batch_size):
if __name__ == "__main__":
batch_size = 1000
- graph = mn.create_random_graph(n_nodes=1000)
+ #graph = mn.create_random_graph(n_nodes=1000)
+ graph = mn.create_star(1000)
print('GRAPH:\n', graph, '\n-------------\n')
- x, s, params, cost = create_mle_model(len(graph))
+ x, s, params, cost, rmse = create_mle_model(graph)
alg = blocks.algorithms.GradientDescent(
cost=-cost, parameters=[params], step_rule=blocks.algorithms.AdaDelta()
)
data_stream = create_learned_data_stream(graph, batch_size)
+ #n_cascades = 10000
#data_stream = create_fixed_data_stream(n_cascades, graph, batch_size,
# shuffle=False)
loop = blocks.main_loop.MainLoop(
alg, data_stream,
extensions=[
blocks.extensions.FinishAfter(after_n_batches = 10**4),
- blocks.extensions.monitoring.TrainingDataMonitoring([cost, params],
- after_batch=True),
- blocks.extensions.Printing(every_n_batches = 10),
- #ActiveLearning(active_dataset)
+ blocks.extensions.monitoring.TrainingDataMonitoring([cost, params,
+ rmse], after_batch=True),
+ blocks.extensions.Printing(every_n_batches = 10)#,
+ #ActiveLearning(data_stream.dataset)
]
)
loop.run()
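
For context, the cost maximized here is the independent-cascade log-likelihood built by create_mle_model. Reading it off the code above, with x the active-node indicator, s the susceptibility mask, and weights theta_{ji} = -log(1 - p_{ji}) (the parameterization used by create_random_graph), the hazard on node i is y_i = sum_j x_j theta_{ji}, so a susceptible node escapes infection with probability e^{-y_i} and the per-step log-likelihood is

    \log L = \sum_{i\,\text{newly infected}} \log\left(1 - e^{-y_i}\right) \;-\; \sum_{i\,\text{susceptible, uninfected}} y_i

summed over consecutive cascade steps; the tsr.maximum(..., 1e-5) clamp keeps log(1 - e^{-y}) finite as y approaches zero.
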
diff --git a/simulation/main.py b/simulation/main.py
index a916034..81133c7 100644
--- a/simulation/main.py
+++ b/simulation/main.py
@@ -19,6 +19,16 @@ def create_random_graph(n_nodes, p=.5):
return np.log(1. / (1 - p * graph))
+def create_star(n_nodes, p=.5):
+ graph = np.zeros((n_nodes, n_nodes))
+ graph[0] = np.ones((n_nodes,))
+ graph[0, 0] = 0
+ for index, row in enumerate(graph[1:-1]):
+ row[index + 2] = 1  # row is node index+1; point it at node index+2 (index+1 would be a self-loop)
+ graph[-1, 1] = 1
+ return np.log(1. / (1 - p * graph))
+
+
def simulate_cascade(x, graph):
"""
Simulate an IC cascade given a graph and initial state.
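
create_star wires node 0 as a hub with edges to every other node, while nodes 1 through n-1 form a directed ring (1 -> 2 -> ... -> n-1 -> 1); the final transform converts the 0/1 adjacency into the same theta = -log(1 - p * A) weights that create_random_graph produces. A quick way to check the construction (adjacency_from_weights is our helper, inverting that transform):

    import numpy as np

    def adjacency_from_weights(weights, p=.5):
        """Recover the 0/1 adjacency A from theta = -log(1 - p * A)."""
        return np.round((1 - np.exp(-weights)) / p).astype(int)

    # For n_nodes = 4: hub 0 -> {1, 2, 3}, ring 1 -> 2 -> 3 -> 1.
    # adjacency_from_weights(create_star(4)) gives
    # [[0 1 1 1]
    #  [0 0 1 0]
    #  [0 0 0 1]
    #  [0 1 0 0]]
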
diff --git a/simulation/mle_blocks.py b/simulation/mle_blocks.py
deleted file mode 100644
index 5acebab..0000000
--- a/simulation/mle_blocks.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import main as mn
-import theano
-from theano import tensor as tsr
-import blocks
-import blocks.algorithms, blocks.main_loop, blocks.extensions.monitoring
-import theano.tensor.shared_randomstreams
-import picklable_itertools
-import numpy as np
-from six.moves import range
-import fuel
-import fuel.datasets
-import collections
-
-
-class JeaninuScheme(fuel.schemes.ShuffledScheme):
- def get_request_iterator(self):
- indices = list(self.indices)
- start = np.random.randint(self.batch_size)
- batches = list(map(
- list,
- picklable_itertools.extras.partition_all(self.batch_size,
- indices[start:])
- ))
- if indices[:start]:
- batches.append(indices[:start])
- batches = np.asarray(batches)
- return iter(batches[np.random.permutation(len(batches))])
-
-
-def create_model(n_nodes):
- x = tsr.matrix(name='x', dtype='int8')
- s = tsr.matrix(name='s', dtype='int8')
- params = theano.shared(
- .5 + .01 *
- np.random.normal(size=(n_nodes, n_nodes)).astype(theano.config.floatX),
- name='params'
- )
- y = tsr.maximum(tsr.dot(x, params), 1e-5)
- infect = tsr.log(1. - tsr.exp(-y[0:-1]))
- lkl_pos = tsr.sum(infect * (x[1:] & s[1:]))
- lkl_neg = tsr.sum(-y[0:-1] * (~x[1:] & s[1:]))
- lkl_mle = lkl_pos + lkl_neg
- lkl_mle.name = 'cost'
- return x, s, params, lkl_mle
-
-
-def create_random_graph(n_nodes, p=.5):
- graph = .5 * np.random.binomial(2, p=.5, size=(n_nodes, n_nodes))
- for k in range(len(graph)):
- graph[k, k] = 0
- return np.log(1. / (1 - p * graph))
-
-
-def create_data_stream(n_cascades, graph, batch_size, shuffle=True):
- """
- shuffle (bool): shuffle minibatches but not within minibatch
- """
- cascades = mn.build_cascade_list(mn.simulate_cascades(n_cascades, graph),
- collapse=True)
- x_obs, s_obs = cascades[0], cascades[1]
- data_set = fuel.datasets.base.IndexableDataset(collections.OrderedDict(
- [('x', x_obs), ('s', s_obs)]
- ))
- if shuffle:
- scheme = JeaninuScheme(len(x_obs), batch_size=batch_size)
- else:
- scheme = fuel.schemes.SequentialScheme(len(x_obs),
- batch_size=batch_size)
- return fuel.streams.DataStream(dataset=data_set, iteration_scheme=scheme)
-
-
-if __name__ == "__main__":
- n_cascades = 10000
- batch_size = 1000
- graph = np.array([[0, 0, 1], [0, 0, 0.5], [0, 0, 0]])
- graph = np.log(1. / (1 - .5 * graph))
- print('GRAPH:\n', graph, '\n-------------\n')
-
- x, s, params, cost = create_model(len(graph))
-
- alg = blocks.algorithms.GradientDescent(
- cost=-cost, parameters=[params], step_rule=blocks.algorithms.AdaDelta()
- )
- data_stream = create_data_stream(n_cascades, graph, batch_size,
- shuffle=True)
- loop = blocks.main_loop.MainLoop(
- alg, data_stream,
- extensions=[
- blocks.extensions.FinishAfter(after_n_epochs = 1000),
- blocks.extensions.monitoring.TrainingDataMonitoring([cost, params],
- after_batch=True),
- blocks.extensions.Printing()
- ]
- )
- loop.run()
diff --git a/simulation/vi_blocks.py b/simulation/vi_blocks.py
index 3f88611..58b68d3 100644
--- a/simulation/vi_blocks.py
+++ b/simulation/vi_blocks.py
@@ -19,9 +19,9 @@ class ClippedParams(blocks.algorithms.StepRule):
def compute_step(self, parameter, previous_step):
min_clipped = tsr.switch(parameter - previous_step < self.min_value,
- self.min_value, previous_step)
+ 0, previous_step)
return tsr.switch(parameter - previous_step > self.max_value,
- self.max_value, min_clipped)
+ 0, min_clipped), []
def create_vi_model(n_nodes, n_samp=100):
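
The two changes to ClippedParams above make the rule zero out any step that would push a parameter past its bounds (rather than substituting the bound itself as the step), and return the (step, updates) pair that Blocks step rules are expected to produce. A self-contained sketch, with the constructor and its defaults assumed since they are not shown in the diff:

    from theano import tensor as tsr
    import blocks.algorithms

    class ClippedParams(blocks.algorithms.StepRule):
        """Drop any step that would leave [min_value, max_value]."""
        def __init__(self, min_value=0., max_value=1.):  # assumed defaults
            self.min_value = min_value
            self.max_value = max_value

        def compute_step(self, parameter, previous_step):
            # parameter - previous_step is the parameter value after the update
            min_clipped = tsr.switch(parameter - previous_step < self.min_value,
                                     0, previous_step)
            step = tsr.switch(parameter - previous_step > self.max_value,
                              0, min_clipped)
            return step, []  # (step, updates), as Blocks expects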