author     Thibaut Horel <thibaut.horel@gmail.com>    2015-12-01 15:00:51 -0500
committer  Thibaut Horel <thibaut.horel@gmail.com>    2015-12-01 15:00:51 -0500
commit     3b5321add6cd71c6e23ff65e75faaa48e6829634 (patch)
tree       afe0bd06cff712efed3f0de9366311fad318e046
parent     c39e365d71fc07f3ae5b198252b4a7247efb9bc5 (diff)
download   cascades-3b5321add6cd71c6e23ff65e75faaa48e6829634.tar.gz
Extract blocks utils and reorganize code
-rw-r--r--    simulation/mle_blocks.py                                                59
-rw-r--r--    simulation/utils_blocks.py (renamed from simulation/active_blocks.py)   69
-rw-r--r--    simulation/vi_blocks.py                                                 10
3 files changed, 71 insertions(+), 67 deletions(-)
diff --git a/simulation/mle_blocks.py b/simulation/mle_blocks.py
new file mode 100644
index 0000000..89aaf2e
--- /dev/null
+++ b/simulation/mle_blocks.py
@@ -0,0 +1,59 @@
+import utils
+import utils_blocks as ub
+import theano
+from theano import tensor as tsr
+from blocks import algorithms, main_loop
+import blocks.extensions as be
+import blocks.extensions.monitoring as bm
+import numpy as np
+
+
+def create_mle_model(graph):
+ """return cascade likelihood theano computation graph"""
+ n_nodes = len(graph)
+ x = tsr.matrix(name='x', dtype='int8')
+ s = tsr.matrix(name='s', dtype='int8')
+ params = theano.shared(
+ .5 + .01 *
+ np.random.normal(size=(n_nodes, n_nodes)).astype(theano.config.floatX),
+ name='params'
+ )
+ y = tsr.maximum(tsr.dot(x, params), 1e-5)
+ infect = tsr.log(1. - tsr.exp(-y[0:-1]))
+ lkl_pos = tsr.sum(infect * (x[1:] & s[1:]))
+ lkl_neg = tsr.sum(-y[0:-1] * (~x[1:] & s[1:]))
+ lkl_mle = lkl_pos + lkl_neg
+ lkl_mle.name = 'cost'
+
+ return x, s, params, lkl_mle
+
+
+if __name__ == "__main__":
+ batch_size = 100
+ n_obs = 100000
+ graph = utils.create_wheel(100)
+
+ print('GRAPH:\n', graph, '\n-------------\n')
+
+ g_shared = theano.shared(value=graph, name='graph')
+ x, s, params, cost = create_mle_model(graph)
+ rmse = ub.rmse_error(g_shared, params)
+ error = ub.relative_error(g_shared, params)
+
+ alg = algorithms.GradientDescent(
+ cost=-cost, parameters=[params], step_rule=algorithms.AdaDelta()
+ )
+ data_stream = ub.dynamic_data_stream(graph, batch_size)
+ # data_stream = ub.fixed_data_stream(n_obs, graph, batch_size)
+ loop = main_loop.MainLoop(
+ alg, data_stream,
+ extensions=[
+ be.FinishAfter(after_n_batches=10**3),
+ bm.TrainingDataMonitoring([cost, params,
+ rmse, error], every_n_batches=10),
+ be.Printing(every_n_batches=10),
+ ub.JSONDump("log.json", every_n_batches=10),
+ ub.ActiveLearning(data_stream.dataset),
+ ],
+ )
+ loop.run()
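
For orientation, the following is a minimal NumPy sketch of the per-batch log-likelihood that create_mle_model above builds as a Theano graph. cascade_loglik and the toy data are illustrative names introduced here, not part of the repository.

import numpy as np

def cascade_loglik(x, s, params, eps=1e-5):
    # x[t, j] = 1 if node j is infected at step t; s[t, j] = 1 if node j is
    # still susceptible at step t; params[i, j] is the transmission rate i -> j.
    x = x.astype(bool)
    s = s.astype(bool)
    # hazard each node accumulates from the nodes infected at the previous step
    y = np.maximum(x.astype(float) @ params, eps)
    infect = np.log(1. - np.exp(-y[:-1]))
    lkl_pos = np.sum(infect[x[1:] & s[1:]])    # susceptible nodes that got infected
    lkl_neg = -np.sum(y[:-1][~x[1:] & s[1:]])  # susceptible nodes that did not
    return lkl_pos + lkl_neg

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.integers(0, 2, size=(4, 5)).astype(np.int8)
    s = np.ones_like(x)
    params = 0.5 + 0.01 * rng.normal(size=(5, 5))
    print(cascade_loglik(x, s, params))
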
diff --git a/simulation/active_blocks.py b/simulation/utils_blocks.py
index 1495eb8..5e91658 100644
--- a/simulation/active_blocks.py
+++ b/simulation/utils_blocks.py
@@ -1,16 +1,11 @@
-import utils
-import theano
from theano import tensor as tsr
-import blocks
-from blocks import algorithms, main_loop
+import fuel.datasets
import blocks.extensions as be
-import blocks.extensions.monitoring as bm
import picklable_itertools
import numpy as np
-import fuel
-import fuel.datasets
from json import dumps
import collections
+import utils
class LearnedDataset(fuel.datasets.Dataset):
@@ -31,7 +26,7 @@ class LearnedDataset(fuel.datasets.Dataset):
return utils.simulate_cascades(request, self.graph, self.source)
-class ActiveLearning(blocks.extensions.SimpleExtension):
+class ActiveLearning(be.SimpleExtension):
"""
Extension which updates the node_p array passed to the get_data method of
LearnedDataset
@@ -46,7 +41,7 @@ class ActiveLearning(blocks.extensions.SimpleExtension):
print(self.dataset.node_p)
-class JSONDump(blocks.extensions.SimpleExtension):
+class JSONDump(be.SimpleExtension):
"""Dump a JSON-serialized version of the log to a file."""
def __init__(self, filename, **kwargs):
@@ -82,26 +77,6 @@ class ShuffledBatchesScheme(fuel.schemes.ShuffledScheme):
return iter(batches[np.random.permutation(len(batches))])
-def create_mle_model(graph):
- """return cascade likelihood theano computation graph"""
- n_nodes = len(graph)
- x = tsr.matrix(name='x', dtype='int8')
- s = tsr.matrix(name='s', dtype='int8')
- params = theano.shared(
- .5 + .01 *
- np.random.normal(size=(n_nodes, n_nodes)).astype(theano.config.floatX),
- name='params'
- )
- y = tsr.maximum(tsr.dot(x, params), 1e-5)
- infect = tsr.log(1. - tsr.exp(-y[0:-1]))
- lkl_pos = tsr.sum(infect * (x[1:] & s[1:]))
- lkl_neg = tsr.sum(-y[0:-1] * (~x[1:] & s[1:]))
- lkl_mle = lkl_pos + lkl_neg
- lkl_mle.name = 'cost'
-
- return x, s, params, lkl_mle
-
-
def rmse_error(graph, params):
n_nodes = graph.shape[0]
diff = (graph - params) ** 2
@@ -114,7 +89,7 @@ def rmse_error(graph, params):
def relative_error(graph, params):
n_nodes = graph.shape[0]
- diff = abs(g_shared - params)
+ diff = abs(graph - params)
subarray = tsr.arange(n_nodes)
tsr.set_subtensor(diff[subarray, subarray], 0)
error = tsr.sum(tsr.switch(tsr.eq(graph, 0.), 0., diff / graph)) / n_nodes
@@ -122,7 +97,7 @@ def relative_error(graph, params):
return error
-def create_fixed_data_stream(n_obs, graph, batch_size, shuffle=True):
+def fixed_data_stream(n_obs, graph, batch_size, shuffle=True):
"""
creates a datastream for a fixed (not learned) dataset:
-shuffle (bool): shuffle minibatches but not within minibatch, else
@@ -139,38 +114,8 @@ def create_fixed_data_stream(n_obs, graph, batch_size, shuffle=True):
return fuel.streams.DataStream(dataset=data_set, iteration_scheme=scheme)
-def create_learned_data_stream(graph, batch_size):
+def dynamic_data_stream(graph, batch_size):
node_p = np.ones(len(graph)) / len(graph)
data_set = LearnedDataset(node_p, graph)
scheme = fuel.schemes.ConstantScheme(batch_size)
return fuel.streams.DataStream(dataset=data_set, iteration_scheme=scheme)
-
-
-if __name__ == "__main__":
- batch_size = 100
- n_obs = 1000
- graph = utils.create_wheel(10)
- print('GRAPH:\n', graph, '\n-------------\n')
-
- g_shared = theano.shared(value=graph, name='graph')
- x, s, params, cost = create_mle_model(graph)
- rmse = rmse_error(g_shared, params)
- error = relative_error(g_shared, params)
-
- alg = algorithms.GradientDescent(
- cost=-cost, parameters=[params], step_rule=blocks.algorithms.AdaDelta()
- )
- # data_stream = create_learned_data_stream(graph, batch_size)
- data_stream = create_fixed_data_stream(n_obs, graph, batch_size)
- loop = main_loop.MainLoop(
- alg, data_stream,
- extensions=[
- be.FinishAfter(after_n_batches=10**3),
- bm.TrainingDataMonitoring([cost, params,
- rmse, error], every_n_batches=10),
- be.Printing(every_n_batches=10),
- JSONDump("log.json", every_n_batches=10)
- # ActiveLearning(data_stream.dataset),
- ],
- )
- loop.run()
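
As a quick usage note for the renamed helpers, the calls below mirror the ones in mle_blocks.py and vi_blocks.py in this commit; the toy sizes are illustrative only.

import utils
import utils_blocks as ub

graph = utils.create_wheel(10)   # small toy graph, as in the removed __main__ block
batch_size = 100

# pre-simulated, fixed dataset (formerly create_fixed_data_stream)
fixed_stream = ub.fixed_data_stream(1000, graph, batch_size, shuffle=False)

# cascades re-simulated on the fly from a LearnedDataset
# (formerly create_learned_data_stream)
dynamic_stream = ub.dynamic_data_stream(graph, batch_size)
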
diff --git a/simulation/vi_blocks.py b/simulation/vi_blocks.py
index 11dc4de..94038a5 100644
--- a/simulation/vi_blocks.py
+++ b/simulation/vi_blocks.py
@@ -1,4 +1,5 @@
import utils
+import utils_blocks as ub
import theano
from theano import tensor as tsr
from blocks import algorithms, main_loop
@@ -6,7 +7,6 @@ import blocks.extensions as be
import blocks.extensions.monitoring as bm
import theano.tensor.shared_randomstreams
import numpy as np
-import active_blocks as ab
class ClippedParams(algorithms.StepRule):
@@ -58,16 +58,16 @@ if __name__ == "__main__":
print('GRAPH:\n', graph, '\n-------------\n')
x, s, mu, sig, cost = create_vi_model(len(graph), n_samples)
- rmse = ab.rmse_error(graph, mu)
+ rmse = ub.rmse_error(graph, mu)
step_rules = algorithms.CompositeRule([algorithms.AdaDelta(),
ClippedParams(1e-3, 1 - 1e-3)])
alg = algorithms.GradientDescent(cost=cost, parameters=[mu, sig],
step_rule=step_rules)
- data_stream = ab.create_fixed_data_stream(n_cascades, graph, batch_size,
- shuffle=False)
- # data_stream = ab.create_learned_data_stream(graph, batch_size)
+ data_stream = ub.fixed_data_stream(n_cascades, graph, batch_size,
+ shuffle=False)
+ # data_stream = ub.dynamic_data_stream(graph, batch_size)
loop = main_loop.MainLoop(
alg, data_stream,
extensions=[