-rw-r--r--   simulation/plot_utils.py    26
-rw-r--r--   simulation/utils_blocks.py  35
-rw-r--r--   simulation/vi_blocks.py      4
3 files changed, 62 insertions, 3 deletions
diff --git a/simulation/plot_utils.py b/simulation/plot_utils.py
new file mode 100644
index 0000000..af5269c
--- /dev/null
+++ b/simulation/plot_utils.py
@@ -0,0 +1,26 @@
+import matplotlib.pyplot as plt
+import argparse
+import json
+import seaborn
+seaborn.set_style('whitegrid')
+
+parser = argparse.ArgumentParser(description='Process logs')
+parser.add_argument('-x', help='name of parameter on x axis', default='time')
+parser.add_argument('-y', help='name of parameter on y axis', default='rmse')
+parser.add_argument('f', help='list of logs to parse', nargs='+')
+parser.add_argument('-dest', help='name of figure to save', default='fig.png')
+args = parser.parse_args()
+
+for file_name in args.f:
+ x, y = [], []
+ with open(file_name) as f:
+ for line in f:
+ jason = json.loads(line)
+ x.append(jason[args.x])
+ y.append(jason[args.y])
+ plt.plot(x, y, label=file_name)
+
+plt.legend()
+plt.xlabel(args.x)
+plt.ylabel(args.y)
+plt.savefig(args.dest)
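
A note on the new script: plot_utils.py expects each log to be newline-delimited JSON, one record per line, and plots the field named by -x against the field named by -y for every file listed. A minimal sketch of a compatible log and invocation, assuming the default field names 'time' and 'rmse' (the file name demo_log.json is purely illustrative):

    import json

    # write a tiny JSON-lines log in the format plot_utils.py parses
    with open('demo_log.json', 'w') as log:
        for step, err in enumerate([0.9, 0.5, 0.3, 0.2]):
            log.write(json.dumps({'time': step, 'rmse': err}) + '\n')

    # then, for example:
    #   python plot_utils.py -x time -y rmse -dest fig.png demo_log.json
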
diff --git a/simulation/utils_blocks.py b/simulation/utils_blocks.py
index 5e91658..0d30786 100644
--- a/simulation/utils_blocks.py
+++ b/simulation/utils_blocks.py
@@ -38,7 +38,9 @@ class ActiveLearning(be.SimpleExtension):
    def do(self, which_callback, *args):
        out_degree = np.sum(self.dataset.graph, axis=1)
        self.dataset.node_p = out_degree / np.sum(out_degree)
-        print(self.dataset.node_p)
+
+# def do(self, which_callback, *args):
+
class JSONDump(be.SimpleExtension):
@@ -119,3 +121,34 @@ def dynamic_data_stream(graph, batch_size):
    data_set = LearnedDataset(node_p, graph)
    scheme = fuel.schemes.ConstantScheme(batch_size)
    return fuel.streams.DataStream(dataset=data_set, iteration_scheme=scheme)
+
+
+if __name__ == "__main__":
+ batch_size = 100
+ n_obs = 1000
+ frequency = 1
+ graph = utils.create_wheel(1000)
+ print('GRAPH:\n', graph, '\n-------------\n')
+
+ g_shared = theano.shared(value=graph, name='graph')
+ x, s, params, cost = create_mle_model(graph)
+ rmse = rmse_error(g_shared, params)
+ error = relative_error(g_shared, params)
+
+ alg = algorithms.GradientDescent(
+ cost=-cost, parameters=[params], step_rule=blocks.algorithms.AdaDelta()
+ )
+ data_stream = create_learned_data_stream(graph, batch_size)
+ #data_stream = create_fixed_data_stream(n_obs, graph, batch_size)
+ loop = main_loop.MainLoop(
+ alg, data_stream,
+ extensions=[
+ be.FinishAfter(after_n_batches=10**4),
+ bm.TrainingDataMonitoring([cost, rmse, error],
+ every_n_batches=frequency),
+ be.Printing(every_n_batches=frequency),
+ JSONDump("tmpactive_log.json", every_n_batches=frequency),
+ ActiveLearning(data_stream.dataset, every_n_batches=frequency)
+ ],
+ )
+ loop.run()
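
For context on the ActiveLearning extension touched above: its do() callback re-weights the dataset's node sampling distribution in proportion to each node's out-degree (the first hunk only removes the debug print). A minimal standalone sketch of that degree-proportional weighting, using an illustrative 3-node adjacency matrix:

    import numpy as np

    # toy adjacency matrix: rows are source nodes, columns are targets
    graph = np.array([[0, 1, 1],
                      [1, 0, 0],
                      [0, 1, 0]])

    out_degree = np.sum(graph, axis=1)        # out-degree per node: [2, 1, 1]
    node_p = out_degree / np.sum(out_degree)  # sampling weights: [0.5, 0.25, 0.25]
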
diff --git a/simulation/vi_blocks.py b/simulation/vi_blocks.py
index 94038a5..1177979 100644
--- a/simulation/vi_blocks.py
+++ b/simulation/vi_blocks.py
@@ -51,8 +51,8 @@ def create_vi_model(n_nodes, n_samp=100):
if __name__ == "__main__":
- n_cascades = 10000
- batch_size = 1000
+ #n_cascades = 10000
+ batch_size = 10
    n_samples = 50
    graph = utils.create_random_graph(n_nodes=4)
    print('GRAPH:\n', graph, '\n-------------\n')