import main as mn
import theano
from theano import tensor as tsr
import theano.tensor.shared_randomstreams
import numpy as np

import blocks.algorithms
import blocks.extensions
import blocks.extensions.monitoring
import blocks.main_loop

import active_blocks as ab


class ClippedParams(blocks.algorithms.StepRule):
    """A step rule that keeps parameters within a specified range.

    Blocks applies updates as ``parameter - step``; any step that would
    move a parameter entry below ``min_value`` or above ``max_value`` is
    replaced with 0, so the parameter stays put for that entry.
    """

    def __init__(self, min_value, max_value):
        self.min_value = min_value
        self.max_value = max_value

    def compute_step(self, parameter, previous_step):
        # Zero out steps that would push the parameter below min_value ...
        min_clipped = tsr.switch(parameter - previous_step < self.min_value,
                                 0, previous_step)
        # ... or above max_value. No extra updates are needed.
        return tsr.switch(parameter - previous_step > self.max_value,
                          0, min_clipped), []


def create_vi_model(n_nodes, n_samp=100):
    """Return the Theano computation graph for variational inference.

    The variational posterior over the edge-strength matrix theta is a
    Gaussian N(mu, sig^2) and the prior is N(mu0, sig0^2). The cost is the
    negative ELBO: -(expected log-likelihood) + KL(posterior || prior),
    with the expectation estimated from ``n_samp`` Monte Carlo samples.
    """
    def aux(a, b):
        # Random initial values, clipped into the open unit interval.
        rand = a + b * np.random.normal(size=(n_nodes, n_nodes))
        return np.clip(rand, 1e-3, 1 - 1e-3).astype(theano.config.floatX)

    # x[t, i] = 1 iff node i is infected at step t; s[t, i] = 1 iff node i
    # is observed at step t. Both are binary int8 matrices.
    x = tsr.matrix(name='x', dtype='int8')
    s = tsr.matrix(name='s', dtype='int8')

    # Variational parameters (learned) and prior parameters (fixed).
    mu = theano.shared(value=aux(.5, .1), name='mu1')
    sig = theano.shared(value=aux(.5, .1), name='sig1')
    mu0 = theano.shared(value=aux(.5, .1), name='mu0')
    sig0 = theano.shared(value=aux(.5, .1), name='sig0')

    # Reparameterized Monte Carlo samples: theta = mu + sig * eps.
    srng = tsr.shared_randomstreams.RandomStreams(seed=123)
    theta = (srng.normal((n_samp, n_nodes, n_nodes)) * sig[None, :, :]
             + mu[None, :, :])

    # y[t, k, j]: total hazard on node j at step t under sample k.
    y = tsr.maximum(tsr.dot(x, theta), 1e-3)
    infect = tsr.log(1. - tsr.exp(-y[0:-1])).dimshuffle(1, 0, 2)
    # Observed infections contribute log(1 - exp(-y)); observed
    # non-infections contribute -y. For 0/1 int8 data, ~x & s == (1 - x) & s.
    lkl_pos = tsr.sum(infect * (x[1:] & s[1:])) / n_samp
    lkl_neg = tsr.sum(-y[0:-1].dimshuffle(1, 0, 2) * (~x[1:] & s[1:])) / n_samp
    lkl = lkl_pos + lkl_neg

    # KL(N(mu, sig^2) || N(mu0, sig0^2)), summed elementwise:
    # log(sig0/sig) + (sig^2 + (mu - mu0)^2) / (2 sig0^2) - 1/2.
    kl = tsr.sum(tsr.log(sig0 / sig)
                 + (sig**2 + (mu0 - mu)**2) / (2 * sig0**2) - .5)

    cost = -lkl + kl
    cost.name = 'cost'
    return x, s, mu, sig, cost


if __name__ == "__main__":
    n_cascades = 10000  # used by the fixed data stream (commented out below)
    batch_size = 1000
    n_samples = 50

    graph = mn.create_random_graph(n_nodes=4)
    print('GRAPH:\n', graph, '\n-------------\n')

    x, s, mu, sig, cost = create_vi_model(len(graph), n_samples)
    rmse, g_shared = ab.rmse_error(graph, mu)

    # AdaDelta steps, clipped so that mu and sig stay inside (0, 1).
    step_rules = blocks.algorithms.CompositeRule(
        [blocks.algorithms.AdaDelta(), ClippedParams(1e-3, 1 - 1e-3)])
    alg = blocks.algorithms.GradientDescent(cost=cost, parameters=[mu, sig],
                                            step_rule=step_rules)

    # data_stream = ab.create_fixed_data_stream(n_cascades, graph, batch_size,
    #                                           shuffle=False)
    data_stream = ab.create_learned_data_stream(graph, batch_size)

    loop = blocks.main_loop.MainLoop(
        alg, data_stream,
        extensions=[
            blocks.extensions.FinishAfter(after_n_batches=10**4),
            blocks.extensions.monitoring.TrainingDataMonitoring(
                [cost, mu, sig, rmse, g_shared], after_batch=True),
            blocks.extensions.Printing(every_n_batches=100),
        ])
    loop.run()
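
# ---------------------------------------------------------------------------
# Sketches below are illustrations appended after the script; they are
# defined but never called, and are not part of the experiment.
# ---------------------------------------------------------------------------

# A minimal sanity check, assuming only Theano/NumPy and create_vi_model
# above: compile the VI cost on random dummy cascade data and confirm the
# graph builds and returns a scalar. The shapes and the all-observed mask
# are assumptions for the demo, not real cascade data.
def _sanity_check_vi_model(n_nodes=4, n_steps=5, n_samp=10):
    x, s, mu, sig, cost = create_vi_model(n_nodes, n_samp)
    f = theano.function([x, s], cost)
    xs = np.random.randint(0, 2, size=(n_steps, n_nodes)).astype('int8')
    ss = np.ones_like(xs)  # every node observed at every step
    print('cost on dummy data:', f(xs, ss))


# ClippedParams in isolation (the parameter and step values are made up).
# Since Blocks applies `parameter - step`, any step that would move an
# entry outside [min_value, max_value] is replaced with 0.
def _demo_clipped_params():
    p = theano.shared(np.asarray([0.002, 0.5, 0.999],
                                 dtype=theano.config.floatX), name='p')
    raw_step = tsr.vector('raw_step')
    clipped, _ = ClippedParams(1e-3, 1 - 1e-3).compute_step(p, raw_step)
    f = theano.function([raw_step], clipped)
    steps = np.asarray([0.01, 0.1, -0.01], dtype=theano.config.floatX)
    # First step would undershoot min_value, last would overshoot max_value,
    # so both are suppressed: prints [0., 0.1, 0.].
    print(f(steps))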