# Imports assumed by these tests (module paths follow the usual pylearn2
# layout; adjust if your checkout places these symbols elsewhere):
import numpy as np
import theano
from theano import config, function
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from nose.plugins.skip import SkipTest

from pylearn2 import rbm_tools
from pylearn2.costs.dbm import VariationalCD
from pylearn2.datasets.exc import NoDataPathError
from pylearn2.datasets.mnist import MNIST
from pylearn2.expr.nnet import inverse_sigmoid_numpy
from pylearn2.models.dbm import DBM
from pylearn2.models.dbm.layer import BinaryVector, BinaryVectorMaxPool, \
    Softmax
from pylearn2.scripts.dbm import dbm_metrics
from pylearn2.utils import safe_zip, sharedX
from pylearn2.utils.data_specs import DataSpecsMapping


def test_binary_vis_layer_make_state():
    # Verifies that BinaryVector.make_state creates a shared variable
    # whose value passes check_binary_samples
    n = 5
    num_samples = 1000
    tol = .04

    layer = BinaryVector(nvis=n)

    rng = np.random.RandomState([2012, 11, 1])

    mean = rng.uniform(1e-6, 1. - 1e-6, (n,))

    z = inverse_sigmoid_numpy(mean)

    layer.set_biases(z.astype(config.floatX))

    init_state = layer.make_state(num_examples=num_samples, numpy_rng=rng)

    value = init_state.get_value()

    check_binary_samples(value, (num_samples, n), mean, tol)
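

# The tests in this module rely on a sampling checker that lives alongside
# them. The following is a minimal sketch of the contract check_binary_samples
# is assumed to enforce: correct shape, strictly binary entries, and an
# empirical mean within tol of the expected mean.
def check_binary_samples(value, expected_shape, expected_mean, tol):
    assert value.shape == expected_shape
    # every entry must be exactly 0 or 1
    assert np.all((value == 0.) | (value == 1.))
    # the empirical mean over samples should converge to the expected mean
    max_error = np.abs(value.mean(axis=0) - expected_mean).max()
    assert max_error < tol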


def test_make_symbolic_state():
    # Tests whether the returned p_sample and h_sample have the right
    # dimensions
    num_examples = 40
    theano_rng = MRG_RandomStreams(2012 + 11 + 1)

    visible_layer = BinaryVector(nvis=100)
    rval = visible_layer.make_symbolic_state(num_examples=num_examples,
                                             theano_rng=theano_rng)

    hidden_layer = BinaryVectorMaxPool(detector_layer_dim=500,
                                       pool_size=1,
                                       layer_name='h',
                                       irange=0.05,
                                       init_bias=-2.0)
    p_sample, h_sample = hidden_layer.make_symbolic_state(
        num_examples=num_examples, theano_rng=theano_rng)

    softmax_layer = Softmax(n_classes=10, layer_name='s', irange=0.05)
    h_sample_s = softmax_layer.make_symbolic_state(num_examples=num_examples,
                                                   theano_rng=theano_rng)

    required_shapes = [(40, 100), (40, 500), (40, 500), (40, 10)]
    f = function(inputs=[], outputs=[rval, p_sample, h_sample, h_sample_s])

    for s, r in zip(f(), required_shapes):
        assert s.shape == r


def make_random_basic_binary_dbm(
        rng,
        pool_size_1,
        num_vis=None,
        num_pool_1=None,
        num_pool_2=None,
        pool_size_2=None,
        center=False):
    """
    Makes a DBM with BinaryVector for the visible layer and two hidden
    layers of type BinaryVectorMaxPool. The weights and biases are
    initialized randomly with somewhat large values (i.e., not what
    you'd want to use for learning).

    rng: A numpy RandomState.
    pool_size_1: The size of the pools to use in the first hidden layer.
    num_vis / num_pool_1 / num_pool_2 / pool_size_2: Sizes of the visible
        layer, the two pooling layers, and the second layer's pools;
        drawn at random from rng when left as None.
    center: If True, use the centering trick in all layers.
    """
    if num_vis is None:
        num_vis = rng.randint(1, 11)
    if num_pool_1 is None:
        num_pool_1 = rng.randint(1, 11)
    if num_pool_2 is None:
        num_pool_2 = rng.randint(1, 11)
    if pool_size_2 is None:
        pool_size_2 = rng.randint(1, 6)

    num_h1 = num_pool_1 * pool_size_1
    num_h2 = num_pool_2 * pool_size_2

    v = BinaryVector(num_vis, center=center)
    v.set_biases(rng.uniform(-1., 1., (num_vis,)).astype(config.floatX),
                 recenter=center)

    h1 = BinaryVectorMaxPool(detector_layer_dim=num_h1,
                             pool_size=pool_size_1,
                             layer_name='h1',
                             center=center,
                             irange=1.)
    h1.set_biases(rng.uniform(-1., 1., (num_h1,)).astype(config.floatX),
                  recenter=center)

    h2 = BinaryVectorMaxPool(center=center,
                             detector_layer_dim=num_h2,
                             pool_size=pool_size_2,
                             layer_name='h2',
                             irange=1.)
    h2.set_biases(rng.uniform(-1., 1., (num_h2,)).astype(config.floatX),
                  recenter=center)

    dbm = DBM(visible_layer=v, hidden_layers=[h1, h2], batch_size=1,
              niter=50)

    return dbm
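

# Hypothetical usage sketch (not part of the original suite): build a small
# random DBM with pools of size 2 in the first hidden layer and confirm the
# factory wired it up as described. The attributes read off the layers below
# are assumed to be stored by the constructors above.
def demo_make_random_basic_binary_dbm():
    rng = np.random.RandomState([2012, 11, 1])
    dbm = make_random_basic_binary_dbm(rng, pool_size_1=2)
    assert len(dbm.hidden_layers) == 2
    # the first detector layer's size is a multiple of the requested pool size
    assert dbm.hidden_layers[0].detector_layer_dim % 2 == 0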


def test_variational_cd():
    # Verifies that VariationalCD works well with make_layer_to_symbolic_state
    visible_layer = BinaryVector(nvis=100)
    hidden_layer = BinaryVectorMaxPool(detector_layer_dim=500,
                                       pool_size=1,
                                       layer_name='h',
                                       irange=0.05,
                                       init_bias=-2.0)
    model = DBM(visible_layer=visible_layer,
                hidden_layers=[hidden_layer],
                batch_size=100,
                niter=1)

    cost = VariationalCD(num_chains=100, num_gibbs_steps=2)

    data_specs = cost.get_data_specs(model)
    mapping = DataSpecsMapping(data_specs)
    space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
    source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

    # Build a symbolic batch for each (space, source) pair the cost expects
    theano_args = []
    for space, source in safe_zip(space_tuple, source_tuple):
        name = '%s' % (source,)
        arg = space.make_theano_batch(name=name)
        theano_args.append(arg)
    theano_args = tuple(theano_args)
    nested_args = mapping.nest(theano_args)

    grads, updates = cost.get_gradients(model, nested_args)
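

# A hypothetical follow-up check (not in the original test): get_gradients is
# assumed to return an OrderedDict mapping parameters to gradient
# expressions, so inside the test one could additionally verify coverage:
#
#     for param in model.get_params():
#         assert param in grads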


def test_binary_vis_layer_sample():
    # Verifies that BinaryVector.sample returns an expression
    # whose value passes check_binary_samples
    assert hasattr(np, 'exp')

    n = 5
    num_samples = 1000
    tol = .04

    class DummyLayer(object):
        """
        A layer that we build for the test that just uses a state
        as its downward message.
        """

        def downward_state(self, state):
            return state

        def downward_message(self, state):
            return state

    vis = BinaryVector(nvis=n)
    hid = DummyLayer()

    rng = np.random.RandomState([2012, 11, 1, 259])

    mean = rng.uniform(1e-6, 1. - 1e-6, (n,))

    ofs = rng.randn(n)

    vis.set_biases(ofs.astype(config.floatX))

    z = inverse_sigmoid_numpy(mean) - ofs

    z_var = sharedX(np.zeros((num_samples, n)) + z)

    theano_rng = MRG_RandomStreams(2012 + 11 + 1)

    sample = vis.sample(state_above=z_var, layer_above=hid,
                        theano_rng=theano_rng)

    sample = sample.eval()

    check_binary_samples(sample, (num_samples, n), mean, tol)


def test_softmax_mf_sample_consistent():
    # A test of the Softmax class
    # Verifies that the mean field update is consistent with
    # the sampling function
    # Since a Softmax layer contains only one random variable
    # (with n_classes possible values) the mean field assumption
    # does not impose any restriction, so mf_update simply gives
    # the true expected value of h given v.
    # We can thus use mf_update to compute the expected value
    # of a sample of y conditioned on v, and check that samples
    # drawn using the layer's sample method converge to that
    # value.

    rng = np.random.RandomState([2012, 11, 1, 1154])
    theano_rng = MRG_RandomStreams(2012 + 11 + 1 + 1154)
    num_samples = 1000
    tol = .042

    # Make DBM
    num_vis = rng.randint(1, 11)
    n_classes = rng.randint(1, 11)

    v = BinaryVector(num_vis)
    v.set_biases(rng.uniform(-1., 1., (num_vis,)).astype(config.floatX))

    y = Softmax(n_classes=n_classes, layer_name='y', irange=1.)
    y.set_biases(rng.uniform(-1., 1., (n_classes,)).astype(config.floatX))

    dbm = DBM(visible_layer=v, hidden_layers=[y], batch_size=1, niter=50)

    # Randomly pick a v to condition on
    # (Random numbers are generated via dbm.rng)
    layer_to_state = dbm.make_layer_to_state(1)
    v_state = layer_to_state[v]
    y_state = layer_to_state[y]

    # Infer P(y | v) using mean field
    expected_y = y.mf_update(state_below=v.upward_state(v_state))
    expected_y = expected_y[0, :]
    expected_y = expected_y.eval()

    # Broadcast the single conditioning state out to a batch of
    # num_samples identical copies
    cause_copy = sharedX(np.zeros((num_samples,))).dimshuffle(0, 'x')
    v_state = v_state[0, :] + cause_copy
    y_state = y_state[0, :] + cause_copy

    y_samples = y.sample(state_below=v.upward_state(v_state),
                         theano_rng=theano_rng)

    y_samples = function([], y_samples)()

    check_multinomial_samples(y_samples, (num_samples, n_classes),
                              expected_y, tol)
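

# As above, a minimal sketch of the contract check_multinomial_samples is
# assumed to enforce (the real helper lives alongside these tests): correct
# shape, one-hot rows, and empirical class frequencies within tol of the
# expected distribution.
def check_multinomial_samples(value, expected_shape, expected_mean, tol):
    assert value.shape == expected_shape
    # each row must be a one-hot vector
    assert np.all((value == 0.) | (value == 1.))
    assert np.all(value.sum(axis=1) == 1.)
    # empirical class frequencies should converge to the expected ones
    max_error = np.abs(value.mean(axis=0) - expected_mean).max()
    assert max_error < tol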


def test_softmax_mf_energy_consistent_centering():
    # A test of the Softmax class
    # Verifies that the mean field update is consistent with
    # the energy function when using the centering trick
    # Since a Softmax layer contains only one random variable
    # (with n_classes possible values) the mean field assumption
    # does not impose any restriction, so mf_update simply gives
    # the true expected value of h given v.
    # We also know
    # P(h | v) = P(h, v) / P(v)
    #          = P(h, v) / sum_h P(h, v)
    #          = exp(-E(h, v)) / sum_h exp(-E(h, v))
    # So we can check that computing P(h | v) with both
    # methods works the same way

    rng = np.random.RandomState([2012, 11, 1, 1131])

    # Make DBM
    num_vis = rng.randint(1, 11)
    n_classes = rng.randint(1, 11)

    v = BinaryVector(num_vis, center=True)
    v.set_biases(rng.uniform(-1., 1., (num_vis,)).astype(config.floatX),
                 recenter=True)

    y = Softmax(n_classes=n_classes, layer_name='y', irange=1., center=True)
    y.set_biases(rng.uniform(-1., 1., (n_classes,)).astype(config.floatX),
                 recenter=True)

    dbm = DBM(visible_layer=v, hidden_layers=[y], batch_size=1, niter=50)

    # Randomly pick a v to condition on
    # (Random numbers are generated via dbm.rng)
    layer_to_state = dbm.make_layer_to_state(1)
    v_state = layer_to_state[v]
    y_state = layer_to_state[y]

    # Infer P(y | v) using mean field
    expected_y = y.mf_update(state_below=v.upward_state(v_state))
    expected_y = expected_y[0, :]
    expected_y = expected_y.eval()

    # Infer P(y | v) from the energy function, by enumerating all
    # n_classes one-hot configurations of y
    energy = dbm.energy(V=v_state, hidden=[y_state])
    unnormalized_prob = T.exp(-energy)
    assert unnormalized_prob.ndim == 1
    unnormalized_prob = unnormalized_prob[0]
    unnormalized_prob = function([], unnormalized_prob)

    def compute_unnormalized_prob(which):
        write_y = np.zeros((n_classes,))
        write_y[which] = 1.

        y_value = y_state.get_value()
        y_value[0, :] = write_y
        y_state.set_value(y_value)

        return unnormalized_prob()

    probs = np.asarray([compute_unnormalized_prob(idx)
                        for idx in range(n_classes)])
    probs = probs / probs.sum()

    assert np.allclose(expected_y, probs), \
        ("mean field expectation of h: %s, expectation of h based on "
         "enumerating energy function values: %s" % (expected_y, probs))


def test_ais():
    """
    Test AIS computation by comparing the output of estimate_likelihood
    to the output of Russ Salakhutdinov's code for the same parameters.
    """
    try:
        trainset = MNIST(which_set='train')
        testset = MNIST(which_set='test')
    except NoDataPathError:
        raise SkipTest("PYLEARN2_DATA_PATH environment variable not defined")

    nvis = 784
    nhid = 20

    # Random initialization of RBM parameters
    np.random.seed(98734)
    w_hid = 10 * np.cast[config.floatX](np.random.randn(nvis, nhid))
    b_vis = 10 * np.cast[config.floatX](np.random.randn(nvis))
    b_hid = 10 * np.cast[config.floatX](np.random.randn(nhid))

    # Initialization of the RBM (a DBM with a single hidden layer)
    visible_layer = BinaryVector(nvis)
    hidden_layer = BinaryVectorMaxPool(detector_layer_dim=nhid,
                                       pool_size=1,
                                       layer_name='h',
                                       irange=0.1)
    rbm = DBM(batch_size=100, visible_layer=visible_layer,
              hidden_layers=[hidden_layer], niter=1)
    rbm.visible_layer.set_biases(b_vis)
    rbm.hidden_layers[0].set_weights(w_hid)
    rbm.hidden_layers[0].set_biases(b_hid)
    rbm.nvis = nvis
    rbm.nhid = nhid

    # Compute the true logz and the associated train_ll and test_ll
    # using rbm_tools
    v_sample = T.matrix('v_sample')
    h_sample = T.matrix('h_sample')

    W = theano.shared(rbm.hidden_layers[0].get_weights())
    hbias = theano.shared(rbm.hidden_layers[0].get_biases())
    vbias = theano.shared(rbm.visible_layer.get_biases())

    # Free energy of v: F(v) = -v . vbias - sum_j softplus((v W + hbias)_j)
    wx_b = T.dot(v_sample, W) + hbias
    vbias_term = T.dot(v_sample, vbias)
    hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
    free_energy_v = -hidden_term - vbias_term
    free_energy_v_fn = theano.function(inputs=[v_sample],
                                       outputs=free_energy_v)

    # Free energy of h, by symmetry
    wh_c = T.dot(h_sample, W.T) + vbias
    hbias_term = T.dot(h_sample, hbias)
    visible_term = T.sum(T.log(1 + T.exp(wh_c)), axis=1)
    free_energy_h = -visible_term - hbias_term
    free_energy_h_fn = theano.function(inputs=[h_sample],
                                       outputs=free_energy_h)

    real_logz = rbm_tools.compute_log_z(rbm, free_energy_h_fn)
    real_ais_train_ll = -rbm_tools.compute_nll(rbm,
                                               trainset.get_design_matrix(),
                                               real_logz, free_energy_v_fn)
    real_ais_test_ll = -rbm_tools.compute_nll(rbm,
                                              testset.get_design_matrix(),
                                              real_logz, free_energy_v_fn)

    # Compute train_ll, test_ll and logz using dbm_metrics
    train_ll, test_ll, logz = dbm_metrics.estimate_likelihood(
        [W], [vbias, hbias], trainset, testset, pos_mf_steps=100)

    # Compare against the reference values; the original one-sided checks
    # would pass trivially whenever the estimate overshot, so use abs()
    assert abs(real_logz - logz) < 2.0
    assert abs(real_ais_train_ll - train_ll) < 2.0
    assert abs(real_ais_test_ll - test_ll) < 2.0