Example #1
def test_gaussian_build_from_config():
    ly = layers.GaussianLayer(num_vis)
    ly.add_constraint({'loc': constraints.non_negative})
    p = penalties.l2_penalty(0.37)
    ly.add_penalty({'log_var': p})
    ly_new = layers.Layer.from_config(ly.get_config())
    assert ly_new.get_config() == ly.get_config()
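The snippets on this page are taken from the paysage test suite and example scripts, and they rely on module-level imports and fixture constants that the excerpts omit (num_vis, num_hid, num_samples, the backend alias be, and so on). A minimal sketch of that assumed preamble, with illustrative values; the exact import paths and sizes are assumptions and vary across paysage versions:

# Assumed preamble for the snippets on this page. Fixture values are
# illustrative, and import paths differ between paysage versions.
import math
import tempfile
from copy import deepcopy
from functools import partial

import numpy
import pandas

from paysage import backends as be
from paysage import (layers, batch, fit, optimizers, schedules,
                     penalties, constraints)

# shared fixture sizes used by the tests (illustrative values)
num_vis = 8
num_hid = 8
num_samples = 10
dropout_p = 0.5  # second argument in Example #30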
Example #2
def test_gaussian_conditional_params():
    ly = layers.GaussianLayer(num_vis)
    w = layers.Weights((num_vis, num_hid))
    scaled_units = [be.randn((num_samples, num_hid))]
    weights = [w.W_T()]
    beta = be.rand((num_samples, 1))
    ly._conditional_params(scaled_units, weights, beta)
Example #3
def test_gaussian_derivatives():
    ly = layers.GaussianLayer(num_vis)
    w = layers.Weights((num_vis, num_hid))
    vis = ly.random((num_samples, num_vis))
    hid = [be.randn((num_samples, num_hid))]
    weights = [w.W_T()]
    ly.derivatives(vis, hid, weights)
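The gradient object returned by derivatives carries one field per layer parameter; Example #24 below reads them as .loc and .log_var. A minimal sketch under that assumption:

derivs = ly.derivatives(vis, hid, weights)
# per-parameter gradients of the Gaussian layer (field names as used in Example #24)
print(be.shape(derivs.loc), be.shape(derivs.log_var))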
Example #4
def test_grbm_reload():
    vis_layer = layers.BernoulliLayer(num_vis, center=True)
    hid_layer = layers.GaussianLayer(num_hid, center=True)

    # create some extrinsics
    grbm = BoltzmannMachine([vis_layer, hid_layer])
    data = batch.Batch({
        'train':
        batch.InMemoryTable(be.randn((10 * num_samples, num_vis)), num_samples)
    })
    grbm.initialize(data)
    with tempfile.NamedTemporaryFile() as file:
        # save the model
        store = pandas.HDFStore(file.name, mode='w')
        grbm.save(store)
        store.close()
        # reload
        store = pandas.HDFStore(file.name, mode='r')
        grbm_reload = BoltzmannMachine.from_saved(store)
        store.close()
    # check the two models are consistent
    vis_data = vis_layer.random((num_samples, num_vis))
    data_state = State.from_visible(vis_data, grbm)
    vis_orig = grbm.deterministic_iteration(1, data_state)[0]
    vis_reload = grbm_reload.deterministic_iteration(1, data_state)[0]
    assert be.allclose(vis_orig, vis_reload)
    assert be.allclose(grbm.layers[0].moments.mean,
                       grbm_reload.layers[0].moments.mean)
    assert be.allclose(grbm.layers[0].moments.var,
                       grbm_reload.layers[0].moments.var)
    assert be.allclose(grbm.layers[1].moments.mean,
                       grbm_reload.layers[1].moments.mean)
    assert be.allclose(grbm.layers[1].moments.var,
                       grbm_reload.layers[1].moments.var)
Example #5
def test_gaussian_update():
    ly = layers.GaussianLayer(num_vis)
    w = layers.Weights((num_vis, num_hid))
    scaled_units = [be.randn((num_samples, num_hid))]
    weights = [w.W_T()]
    beta = be.rand((num_samples, 1))
    ly.update(scaled_units, weights, beta)
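Here update computes the parameters of the layer's conditional distribution in place rather than returning them; in the older API shown in Example #20 they are read back through ext_params. A hedged sketch, with attribute names taken from Example #20 (they vary across paysage versions):

ly.update(scaled_units, weights, beta)
# older-API attribute access (see Example #20); names vary by version
cond_mean = ly.ext_params.mean      # conditional mean given the connected units
cond_var = ly.ext_params.variance   # conditional variance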
Example #6
def test_grbm_save():
    vis_layer = layers.BernoulliLayer(num_vis)
    hid_layer = layers.GaussianLayer(num_hid)
    grbm = model.Model([vis_layer, hid_layer])
    with tempfile.NamedTemporaryFile() as file:
        store = pandas.HDFStore(file.name, mode='w')
        grbm.save(store)
        store.close()
Example #7
def test_grbm_from_config():
    vis_layer = layers.BernoulliLayer(num_vis)
    hid_layer = layers.GaussianLayer(num_hid)
    grbm = model.Model([vis_layer, hid_layer])
    config = grbm.get_config()

    rbm_from_config = model.Model.from_config(config)
    config_from_config = rbm_from_config.get_config()
    assert config == config_from_config
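Note that the examples on this page mix two generations of the paysage API: older snippets construct models with model.Model (or hidden.Model), newer ones with BoltzmannMachine, while the layer list passed in stays the same. An illustrative, version-dependent comparison:

# the same Bernoulli-Gaussian RBM under both APIs (version-dependent)
grbm_old = model.Model([vis_layer, hid_layer])       # older paysage
grbm_new = BoltzmannMachine([vis_layer, hid_layer])  # newer paysage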
Example #8
def example_mnist_hopfield(paysage_path=None, num_epochs=10, show_plot=False):

    num_hidden_units = 500
    batch_size = 50
    learning_rate = 0.001
    mc_steps = 1

    (_, _, shuffled_filepath) = \
        util.default_paths(paysage_path)

    # set up the reader to get minibatches
    data = batch.Batch(shuffled_filepath,
                       'train/images',
                       batch_size,
                       transform=batch.binarize_color,
                       train_fraction=0.99)

    # set up the model and initialize the parameters
    vis_layer = layers.BernoulliLayer(data.ncols)
    hid_layer = layers.GaussianLayer(num_hidden_units)

    rbm = model.Model([vis_layer, hid_layer])
    rbm.initialize(data)

    metrics = [
        'ReconstructionError', 'EnergyDistance', 'EnergyGap', 'EnergyZscore'
    ]
    perf = fit.ProgressMonitor(data, metrics=metrics)

    # set up the optimizer and the fit method
    opt = optimizers.ADAM(stepsize=learning_rate,
                          scheduler=optimizers.PowerLawDecay(0.1))

    sampler = fit.DrivenSequentialMC.from_batch(rbm, data, method='stochastic')

    cd = fit.SGD(rbm,
                 data,
                 opt,
                 num_epochs,
                 method=fit.pcd,
                 sampler=sampler,
                 mcsteps=mc_steps,
                 monitor=perf)

    # fit the model
    print('training with persistent contrastive divergence')
    cd.train()

    # evaluate the model
    util.show_metrics(rbm, perf)
    util.show_reconstructions(rbm, data.get('validate'), fit, show_plot)
    util.show_fantasy_particles(rbm, data.get('validate'), fit, show_plot)
    util.show_weights(rbm, show_plot)

    # close the HDF5 store
    data.close()
    print("Done")
Example #9
def test_grbm_from_config():
    vis_layer = layers.BernoulliLayer(num_vis)
    hid_layer = layers.GaussianLayer(num_hid)

    grbm = BoltzmannMachine([vis_layer, hid_layer])
    config = grbm.get_config()

    rbm_from_config = BoltzmannMachine.from_config(config)
    config_from_config = rbm_from_config.get_config()
    assert config == config_from_config
Example #10
def test_gaussian_GFE_entropy_gradient():
    num_units = 5
    lay = layers.GaussianLayer(num_units)

    lay.params.loc[:] = be.rand_like(lay.params.loc)
    lay.params.log_var[:] = be.randn(be.shape(lay.params.loc))

    from cytoolz import compose
    sum_square = compose(be.tsum, be.square)

    for itr in range(10):
        mag = lay.get_random_magnetization()
        lms = lay.lagrange_multipliers_analytic(mag)
        entropy = lay.TAP_entropy(mag)
        lr = 0.001
        gogogo = True
        grad = lay.TAP_magnetization_grad(mag, [], [], [])
        grad_mag = math.sqrt(be.float_scalar(be.accumulate(sum_square, grad)))
        normit = partial(be.tmul_, be.float_scalar(1.0/grad_mag))
        be.apply_(normit, grad)
        rand_grad = lay.get_random_magnetization()
        grad_mag = math.sqrt(be.float_scalar(be.accumulate(sum_square, rand_grad)))
        normit = partial(be.tmul_, be.float_scalar(1.0/grad_mag))
        be.apply_(normit, rand_grad)
        while gogogo:
            cop1_mag = deepcopy(mag)
            cop1_lms = deepcopy(lms)
            cop2_mag = deepcopy(mag)
            cop2_lms = deepcopy(lms)

            cop1_mag.mean[:] = mag.mean + lr * grad.mean
            cop2_mag.mean[:] = mag.mean + lr * rand_grad.mean
            cop1_mag.variance[:] = mag.variance + lr * grad.variance
            cop2_mag.variance[:] = mag.variance + lr * rand_grad.variance
            lay.clip_magnetization_(cop1_mag)
            lay.clip_magnetization_(cop2_mag)
            cop1_lms = lay.lagrange_multipliers_analytic(cop1_mag)
            cop2_lms = lay.lagrange_multipliers_analytic(cop2_mag)

            entropy_1 = lay.TAP_entropy(cop1_mag)
            entropy_2 = lay.TAP_entropy(cop2_mag)

            regress = entropy_1 - entropy_2 < 0.0
            #print(itr, "[",lr, "] ", entropy, entropy_1, entropy_2, regress)
            if regress:
                #print(grad, rand_grad)
                if lr < 1e-6:
                    assert False,\
                    "Gaussian GFE magnetization gradient is wrong"
                    break
                else:
                    lr *= 0.5
            else:
                break
Example #11
def test_gaussian_1D_1mode_train():
    # create some example data
    num = 10000
    mu = 3
    sigma = 1
    samples = be.randn((num, 1)) * sigma + mu

    # set up the reader to get minibatches
    batch_size = 100
    samples_train, samples_validate = batch.split_tensor(samples, 0.9)
    data = batch.Batch({
        'train':
        batch.InMemoryTable(samples_train, batch_size),
        'validate':
        batch.InMemoryTable(samples_validate, batch_size)
    })

    # parameters
    learning_rate = schedules.PowerLawDecay(initial=0.1, coefficient=0.1)
    mc_steps = 1
    num_epochs = 10
    num_sample_steps = 100

    # set up the model and initialize the parameters
    vis_layer = layers.GaussianLayer(1)
    hid_layer = layers.OneHotLayer(1)

    rbm = BoltzmannMachine([vis_layer, hid_layer])
    rbm.initialize(data, method='hinton')

    # modify the parameters to shift the initialized model from the data
    # this forces it to train
    rbm.layers[0].params = layers.ParamsGaussian(
        rbm.layers[0].params.loc - 3, rbm.layers[0].params.log_var - 1)

    # set up the optimizer and the fit method
    opt = optimizers.ADAM(stepsize=learning_rate)
    cd = fit.SGD(rbm, data)

    # fit the model
    print('training with persistent contrastive divergence')
    cd.train(opt, num_epochs, method=fit.pcd, mcsteps=mc_steps)

    # sample data from the trained model
    model_state = \
        samplers.SequentialMC.generate_fantasy_state(rbm, num, num_sample_steps)
    pts_trained = model_state[0]

    percent_error = 10
    mu_trained = be.mean(pts_trained)
    assert numpy.abs(mu_trained / mu - 1) < (percent_error / 100)

    sigma_trained = numpy.sqrt(be.var(pts_trained))
    assert numpy.abs(sigma_trained / sigma - 1) < (percent_error / 100)
Example #12
def run(paysage_path=None, num_epochs=10, show_plot=False):

    num_hidden_units = 500
    batch_size = 100
    learning_rate = schedules.PowerLawDecay(initial=0.001, coefficient=0.1)
    mc_steps = 1

    (_, _, shuffled_filepath) = \
        util.default_paths(paysage_path)

    # set up the reader to get minibatches
    data = batch.HDFBatch(shuffled_filepath,
                          'train/images',
                          batch_size,
                          transform=pre.binarize_color,
                          train_fraction=0.99)

    # set up the model and initialize the parameters
    vis_layer = layers.BernoulliLayer(data.ncols)
    hid_layer = layers.GaussianLayer(num_hidden_units)
    hid_layer.set_fixed_params(["loc", "log_var"])

    rbm = model.Model([vis_layer, hid_layer])
    rbm.initialize(data, method="glorot_normal")

    metrics = ['ReconstructionError', 'EnergyDistance', 'EnergyGap',
               'EnergyZscore', 'HeatCapacity', 'WeightSparsity', 'WeightSquare']
    perf = fit.ProgressMonitor(data, metrics=metrics)

    # set up the optimizer and the fit method
    opt = optimizers.ADAM(stepsize=learning_rate)

    sampler = fit.DrivenSequentialMC.from_batch(rbm, data)

    cd = fit.SGD(rbm, data, opt, num_epochs, sampler, method=fit.pcd,
                 mcsteps=mc_steps, monitor=perf)

    # fit the model
    print('training with persistent contrastive divergence')
    cd.train()

    # evaluate the model
    util.show_metrics(rbm, perf)
    valid = data.get('validate')
    util.show_reconstructions(rbm, valid, fit, show_plot,
                              n_recon=10, vertical=False, num_to_avg=10)
    util.show_fantasy_particles(rbm, valid, fit, show_plot, n_fantasy=25)
    util.show_weights(rbm, show_plot, n_weights=25)

    # close the HDF5 store
    data.close()
    print("Done")
Example #13
def test_grbm_save():
    vis_layer = layers.BernoulliLayer(num_vis, center=True)
    hid_layer = layers.GaussianLayer(num_hid, center=True)
    grbm = BoltzmannMachine([vis_layer, hid_layer])
    data = batch.Batch({
        'train':
        batch.InMemoryTable(be.randn((10 * num_samples, num_vis)), num_samples)
    })
    grbm.initialize(data)
    with tempfile.NamedTemporaryFile() as file:
        store = pandas.HDFStore(file.name, mode='w')
        grbm.save(store)
        store.close()
Example #14
def run(num_epochs=10, show_plot=False):

    num_hidden_units = 256
    batch_size = 100
    learning_rate = schedules.PowerLawDecay(initial=0.001, coefficient=0.1)
    mc_steps = 1

    # set up the reader to get minibatches
    data = util.create_batch(batch_size,
                             train_fraction=0.95,
                             transform=transform)

    # set up the model and initialize the parameters
    vis_layer = layers.GaussianLayer(data.ncols)
    hid_layer = layers.BernoulliLayer(num_hidden_units)

    rbm = BoltzmannMachine([vis_layer, hid_layer])
    rbm.initialize(data, 'stddev')
    rbm.layers[0].params.log_var[:] = \
      be.log(0.05*be.ones_like(rbm.layers[0].params.log_var))

    opt = optimizers.ADAM(stepsize=learning_rate)

    # This example parameter set for TAP uses gradient descent to optimize the
    # Gibbs free energy:
    tap = fit.TAP(True, 1.0, 0.01, 100, False, 0.9, 0.001, 0.5)

    # This example parameter set for TAP uses self-consistent iteration to
    # optimize the Gibbs free energy:
    #tap = fit.TAP(False, tolerance=0.001, max_iters=100)
    sgd = fit.SGD(rbm, data)
    sgd.monitor.generator_metrics.append(TAPFreeEnergy())
    sgd.monitor.generator_metrics.append(TAPLogLikelihood())

    # fit the model
    print('Training with stochastic gradient ascent using TAP expansion')
    sgd.train(opt, num_epochs, method=tap.tap_update, mcsteps=mc_steps)

    util.show_metrics(rbm, sgd.monitor)
    valid = data.get('validate')
    util.show_reconstructions(rbm,
                              valid,
                              show_plot,
                              n_recon=10,
                              vertical=False,
                              num_to_avg=10)
    util.show_fantasy_particles(rbm, valid, show_plot, n_fantasy=5)
    util.show_weights(rbm, show_plot, n_weights=25)
    # close the HDF5 store
    data.close()
    print("Done")
Example #15
def test_gaussian_GFE_derivatives_gradient_descent():
    num_units = 5

    layer_1 = layers.GaussianLayer(num_units)
    layer_2 = layers.BernoulliLayer(num_units)

    rbm = BoltzmannMachine([layer_1, layer_2])

    for i in range(len(rbm.connections)):
        rbm.connections[i].weights.params.matrix[:] = \
        0.01 * be.randn(rbm.connections[i].shape)

    for lay in rbm.layers:
        lay.params.loc[:] = be.rand_like(lay.params.loc)

    state, GFE = rbm.compute_StateTAP(use_GD=False, tol=1e-7, max_iters=50)
    grad = rbm._grad_gibbs_free_energy(state)
    gu.grad_normalize_(grad)

    for i in range(100):
        lr = 0.001
        gogogo = True
        random_grad = gu.random_grad(rbm)
        gu.grad_normalize_(random_grad)
        while gogogo:
            cop1 = deepcopy(rbm)
            lr_mul = partial(be.tmul, lr)

            cop1.parameter_update(gu.grad_apply(lr_mul, grad))
            cop1_state, cop1_GFE = cop1.compute_StateTAP(use_GD=False, tol=1e-7, max_iters=50)

            cop2 = deepcopy(rbm)
            cop2.parameter_update(gu.grad_apply(lr_mul, random_grad))
            cop2_state, cop2_GFE = cop2.compute_StateTAP(use_GD=False, tol=1e-7, max_iters=50)

            regress = cop2_GFE - cop1_GFE < 0

            if regress:
                if lr < 1e-6:
                    assert False, \
                    "TAP FE gradient is not working properly for Gaussian models"
                    break
                else:
                    lr *= 0.5
            else:
                break
Example #16
def run(num_epochs=10, show_plot=False):

    num_hidden_units = 256
    batch_size = 100
    learning_rate = schedules.PowerLawDecay(initial=0.001, coefficient=0.1)
    mc_steps = 1

    # set up the reader to get minibatches
    data = util.create_batch(batch_size,
                             train_fraction=0.95,
                             transform=transform)

    # set up the model and initialize the parameters
    vis_layer = layers.BernoulliLayer(data.ncols)
    hid_layer = layers.GaussianLayer(num_hidden_units)

    rbm = BoltzmannMachine([vis_layer, hid_layer])
    rbm.initialize(data)

    # set up the optimizer and the fit method
    opt = optimizers.ADAM(stepsize=learning_rate)
    cd = fit.SGD(rbm, data)

    # fit the model
    print('training with persistent contrastive divergence')
    cd.train(opt, num_epochs, method=fit.pcd, mcsteps=mc_steps)

    # evaluate the model
    util.show_metrics(rbm, cd.monitor)
    valid = data.get('validate')
    util.show_reconstructions(rbm,
                              valid,
                              show_plot,
                              n_recon=10,
                              vertical=False,
                              num_to_avg=10)
    util.show_fantasy_particles(rbm, valid, show_plot, n_fantasy=5)
    util.show_weights(rbm, show_plot, n_weights=25)

    # close the HDF5 store
    data.close()
    print("Done")
Example #17
def test_grbm_reload():
    vis_layer = layers.BernoulliLayer(num_vis)
    hid_layer = layers.GaussianLayer(num_hid)
    # create some extrinsics
    grbm = model.Model([vis_layer, hid_layer])
    with tempfile.NamedTemporaryFile() as file:
        # save the model
        store = pandas.HDFStore(file.name, mode='w')
        grbm.save(store)
        store.close()
        # reload
        store = pandas.HDFStore(file.name, mode='r')
        grbm_reload = model.Model.from_saved(store)
        store.close()
    # check the two models are consistent
    vis_data = vis_layer.random((num_samples, num_vis))
    data_state = model.State.from_visible(vis_data, grbm)
    vis_orig = grbm.deterministic_iteration(1, data_state).units[0]
    vis_reload = grbm_reload.deterministic_iteration(1, data_state).units[0]
    assert be.allclose(vis_orig, vis_reload)
Example #18
def test_gaussian_Compute_StateTAP_GD():
    num_units = 10

    layer_1 = layers.GaussianLayer(num_units)
    layer_2 = layers.BernoulliLayer(num_units)

    rbm = BoltzmannMachine([layer_1, layer_2])
    for i in range(len(rbm.connections)):
        rbm.connections[i].weights.params.matrix[:] = \
        0.01 * be.randn(rbm.connections[i].shape)

    for lay in rbm.layers:
        lay.params.loc[:] = be.rand_like(lay.params.loc)

    for i in range(100):
        random_state = StateTAP.from_model_rand(rbm)
        GFE = rbm.gibbs_free_energy(random_state.cumulants)
        _, min_GFE = rbm._compute_StateTAP_GD(seed=random_state)

        if GFE - min_GFE < 0.0:
            assert False, \
                "_compute_StateTAP_GD is not reducing the GFE"
Example #19
def run(num_epochs=20, show_plot=False):
    num_hidden_units = 200
    batch_size = 100
    mc_steps = 10
    beta_std = 0.95

    # set up the reader to get minibatches
    data = util.create_batch(batch_size, train_fraction=0.95, transform=transform)

    # set up the model and initialize the parameters
    vis_layer = layers.GaussianLayer(data.ncols, center=False)
    hid_layer = layers.BernoulliLayer(num_hidden_units, center=True)
    hid_layer.set_fixed_params(hid_layer.get_param_names())

    rbm = BoltzmannMachine([vis_layer, hid_layer])
    rbm.initialize(data, 'pca', epochs=500, verbose=True)

    print('training with persistent contrastive divergence')
    cd = fit.SGD(rbm, data, fantasy_steps=10)
    cd.monitor.generator_metrics.append(M.JensenShannonDivergence())

    learning_rate = schedules.PowerLawDecay(initial=1e-3, coefficient=5)
    opt = optimizers.ADAM(stepsize=learning_rate)
    cd.train(opt, num_epochs, method=fit.pcd, mcsteps=mc_steps,
             beta_std=beta_std, burn_in=1)

    # evaluate the model
    util.show_metrics(rbm, cd.monitor)
    valid = data.get('validate')
    util.show_reconstructions(rbm, valid, show_plot, n_recon=10, vertical=False)
    util.show_fantasy_particles(rbm, valid, show_plot, n_fantasy=5)
    util.show_weights(rbm, show_plot, n_weights=100)

    # close the HDF5 store
    data.close()
    print("Done")

    return rbm
Example #20
def test_gaussian_update():
    num_visible_units = 100
    num_hidden_units = 50
    batch_size = 25

    # set a seed for the random number generator
    be.set_seed()

    # set up some layer and model objects
    vis_layer = layers.GaussianLayer(num_visible_units)
    hid_layer = layers.GaussianLayer(num_hidden_units)
    rbm = hidden.Model([vis_layer, hid_layer])

    # randomly set the intrinsic model parameters
    a = be.randn((num_visible_units, ))
    b = be.randn((num_hidden_units, ))
    log_var_a = 0.1 * be.randn((num_visible_units, ))
    log_var_b = 0.1 * be.randn((num_hidden_units, ))
    W = be.randn((num_visible_units, num_hidden_units))

    rbm.layers[0].int_params.loc[:] = a
    rbm.layers[1].int_params.loc[:] = b
    rbm.layers[0].int_params.log_var[:] = log_var_a
    rbm.layers[1].int_params.log_var[:] = log_var_b
    rbm.weights[0].int_params.matrix[:] = W

    # generate a random batch of data
    vdata = rbm.layers[0].random((batch_size, num_visible_units))
    hdata = rbm.layers[1].random((batch_size, num_hidden_units))

    # compute the variance
    visible_var = be.exp(log_var_a)
    hidden_var = be.exp(log_var_b)

    # rescale the data
    vdata_scaled = vdata / be.broadcast(visible_var, vdata)
    hdata_scaled = hdata / be.broadcast(hidden_var, hdata)

    # test rescale
    assert be.allclose(vdata_scaled, rbm.layers[0].rescale(vdata)),\
    "visible rescale wrong in gaussian-gaussian rbm"

    assert be.allclose(hdata_scaled, rbm.layers[1].rescale(hdata)),\
    "hidden rescale wrong in gaussian-gaussian rbm"

    # compute the mean
    hidden_mean = be.dot(vdata_scaled, W)  # (batch_size, num_hidden_units)
    hidden_mean += be.broadcast(b, hidden_mean)

    visible_mean = be.dot(hdata_scaled,
                          be.transpose(W))  # (batch_size, num_visible_units)
    visible_mean += be.broadcast(a, visible_mean)

    # update the extrinsic parameters using the layer functions
    rbm.layers[0].update([hdata_scaled], [rbm.weights[0].W_T()])
    rbm.layers[1].update([vdata_scaled], [rbm.weights[0].W()])

    assert be.allclose(visible_var, rbm.layers[0].ext_params.variance),\
    "visible variance wrong in gaussian-gaussian rbm"

    assert be.allclose(hidden_var, rbm.layers[1].ext_params.variance),\
    "hidden variance wrong in gaussian-gaussian rbm"

    assert be.allclose(visible_mean, rbm.layers[0].ext_params.mean),\
    "visible mean wrong in gaussian-gaussian rbm"

    assert be.allclose(hidden_mean, rbm.layers[1].ext_params.mean),\
    "hidden mean wrong in gaussian-gaussian rbm"
Example #21
def test_grbm_config():
    vis_layer = layers.BernoulliLayer(num_vis)
    hid_layer = layers.GaussianLayer(num_hid)
    grbm = model.Model([vis_layer, hid_layer])
    grbm.get_config()
Example #22
def test_hopfield_construction():
    vis_layer = layers.BernoulliLayer(num_vis)
    hid_layer = layers.GaussianLayer(num_hid)
    rbm = model.Model([vis_layer, hid_layer])
Example #23
def example_mnist_grbm(paysage_path=None, show_plot=False):

    num_hidden_units = 500
    batch_size = 50
    num_epochs = 10
    learning_rate = 0.001  # a Gaussian RBM usually requires a smaller learning rate
    mc_steps = 1

    (_, _, shuffled_filepath) = \
            util.default_paths(paysage_path)

    # set up the reader to get minibatches
    data = batch.Batch(shuffled_filepath,
                       'train/images',
                       batch_size,
                       transform=transform,
                       train_fraction=0.99)

    # set up the model and initialize the parameters
    vis_layer = layers.GaussianLayer(data.ncols)
    hid_layer = layers.BernoulliLayer(num_hidden_units)

    rbm = hidden.Model([vis_layer, hid_layer])
    rbm.initialize(data)

    # set up the optimizer, sampler, and fit method
    opt = optimizers.ADAM(rbm,
                          stepsize=learning_rate,
                          scheduler=optimizers.PowerLawDecay(0.1))

    sampler = fit.DrivenSequentialMC.from_batch(rbm, data, method='stochastic')

    cd = fit.PCD(rbm,
                 data,
                 opt,
                 sampler,
                 num_epochs,
                 mcsteps=mc_steps,
                 skip=200,
                 metrics=[
                     'ReconstructionError', 'EnergyDistance', 'EnergyGap',
                     'EnergyZscore'
                 ])

    # fit the model
    print('training with persistent contrastive divergence')
    cd.train()

    # evaluate the model
    # this will be the same as the final epoch results
    # it is repeated here to be consistent with the sklearn rbm example
    metrics = [
        'ReconstructionError', 'EnergyDistance', 'EnergyGap', 'EnergyZscore'
    ]
    performance = fit.ProgressMonitor(0, data, metrics=metrics)

    util.show_metrics(rbm, performance)
    util.show_reconstructions(rbm, data.get('validate'), fit, show_plot)
    util.show_fantasy_particles(rbm, data.get('validate'), fit, show_plot)
    util.show_weights(rbm, show_plot)

    # close the HDF5 store
    data.close()
    print("Done")
Example #24
def test_gaussian_derivatives():
    num_visible_units = 100
    num_hidden_units = 50
    batch_size = 25

    # set a seed for the random number generator
    be.set_seed()

    # set up some layer and model objects
    vis_layer = layers.GaussianLayer(num_visible_units)
    hid_layer = layers.GaussianLayer(num_hidden_units)
    rbm = hidden.Model([vis_layer, hid_layer])

    # randomly set the intrinsic model parameters
    a = be.randn((num_visible_units, ))
    b = be.randn((num_hidden_units, ))
    log_var_a = 0.1 * be.randn((num_visible_units, ))
    log_var_b = 0.1 * be.randn((num_hidden_units, ))
    W = be.randn((num_visible_units, num_hidden_units))

    rbm.layers[0].int_params.loc[:] = a
    rbm.layers[1].int_params.loc[:] = b
    rbm.layers[0].int_params.log_var[:] = log_var_a
    rbm.layers[1].int_params.log_var[:] = log_var_b
    rbm.weights[0].int_params.matrix[:] = W

    # generate a random batch of data
    vdata = rbm.layers[0].random((batch_size, num_visible_units))
    visible_var = be.exp(log_var_a)
    vdata_scaled = vdata / be.broadcast(visible_var, vdata)

    # compute the mean of the hidden layer
    rbm.layers[1].update([vdata_scaled], [rbm.weights[0].W()])
    hidden_var = be.exp(log_var_b)
    hid_mean = rbm.layers[1].mean()
    hid_mean_scaled = rbm.layers[1].rescale(hid_mean)

    # compute the derivatives
    d_vis_loc = -be.mean(vdata_scaled, axis=0)
    d_vis_logvar = -0.5 * be.mean(be.square(be.subtract(a, vdata)), axis=0)
    d_vis_logvar += be.batch_dot(
        hid_mean_scaled, be.transpose(W), vdata, axis=0) / len(vdata)
    d_vis_logvar /= visible_var

    d_hid_loc = -be.mean(hid_mean_scaled, axis=0)

    d_hid_logvar = -0.5 * be.mean(
        be.square(hid_mean - be.broadcast(b, hid_mean)), axis=0)
    d_hid_logvar += be.batch_dot(vdata_scaled, W, hid_mean,
                                 axis=0) / len(hid_mean)
    d_hid_logvar /= hidden_var

    d_W = -be.batch_outer(vdata_scaled, hid_mean_scaled) / len(vdata_scaled)

    # compute the derivatives using the layer functions
    rbm.layers[1].update([vdata_scaled], [rbm.weights[0].W()])
    rbm.layers[0].update([hid_mean_scaled], [rbm.weights[0].W_T()])

    vis_derivs = rbm.layers[0].derivatives(vdata, [hid_mean_scaled],
                                           [rbm.weights[0].W()])

    hid_derivs = rbm.layers[1].derivatives(hid_mean, [vdata_scaled],
                                           [rbm.weights[0].W_T()])

    weight_derivs = rbm.weights[0].derivatives(vdata_scaled, hid_mean_scaled)

    assert be.allclose(d_vis_loc, vis_derivs.loc), \
    "derivative of visible loc wrong in gaussian-gaussian rbm"

    assert be.allclose(d_hid_loc, hid_derivs.loc), \
    "derivative of hidden loc wrong in gaussian-gaussian rbm"

    assert be.allclose(d_vis_logvar, vis_derivs.log_var, rtol=1e-05, atol=1e-01), \
    "derivative of visible log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(d_hid_logvar, hid_derivs.log_var, rtol=1e-05, atol=1e-01), \
    "derivative of hidden log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(d_W, weight_derivs.matrix), \
    "derivative of weights wrong in gaussian-gaussian rbm"
Example #25
def test_Gaussian_creation():
    layers.GaussianLayer(num_vis)
Example #26
def test_gaussian_shrink_parameters():
    ly = layers.GaussianLayer(num_vis)
    ly.shrink_parameters(0.1)
Example #27
def test_gaussian_online_param_update():
    ly = layers.GaussianLayer(num_vis)
    vis = ly.random((num_samples, num_vis))
    ly.online_param_update(vis)
Example #28
def test_gaussian_log_partition_function():
    ly = layers.GaussianLayer(num_vis)
    vis = ly.random((num_samples, num_vis))
    ly.log_partition_function(vis)
Example #29
def test_gaussian_energy():
    ly = layers.GaussianLayer(num_vis)
    vis = ly.random((num_samples, num_vis))
    ly.energy(vis)
Example #30
def test_Gaussian_creation():
    layers.GaussianLayer(num_vis, dropout_p)
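Here dropout_p is another module-level fixture omitted by the excerpt, presumably a dropout probability in [0, 1); see the assumed preamble sketched after Example #1.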