Example #1
# imports assumed for a standalone run of this paysage test
import numpy
from paysage import batch, fit, layers, optimizers, samplers, schedules
from paysage import backends as be
from paysage.models import BoltzmannMachine

def test_gaussian_1D_1mode_train():
    # create some example data
    num = 10000
    mu = 3
    sigma = 1
    samples = be.randn((num, 1)) * sigma + mu

    # set up the reader to get minibatches
    batch_size = 100
    samples_train, samples_validate = batch.split_tensor(samples, 0.9)
    data = batch.Batch({
        'train': batch.InMemoryTable(samples_train, batch_size),
        'validate': batch.InMemoryTable(samples_validate, batch_size)
    })

    # parameters
    learning_rate = schedules.PowerLawDecay(initial=0.1, coefficient=0.1)
    mc_steps = 1
    num_epochs = 10
    num_sample_steps = 100

    # set up the model and initialize the parameters
    vis_layer = layers.GaussianLayer(1)
    hid_layer = layers.OneHotLayer(1)

    rbm = BoltzmannMachine([vis_layer, hid_layer])
    rbm.initialize(data, method='hinton')

    # modify the parameters to shift the initialized model from the data
    # this forces it to train
    rbm.layers[0].params = layers.ParamsGaussian(
        rbm.layers[0].params.loc - 3, rbm.layers[0].params.log_var - 1)

    # set up the optimizer and the fit method
    opt = optimizers.ADAM(stepsize=learning_rate)
    cd = fit.SGD(rbm, data)

    # fit the model
    print('training with persistent contrastive divergence')
    cd.train(opt, num_epochs, method=fit.pcd, mcsteps=mc_steps)

    # sample data from the trained model
    model_state = samplers.SequentialMC.generate_fantasy_state(
        rbm, num, num_sample_steps)
    pts_trained = model_state[0]

    percent_error = 10
    mu_trained = be.mean(pts_trained)
    assert numpy.abs(mu_trained / mu - 1) < (percent_error / 100)

    sigma_trained = numpy.sqrt(be.var(pts_trained))
    assert numpy.abs(sigma_trained / sigma - 1) < (percent_error / 100)
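
The test above treats fit.pcd as a black box. As a rough illustration of the
algorithm that method names, here is a minimal numpy sketch of persistent
contrastive divergence for a Bernoulli-Bernoulli RBM. This is a sketch only,
not paysage's implementation, and every name in it is hypothetical.

import numpy as np

rng = np.random.default_rng(0)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# toy binary data: two repeated patterns
data = np.repeat(np.array([[1, 1, 0, 0], [0, 0, 1, 1]], dtype=float), 50, axis=0)

n_vis, n_hid, lr, mc_steps = 4, 8, 0.05, 1
W = 0.01 * rng.standard_normal((n_vis, n_hid))
b = np.zeros(n_vis)  # visible bias
c = np.zeros(n_hid)  # hidden bias

# persistent fantasy particles: initialized once, never reset from the data
fantasy_v = (rng.random((data.shape[0], n_vis)) < 0.5).astype(float)

for epoch in range(100):
    # positive phase: hidden probabilities driven by the data
    ph_data = sigmoid(data @ W + c)

    # negative phase: advance the persistent chain by mc_steps Gibbs sweeps
    for _ in range(mc_steps):
        ph = sigmoid(fantasy_v @ W + c)
        h = (rng.random(ph.shape) < ph).astype(float)
        pv = sigmoid(h @ W.T + b)
        fantasy_v = (rng.random(pv.shape) < pv).astype(float)
    ph_model = sigmoid(fantasy_v @ W + c)

    # stochastic gradient ascent on the log-likelihood estimate
    n = data.shape[0]
    W += lr * (data.T @ ph_data - fantasy_v.T @ ph_model) / n
    b += lr * (data.mean(axis=0) - fantasy_v.mean(axis=0))
    c += lr * (ph_data.mean(axis=0) - ph_model.mean(axis=0))

The defining feature is that fantasy_v persists across parameter updates
instead of being re-initialized from the data each step, which is what
separates PCD from plain contrastive divergence.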
Example #2
# imports assumed for a standalone run of this paysage test
import numpy as np
from paysage import backends as be
from paysage import math_utils

def test_mean_variance():
    # create some random data
    s = be.rand((100000,))

    # reference result
    ref_mean = be.mean(s)
    ref_var = be.var(s)

    # do the online calculation
    mv = math_utils.MeanVarianceCalculator()
    for i in range(10):
        mv.update(s[i * 10000:(i + 1) * 10000])

    assert be.allclose(be.float_tensor(np.array([ref_mean])),
                       be.float_tensor(np.array([mv.mean])))
    assert be.allclose(be.float_tensor(np.array([ref_var])),
                       be.float_tensor(np.array([mv.var])),
                       rtol=1e-4,
                       atol=1e-7)
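
Examples #2 through #4 use math_utils.MeanVarianceCalculator as a black box.
A standard way to implement this kind of streaming estimate is the chunk-merge
form of Welford's algorithm (Chan et al.); the sketch below shows the general
technique under that assumption and is not paysage's code.

import numpy as np

class OnlineMeanVariance:
    """Streaming mean/variance via chunk merging."""
    def __init__(self):
        self.n = 0
        self.mean = 0.0
        self.M2 = 0.0  # running sum of squared deviations from the mean

    def update(self, chunk):
        chunk = np.asarray(chunk, dtype=float)
        m = chunk.size
        chunk_mean = chunk.mean()
        chunk_M2 = ((chunk - chunk_mean) ** 2).sum()
        delta = chunk_mean - self.mean
        total = self.n + m
        # merge rule: M2_AB = M2_A + M2_B + delta^2 * n_A * n_B / n_AB
        self.M2 += chunk_M2 + delta ** 2 * self.n * m / total
        self.mean += delta * m / total
        self.n = total

    @property
    def var(self):
        return self.M2 / self.n if self.n else 0.0

# cross-check against a single-pass numpy computation
s = np.random.rand(100000)
ov = OnlineMeanVariance()
for i in range(10):
    ov.update(s[i * 10000:(i + 1) * 10000])
assert np.isclose(ov.mean, s.mean())
assert np.isclose(ov.var, s.var())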
Example #3
# imports assumed for a standalone run of this paysage test
from paysage import backends as be
from paysage import math_utils

def test_mean_variance_2d():
    # create some random data
    num = 10000
    dim2 = 10
    num_steps = 10
    stepsize = num // num_steps
    s = be.rand((num, dim2))

    # reference result
    ref_mean = be.mean(s, axis=0)
    ref_var = be.var(s, axis=0)

    # do the online calculation
    mv = math_utils.MeanVarianceArrayCalculator()
    for i in range(num_steps):
        mv.update(s[i*stepsize:(i+1)*stepsize])

    assert be.allclose(ref_mean, mv.mean)
    assert be.allclose(ref_var, mv.var, rtol=1e-3, atol=1e-5)
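
The 2D variant is the same idea applied per column: MeanVarianceArrayCalculator
tracks one mean and one variance for each of the dim2 features, matching the
axis=0 reference above. The tolerances are presumably looser than exact
equality because a streaming estimate accumulates slightly more floating-point
error than numpy's single-pass reduction.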
Example #4
# imports assumed for a standalone run of this paysage test
import numpy as np
from paysage import backends as be
from paysage import math_utils

def test_mean_variance():
    # create some random data
    num = 100000
    num_steps = 10
    stepsize = num // num_steps
    s = be.rand((num,))

    # reference result
    ref_mean = be.mean(s)
    ref_var = be.var(s)

    # do the online calculation
    mv = math_utils.MeanVarianceCalculator()
    for i in range(num_steps):
        mv.update(s[i*stepsize:(i+1)*stepsize])

    assert be.allclose(be.float_tensor(np.array([ref_mean])),
                       be.float_tensor(np.array([mv.mean])))
    assert be.allclose(be.float_tensor(np.array([ref_var])),
                       be.float_tensor(np.array([mv.var])),
                       rtol=1e-3, atol=1e-5)
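
One caveat with this chunking pattern: stepsize = num // num_steps only covers
every sample when num divides evenly by num_steps, as it does here. With a
non-divisible num, the final num % num_steps elements would never reach
mv.update, and the online statistics would be computed over fewer samples than
the full-tensor reference.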