Example 1
0
def compute_reconstructions(rbm,
                            v_data,
                            fit,
                            n_recon=10,
                            vertical=False,
                            num_to_avg=1):
    """Pair data samples with their (averaged) RBM reconstructions.

    Args:
        rbm: the model used to reconstruct the data.
        v_data: a batch of visible-layer data.
        fit: the paysage fit module (provides DrivenSequentialMC).
        n_recon (int): number of examples to put in the grid.
        vertical (bool): if True, grid is (n_recon, 2, ...);
            otherwise axes 0 and 1 are swapped.
        num_to_avg (int): number of reconstruction passes to average.

    Returns:
        numpy.ndarray: grid of [data, reconstruction] pairs.

    Raises:
        ValueError: if num_to_avg < 1 (would otherwise divide by zero).
    """
    if num_to_avg < 1:
        raise ValueError("num_to_avg must be >= 1")

    v_model = be.zeros_like(v_data)

    # Average over num_to_avg reconstruction attempts.
    # A fresh state is built each pass because the sampler updates the
    # state it is given in place.
    for _ in range(num_to_avg):
        data_state = State.from_visible(v_data, rbm)
        reconstructions = fit.DrivenSequentialMC(rbm)
        reconstructions.set_state(data_state)
        dropout_scale = State.dropout_rescale(rbm)
        reconstructions.update_state(1, dropout_scale)
        v_model += rbm.deterministic_iteration(1, reconstructions.state,
                                               dropout_scale).units[0]

    v_model /= num_to_avg

    # Choose n_recon distinct examples to display.
    idx = numpy.random.choice(range(len(v_model)), n_recon, replace=False)
    # Use v_data directly (rather than a loop-local reference to the
    # sampler's state) so the left column is always the original data.
    grid = numpy.array(
        [[be.to_numpy_array(v_data[i]),
          be.to_numpy_array(v_model[i])] for i in idx])
    if vertical:
        return grid
    else:
        return grid.swapaxes(0, 1)
Example 2
0
def test_state_for_grad_DrivenSequentialMC():
    """Check DrivenSequentialMC.state_for_grad for every updater:
    the clamped visible layer must be untouched, and the hidden layer
    must equal its conditional mean given the data."""
    n_vis, n_hid, batch = 100, 50, 25

    # fix the RNG so the test is deterministic
    be.set_seed()

    # build a two-layer Bernoulli RBM
    rbm = model.Model([layers.BernoulliLayer(n_vis),
                       layers.BernoulliLayer(n_hid)])

    # assign random model parameters (same be.randn call order as setup
    # elsewhere: visible bias, hidden bias, weights)
    rbm.layers[0].params.loc[:] = be.randn((n_vis, ))
    rbm.layers[1].params.loc[:] = be.randn((n_hid, ))
    rbm.weights[0].params.matrix[:] = be.randn((n_vis, n_hid))

    # draw a random visible batch and wrap it in a State
    vdata = rbm.layers[0].random((batch, n_vis))
    data_state = State.from_visible(vdata, rbm)
    dropout_scale = State.dropout_rescale(rbm)

    # since we set no dropout, dropout_scale should be None
    assert dropout_scale is None

    updaters = ('markov_chain', 'mean_field_iteration',
                'deterministic_iteration')
    for u in updaters:
        # sampler with the visible layer (index 0) clamped
        sampler = fit.DrivenSequentialMC(rbm, updater=u, clamped=[0])
        sampler.set_state(data_state)

        # one gradient-state update: only the hidden layer may change
        grad_state = sampler.state_for_grad(1, dropout_scale)

        assert be.allclose(data_state.units[0], grad_state.units[0]), \
        "visible layer is clamped, and shouldn't get updated: {}".format(u)

        assert not be.allclose(data_state.units[1], grad_state.units[1]), \
        "hidden layer is not clamped, and should get updated: {}".format(u)

        # recompute the conditional mean directly from the layer
        ave = rbm.layers[1].conditional_mean(
            rbm._connected_rescaled_units(1, data_state, dropout_scale),
            rbm._connected_weights(1))

        assert be.allclose(ave, grad_state.units[1]), \
        "hidden layer of grad_state should be conditional mean: {}".format(u)
Example 3
0
def compute_reconstructions(rbm, v_data, fit, n_recon=10, vertical=False):
    """Reconstruct v_data with one sampler step and return a grid that
    pairs n_recon randomly chosen data points with their reconstructions.
    If vertical is False, the pair axis comes first."""
    # sampler is constructed before the state (keeps any RNG use in order)
    sampler = fit.DrivenSequentialMC(rbm)
    data_state = State.from_visible(v_data, rbm)
    sampler.set_positive_state(data_state)
    sampler.update_positive_state(1)
    v_model = rbm.deterministic_iteration(1, sampler.pos_state).units[0]

    # pick n_recon distinct indices to display
    chosen = numpy.random.choice(range(len(v_model)), n_recon, replace=False)
    pairs = [[be.to_numpy_array(v_data[i]), be.to_numpy_array(v_model[i])]
             for i in chosen]
    grid = numpy.array(pairs)
    return grid if vertical else grid.swapaxes(0, 1)
Example 4
0
def test_clamped_SequentialMC():
    """Check SequentialMC with the visible layer clamped, for every
    updater: the visible units must stay fixed while the hidden units
    change after an update."""
    n_vis, n_hid, batch = 100, 50, 25
    steps = 1

    # fix the RNG so the test is deterministic
    be.set_seed()

    # build a two-layer Bernoulli RBM
    rbm = model.Model([layers.BernoulliLayer(n_vis),
                       layers.BernoulliLayer(n_hid)])

    # assign random model parameters (be.randn call order preserved:
    # visible bias, hidden bias, weights)
    rbm.layers[0].params.loc[:] = be.randn((n_vis, ))
    rbm.layers[1].params.loc[:] = be.randn((n_hid, ))
    rbm.weights[0].params.matrix[:] = be.randn((n_vis, n_hid))

    # draw a random visible batch and wrap it in a State
    vdata = rbm.layers[0].random((batch, n_vis))
    data_state = State.from_visible(vdata, rbm)
    dropout_scale = State.dropout_rescale(rbm)

    # since we set no dropout, dropout_scale should be None
    assert dropout_scale is None

    for u in ('markov_chain', 'mean_field_iteration',
              'deterministic_iteration'):
        # sampler with the visible layer (index 0) clamped
        sampler = fit.SequentialMC(rbm, updater=u, clamped=[0])
        sampler.set_state(data_state)

        # advance the chain and inspect the resulting state
        sampler.update_state(steps, dropout_scale)

        assert be.allclose(data_state.units[0], sampler.state.units[0]), \
        "visible layer is clamped, and shouldn't get updated: {}".format(u)

        assert not be.allclose(data_state.units[1], sampler.state.units[1]), \
        "hidden layer is not clamped, and should get updated: {}".format(u)
Example 5
0
def compute_fantasy_particles(rbm, v_data, fit, n_fantasy=25):
    """Draw fantasy particles from the model and arrange them in a grid.

    Args:
        rbm: the model to sample from.
        v_data: a visible batch used only to size the random start state.
        fit: the paysage fit module (provides DrivenSequentialMC).
        n_fantasy (int): number of particles; must be a perfect square.

    Returns:
        numpy.ndarray: samples reshaped to (grid_size, grid_size, -1).

    Raises:
        ValueError: if n_fantasy is not a perfect square.
    """
    grid_size = int(sqrt(n_fantasy))
    # Integer comparison avoids float-precision pitfalls, and raising
    # (instead of `assert`, which is stripped under `python -O`) keeps
    # the validation active in optimized runs.
    if grid_size * grid_size != n_fantasy:
        raise ValueError("n_fantasy must be the square of an integer")

    # start the chain from random visible configurations
    random_samples = rbm.random(v_data)
    model_state = State.from_visible(random_samples, rbm)

    # anneal the driving noise while the chain runs
    schedule = schedules.PowerLawDecay(initial=1.0, coefficient=0.5)
    fantasy = fit.DrivenSequentialMC(rbm, schedule=schedule)
    dropout_scale = State.dropout_rescale(rbm)
    fantasy.set_state(model_state)
    fantasy.update_state(1000, dropout_scale)

    # one deterministic pass to get clean visible activations
    v_model = rbm.deterministic_iteration(1, fantasy.state,
                                          dropout_scale).units[0]
    idx = numpy.random.choice(range(len(v_model)), n_fantasy, replace=False)

    grid = numpy.array([be.to_numpy_array(v_model[i]) for i in idx])
    return grid.reshape(grid_size, grid_size, -1)