Example #1
# assumed imports for this snippet (module paths vary across paysage versions):
from paysage import backends as be
from paysage import layers
from paysage.models import hidden

def test_gaussian_derivatives():
    num_visible_units = 100
    num_hidden_units = 50
    batch_size = 25

    # set a seed for the random number generator
    be.set_seed()

    # set up some layer and model objects
    vis_layer = layers.GaussianLayer(num_visible_units)
    hid_layer = layers.GaussianLayer(num_hidden_units)
    rbm = hidden.Model([vis_layer, hid_layer])

    # randomly set the intrinsic model parameters
    a = be.randn((num_visible_units, ))
    b = be.randn((num_hidden_units, ))
    log_var_a = 0.1 * be.randn((num_visible_units, ))
    log_var_b = 0.1 * be.randn((num_hidden_units, ))
    W = be.randn((num_visible_units, num_hidden_units))

    rbm.layers[0].int_params.loc[:] = a
    rbm.layers[1].int_params.loc[:] = b
    rbm.layers[0].int_params.log_var[:] = log_var_a
    rbm.layers[1].int_params.log_var[:] = log_var_b
    rbm.weights[0].int_params.matrix[:] = W

    # generate a random batch of data
    vdata = rbm.layers[0].random((batch_size, num_visible_units))
    visible_var = be.exp(log_var_a)
    vdata_scaled = vdata / be.broadcast(visible_var, vdata)

    # compute the mean of the hidden layer
    rbm.layers[1].update([vdata_scaled], [rbm.weights[0].W()])
    hidden_var = be.exp(log_var_b)
    hid_mean = rbm.layers[1].mean()
    hid_mean_scaled = rbm.layers[1].rescale(hid_mean)

    # compute the derivatives
    d_vis_loc = -be.mean(vdata_scaled, axis=0)
    d_vis_logvar = -0.5 * be.mean(be.square(be.subtract(a, vdata)), axis=0)
    d_vis_logvar += be.batch_dot(
        hid_mean_scaled, be.transpose(W), vdata, axis=0) / len(vdata)
    d_vis_logvar /= visible_var

    d_hid_loc = -be.mean(hid_mean_scaled, axis=0)

    d_hid_logvar = -0.5 * be.mean(
        be.square(hid_mean - be.broadcast(b, hid_mean)), axis=0)
    d_hid_logvar += be.batch_dot(vdata_scaled, W, hid_mean,
                                 axis=0) / len(hid_mean)
    d_hid_logvar /= hidden_var

    d_W = -be.batch_outer(vdata_scaled, hid_mean_scaled) / len(vdata_scaled)

    # compute the derivatives using the layer functions
    rbm.layers[1].update([vdata_scaled], [rbm.weights[0].W()])
    rbm.layers[0].update([hid_mean_scaled], [rbm.weights[0].W_T()])

    vis_derivs = rbm.layers[0].derivatives(vdata, [hid_mean_scaled],
                                           [rbm.weights[0].W()])

    hid_derivs = rbm.layers[1].derivatives(hid_mean, [vdata_scaled],
                                           [rbm.weights[0].W_T()])

    weight_derivs = rbm.weights[0].derivatives(vdata_scaled, hid_mean_scaled)

    assert be.allclose(d_vis_loc, vis_derivs.loc), \
    "derivative of visible loc wrong in gaussian-gaussian rbm"

    assert be.allclose(d_hid_loc, hid_derivs.loc), \
    "derivative of hidden loc wrong in gaussian-gaussian rbm"

    assert be.allclose(d_vis_logvar, vis_derivs.log_var, rtol=1e-05, atol=1e-01), \
    "derivative of visible log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(d_hid_logvar, hid_derivs.log_var, rtol=1e-05, atol=1e-01), \
    "derivative of hidden log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(d_W, weight_derivs.matrix), \
    "derivative of weights wrong in gaussian-gaussian rbm"
Example #2
# assumed imports for this snippet (module paths vary across paysage versions):
from functools import partial
from paysage import backends as be
from paysage import layers
from paysage.models import BoltzmannMachine

def test_gaussian_derivatives():
    num_visible_units = 100
    num_hidden_units = 50
    batch_size = 25

    # set a seed for the random number generator
    be.set_seed()

    # set up some layer and model objects
    vis_layer = layers.GaussianLayer(num_visible_units)
    hid_layer = layers.GaussianLayer(num_hidden_units)
    rbm = BoltzmannMachine([vis_layer, hid_layer])

    # randomly set the intrinsic model parameters
    a = be.randn((num_visible_units,))
    b = be.randn((num_hidden_units,))
    log_var_a = 0.1 * be.randn((num_visible_units,))
    log_var_b = 0.1 * be.randn((num_hidden_units,))
    W = be.randn((num_visible_units, num_hidden_units))

    rbm.layers[0].params.loc[:] = a
    rbm.layers[1].params.loc[:] = b
    rbm.layers[0].params.log_var[:] = log_var_a
    rbm.layers[1].params.log_var[:] = log_var_b
    rbm.connections[0].weights.params.matrix[:] = W

    # generate a random batch of data
    vdata = rbm.layers[0].random((batch_size, num_visible_units))
    visible_var = be.exp(log_var_a)
    vdata_scaled = vdata / visible_var

    # compute the mean of the hidden layer
    hid_mean = rbm.layers[1].conditional_mean(
        [vdata_scaled], [rbm.connections[0].W()])
    hidden_var = be.exp(log_var_b)
    hid_mean_scaled = rbm.layers[1].rescale(hid_mean)

    # compute the derivatives
    d_vis_loc = be.mean((a - vdata) / visible_var, axis=0)
    d_vis_logvar = -0.5 * be.mean(be.square(be.subtract(a, vdata)), axis=0)
    d_vis_logvar += be.batch_quadratic(hid_mean_scaled, be.transpose(W), vdata,
                                       axis=0) / len(vdata)
    d_vis_logvar /= visible_var

    d_hid_loc = be.mean((b - hid_mean) / hidden_var, axis=0)

    d_hid_logvar = -0.5 * be.mean(be.square(hid_mean - b), axis=0)
    d_hid_logvar += be.batch_quadratic(vdata_scaled, W, hid_mean,
                                       axis=0) / len(hid_mean)
    d_hid_logvar /= hidden_var

    d_W = -be.batch_outer(vdata_scaled, hid_mean_scaled) / len(vdata_scaled)

    # compute the derivatives using the layer functions
    vis_derivs = rbm.layers[0].derivatives(vdata, [hid_mean_scaled],
                                           [rbm.connections[0].W(trans=True)])

    hid_derivs = rbm.layers[1].derivatives(hid_mean, [vdata_scaled],
                                           [rbm.connections[0].W()])

    weight_derivs = rbm.connections[0].weights.derivatives(vdata_scaled, hid_mean_scaled)

    # compute simple weighted derivatives using the layer functions
    scale = 2
    scale_func = partial(be.multiply, be.float_scalar(scale))
    vis_derivs_scaled = rbm.layers[0].derivatives(
        vdata, [hid_mean_scaled], [rbm.connections[0].W(trans=True)],
        weighting_function=scale_func)

    hid_derivs_scaled = rbm.layers[1].derivatives(
        hid_mean, [vdata_scaled], [rbm.connections[0].W()],
        weighting_function=scale_func)

    weight_derivs_scaled = rbm.connections[0].weights.derivatives(
        vdata_scaled, hid_mean_scaled, weighting_function=scale_func)

    assert be.allclose(d_vis_loc, vis_derivs[0].loc), \
    "derivative of visible loc wrong in gaussian-gaussian rbm"

    assert be.allclose(d_hid_loc, hid_derivs[0].loc), \
    "derivative of hidden loc wrong in gaussian-gaussian rbm"

    assert be.allclose(d_vis_logvar, vis_derivs[0].log_var, rtol=1e-05, atol=1e-01), \
    "derivative of visible log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(d_hid_logvar, hid_derivs[0].log_var, rtol=1e-05, atol=1e-01), \
    "derivative of hidden log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(d_W, weight_derivs[0].matrix), \
    "derivative of weights wrong in gaussian-gaussian rbm"

    assert be.allclose(scale * d_vis_loc, vis_derivs_scaled[0].loc), \
    "weighted derivative of visible loc wrong in gaussian-gaussian rbm"

    assert be.allclose(scale * d_hid_loc, hid_derivs_scaled[0].loc), \
    "weighted derivative of hidden loc wrong in gaussian-gaussian rbm"

    assert be.allclose(scale * d_vis_logvar, vis_derivs_scaled[0].log_var, rtol=1e-05, atol=1e-01), \
    "weighted derivative of visible log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(scale * d_hid_logvar, hid_derivs_scaled[0].log_var, rtol=1e-05, atol=1e-01), \
    "weighted derivative of hidden log_var wrong in gaussian-gaussian rbm"

    assert be.allclose(scale * d_W, weight_derivs_scaled[0].matrix), \
    "weighted derivative of weights wrong in gaussian-gaussian rbm"
Example #3
# assumed imports for this snippet (module paths vary across paysage versions):
from paysage import backends as be
from paysage import layers
from paysage.models import hidden

def test_gaussian_update():
    num_visible_units = 100
    num_hidden_units = 50
    batch_size = 25

    # set a seed for the random number generator
    be.set_seed()

    # set up some layer and model objects
    vis_layer = layers.GaussianLayer(num_visible_units)
    hid_layer = layers.GaussianLayer(num_hidden_units)
    rbm = hidden.Model([vis_layer, hid_layer])

    # randomly set the intrinsic model parameters
    a = be.randn((num_visible_units, ))
    b = be.randn((num_hidden_units, ))
    log_var_a = 0.1 * be.randn((num_visible_units, ))
    log_var_b = 0.1 * be.randn((num_hidden_units, ))
    W = be.randn((num_visible_units, num_hidden_units))

    rbm.layers[0].int_params.loc[:] = a
    rbm.layers[1].int_params.loc[:] = b
    rbm.layers[0].int_params.log_var[:] = log_var_a
    rbm.layers[1].int_params.log_var[:] = log_var_b
    rbm.weights[0].int_params.matrix[:] = W

    # generate a random batch of data
    vdata = rbm.layers[0].random((batch_size, num_visible_units))
    hdata = rbm.layers[1].random((batch_size, num_hidden_units))

    # compute the variance
    visible_var = be.exp(log_var_a)
    hidden_var = be.exp(log_var_b)

    # rescale the data
    vdata_scaled = vdata / be.broadcast(visible_var, vdata)
    hdata_scaled = hdata / be.broadcast(hidden_var, hdata)

    # test rescale
    assert be.allclose(vdata_scaled, rbm.layers[0].rescale(vdata)),\
    "visible rescale wrong in gaussian-gaussian rbm"

    assert be.allclose(hdata_scaled, rbm.layers[1].rescale(hdata)),\
    "hidden rescale wrong in gaussian-gaussian rbm"

    # compute the mean
    hidden_mean = be.dot(vdata_scaled, W)  # (batch_size, num_hidden_units)
    hidden_mean += be.broadcast(b, hidden_mean)

    visible_mean = be.dot(hdata_scaled,
                          be.transpose(W))  # (batch_size, num_visible_units)
    visible_mean += be.broadcast(a, visible_mean)

    # update the extrinsic parameters using the layer functions
    rbm.layers[0].update([hdata_scaled], [rbm.weights[0].W_T()])
    rbm.layers[1].update([vdata_scaled], [rbm.weights[0].W()])

    assert be.allclose(visible_var, rbm.layers[0].ext_params.variance),\
    "visible variance wrong in gaussian-gaussian rbm"

    assert be.allclose(hidden_var, rbm.layers[1].ext_params.variance),\
    "hidden variance wrong in gaussian-gaussian rbm"

    assert be.allclose(visible_mean, rbm.layers[0].ext_params.mean),\
    "visible mean wrong in gaussian-gaussian rbm"

    assert be.allclose(hidden_mean, rbm.layers[1].ext_params.mean),\
    "hidden mean wrong in gaussian-gaussian rbm"
Example #4
# assumed imports for this snippet (module paths vary across paysage versions):
from paysage import backends as be
from paysage import layers
from paysage.models import BoltzmannMachine

def test_gaussian_conditional_params():
    num_visible_units = 100
    num_hidden_units = 50
    batch_size = 25

    # set a seed for the random number generator
    be.set_seed()

    # set up some layer and model objects
    vis_layer = layers.GaussianLayer(num_visible_units)
    hid_layer = layers.GaussianLayer(num_hidden_units)
    rbm = BoltzmannMachine([vis_layer, hid_layer])

    # randomly set the intrinsic model parameters
    a = be.randn((num_visible_units,))
    b = be.randn((num_hidden_units,))
    log_var_a = 0.1 * be.randn((num_visible_units,))
    log_var_b = 0.1 * be.randn((num_hidden_units,))
    W = be.randn((num_visible_units, num_hidden_units))

    rbm.layers[0].params.loc[:] = a
    rbm.layers[1].params.loc[:] = b
    rbm.layers[0].params.log_var[:] = log_var_a
    rbm.layers[1].params.log_var[:] = log_var_b
    rbm.connections[0].weights.params.matrix[:] = W

    # generate a random batch of data
    vdata = rbm.layers[0].random((batch_size, num_visible_units))
    hdata = rbm.layers[1].random((batch_size, num_hidden_units))

    # compute the variance
    visible_var = be.exp(log_var_a)
    hidden_var = be.exp(log_var_b)

    # rescale the data
    vdata_scaled = vdata / visible_var
    hdata_scaled = hdata / hidden_var

    # test rescale
    assert be.allclose(vdata_scaled, rbm.layers[0].rescale(vdata)),\
    "visible rescale wrong in gaussian-gaussian rbm"

    assert be.allclose(hdata_scaled, rbm.layers[1].rescale(hdata)),\
    "hidden rescale wrong in gaussian-gaussian rbm"

    # compute the mean
    hidden_mean = be.dot(vdata_scaled, W) # (batch_size, num_hidden_units)
    hidden_mean += b

    visible_mean = be.dot(hdata_scaled, be.transpose(W)) # (batch_size, num_visible_units)
    visible_mean += a

    # compute the conditional parameters using the layer functions
    vis_mean_func, vis_var_func = rbm.layers[0].conditional_params(
        [hdata_scaled], [rbm.connections[0].W(trans=True)])
    hid_mean_func, hid_var_func = rbm.layers[1].conditional_params(
        [vdata_scaled], [rbm.connections[0].W()])

    assert be.allclose(visible_var, vis_var_func),\
    "visible variance wrong in gaussian-gaussian rbm"

    assert be.allclose(hidden_var, hid_var_func),\
    "hidden variance wrong in gaussian-gaussian rbm"

    assert be.allclose(visible_mean, vis_mean_func),\
    "visible mean wrong in gaussian-gaussian rbm"

    assert be.allclose(hidden_mean, hid_mean_func),\
    "hidden mean wrong in gaussian-gaussian rbm"