def test_ising_ising_derivatives():
    """Check analytic parameter derivatives of an Ising-Ising RBM.

    Renamed from ``test_ising_derivatives`` because a second function with
    that name exists later in this module and would silently shadow this
    one, so pytest would never collect/run this test.
    """
    num_visible_units = 100
    num_hidden_units = 50
    batch_size = 25

    # set a seed for the random number generator
    be.set_seed()

    # set up some layer and model objects
    vis_layer = layers.IsingLayer(num_visible_units)
    hid_layer = layers.IsingLayer(num_hidden_units)
    rbm = hidden.Model([vis_layer, hid_layer])

    # randomly set the intrinsic model parameters
    a = be.randn((num_visible_units, ))
    b = be.randn((num_hidden_units, ))
    W = be.randn((num_visible_units, num_hidden_units))

    rbm.layers[0].int_params.loc[:] = a
    rbm.layers[1].int_params.loc[:] = b
    rbm.weights[0].int_params.matrix[:] = W

    # generate a random batch of data
    vdata = rbm.layers[0].random((batch_size, num_visible_units))
    vdata_scaled = rbm.layers[0].rescale(vdata)

    # compute the mean of the hidden layer conditioned on the visible batch
    rbm.layers[1].update([vdata], [rbm.weights[0].W()])
    hid_mean = rbm.layers[1].mean()
    hid_mean_scaled = rbm.layers[1].rescale(hid_mean)

    # compute the derivatives by hand (negative batch averages)
    d_visible_loc = -be.mean(vdata, axis=0)
    d_hidden_loc = -be.mean(hid_mean_scaled, axis=0)
    d_W = -be.batch_outer(vdata, hid_mean_scaled) / len(vdata)

    # compute the derivatives using the layer functions
    vis_derivs = rbm.layers[0].derivatives(vdata, [hid_mean_scaled],
                                           [rbm.weights[0].W()])
    hid_derivs = rbm.layers[1].derivatives(hid_mean, [vdata_scaled],
                                           [rbm.weights[0].W_T()])
    weight_derivs = rbm.weights[0].derivatives(vdata, hid_mean_scaled)

    assert be.allclose(d_visible_loc, vis_derivs.loc), \
        "derivative of visible loc wrong in ising-ising rbm"
    assert be.allclose(d_hidden_loc, hid_derivs.loc), \
        "derivative of hidden loc wrong in ising-ising rbm"
    assert be.allclose(d_W, weight_derivs.matrix), \
        "derivative of weights wrong in ising-ising rbm"
def test_ising_conditional_params_beta():
    """Smoke test: ``_conditional_params`` accepts an inverse-temperature beta.

    Renamed from ``test_ising_conditional_params`` because a later function in
    this module reuses that name, which would shadow this definition and keep
    pytest from ever running it.
    """
    ly = layers.IsingLayer(num_vis)
    w = layers.Weights((num_vis, num_hid))
    scaled_units = [be.randn((num_samples, num_hid))]
    weights = [w.W_T()]
    beta = be.rand((num_samples, 1))
    # only checks that the call runs without raising
    ly._conditional_params(scaled_units, weights, beta)
def test_ising_build_from_config():
    """Round-trip an IsingLayer (with a constraint and a penalty) through its config."""
    layer = layers.IsingLayer(num_vis)
    layer.add_constraint({'loc': constraints.non_negative})
    layer.add_penalty({'log_var': penalties.l2_penalty(0.37)})
    rebuilt = layers.Layer.from_config(layer.get_config())
    assert rebuilt.get_config() == layer.get_config()
def test_ising_update_beta():
    """Smoke test: ``update`` accepts an inverse-temperature beta.

    Renamed from ``test_ising_update`` because a later function in this
    module reuses that name, which would shadow this definition and keep
    pytest from ever running it.
    """
    ly = layers.IsingLayer(num_vis)
    w = layers.Weights((num_vis, num_hid))
    scaled_units = [be.randn((num_samples, num_hid))]
    weights = [w.W_T()]
    beta = be.rand((num_samples, 1))
    # only checks that the call runs without raising
    ly.update(scaled_units, weights, beta)
def test_ising_derivatives():
    """Smoke test: ``derivatives`` runs with an inverse-temperature beta."""
    layer = layers.IsingLayer(num_vis)
    w = layers.Weights((num_vis, num_hid))
    observed = layer.random((num_samples, num_vis))
    connected = [be.randn((num_samples, num_hid))]
    connecting_weights = [w.W_T()]
    beta = be.rand((num_samples, 1))
    # only checks that the call runs without raising
    layer.derivatives(observed, connected, connecting_weights, beta)
def test_ising_conditional_params():
    """Compare hand-computed conditional fields of an Ising-Ising RBM
    against the layer ``_conditional_params`` implementations."""
    n_vis, n_hid, n_batch = 100, 50, 25

    # fix the random number generator seed
    be.set_seed()

    # build an ising-ising rbm
    v_layer = layers.IsingLayer(n_vis)
    h_layer = layers.IsingLayer(n_hid)
    rbm = model.Model([v_layer, h_layer])

    # draw random model parameters and write them into the model in place
    a = be.randn((n_vis, ))
    b = be.randn((n_hid, ))
    W = be.randn((n_vis, n_hid))
    rbm.layers[0].params.loc[:] = a
    rbm.layers[1].params.loc[:] = b
    rbm.weights[0].params.matrix[:] = W

    # random visible and hidden batches
    vdata = rbm.layers[0].random((n_batch, n_vis))
    hdata = rbm.layers[1].random((n_batch, n_hid))

    # conditional fields computed by hand
    hidden_field = be.dot(vdata, W)  # (n_batch, n_hid)
    hidden_field += be.broadcast(b, hidden_field)
    visible_field = be.dot(hdata, be.transpose(W))  # (n_batch, n_vis)
    visible_field += be.broadcast(a, visible_field)

    # conditional fields from the layer methods
    hidden_field_func = rbm.layers[1]._conditional_params(
        [vdata], [rbm.weights[0].W()])
    visible_field_func = rbm.layers[0]._conditional_params(
        [hdata], [rbm.weights[0].W_T()])

    assert be.allclose(hidden_field, hidden_field_func), \
        "hidden field wrong in ising-ising rbm"
    assert be.allclose(visible_field, visible_field_func), \
        "visible field wrong in ising-ising rbm"
def test_ising_update():
    """Check the extrinsic field set by ``update`` against hand-computed
    values for an Ising-Ising RBM."""
    n_vis, n_hid, n_batch = 100, 50, 25

    # fix the random number generator seed
    be.set_seed()

    # build an ising-ising rbm
    v_layer = layers.IsingLayer(n_vis)
    h_layer = layers.IsingLayer(n_hid)
    rbm = hidden.Model([v_layer, h_layer])

    # draw random model parameters and write them into the model
    a = be.randn((n_vis, ))
    b = be.randn((n_hid, ))
    W = be.randn((n_vis, n_hid))
    rbm.layers[0].int_params['loc'] = a
    rbm.layers[1].int_params['loc'] = b
    rbm.weights[0].int_params['matrix'] = W

    # random visible and hidden batches
    vdata = rbm.layers[0].random((n_batch, n_vis))
    hdata = rbm.layers[1].random((n_batch, n_hid))

    # extrinsic fields computed by hand
    hidden_field = be.dot(vdata, W)  # (n_batch, n_hid)
    hidden_field += be.broadcast(b, hidden_field)
    visible_field = be.dot(hdata, be.transpose(W))  # (n_batch, n_vis)
    visible_field += be.broadcast(a, visible_field)

    # update the extrinsic parameters through the layer methods
    rbm.layers[1].update(vdata, rbm.weights[0].W())
    rbm.layers[0].update(hdata, be.transpose(rbm.weights[0].W()))

    assert be.allclose(hidden_field, rbm.layers[1].ext_params['field']), \
        "hidden field wrong in ising-ising rbm"
    assert be.allclose(visible_field, rbm.layers[0].ext_params['field']), \
        "visible field wrong in ising-ising rbm"
def test_Ising_creation():
    """Smoke test: an IsingLayer can be constructed without raising."""
    layers.IsingLayer(num_vis)
def test_ising_online_param_update():
    """Smoke test: ``online_param_update`` runs on a random sample batch."""
    layer = layers.IsingLayer(num_vis)
    samples = layer.random((num_samples, num_vis))
    # only checks that the call runs without raising
    layer.online_param_update(samples)
def test_ising_log_partition_function():
    """Smoke test: ``log_partition_function`` runs on a random sample batch."""
    layer = layers.IsingLayer(num_vis)
    samples = layer.random((num_samples, num_vis))
    # only checks that the call runs without raising
    layer.log_partition_function(samples)
def test_ising_energy():
    """Smoke test: ``energy`` runs on a random sample batch."""
    layer = layers.IsingLayer(num_vis)
    samples = layer.random((num_samples, num_vis))
    # only checks that the call runs without raising
    layer.energy(samples)