Code example #1
0
 def test_annealed_importance_sampling(self):
     """Check that AIS estimates of the log partition function converge.

     Runs annealed importance sampling on ``self.bbrbm`` with an
     increasing number of intermediate temperatures (``betas``) and
     asserts that the estimate of log Z approaches the known true value
     ``self.bbrbmTruelogZ`` with a correspondingly tighter tolerance.
     """
     sys.stdout.write(
         'RBM Estimator -> Performing annealed_importance_sampling test ...'
     )
     sys.stdout.flush()
     # Fixed seed so the stochastic AIS estimates are reproducible.
     numx.random.seed(42)
     # More intermediate temperatures -> lower estimator variance, so the
     # acceptable deviation from the true log Z shrinks by 10x each step.
     for num_betas, tolerance in ((100, 0.5), (1000, 0.05), (10000, 0.005)):
         LogZ = Estimator.annealed_importance_sampling(self.bbrbm,
                                                       num_chains=100,
                                                       k=1,
                                                       betas=num_betas,
                                                       status=False)
         # LogZ[0] is the mean estimate over all chains.
         assert numx.all(numx.abs(LogZ[0] - self.bbrbmTruelogZ) < tolerance)
     print(' successfully passed!')
     sys.stdout.flush()
Code example #2
0
def partition_function_AIS(model,
                           num_chains=100,
                           k=1,
                           betas=10000,
                           status=False):
    ''' Approximates the partition function for the given model using annealed
        importance sampling.

        :Parameters:
            model:      The model.
                       -type: Valid RBM model.

            num_chains: Number of AIS runs.
                       -type: int

            k:          Number of Gibbs sampling steps.
                       -type: int

            betas:      Number or a list of inverse temperatures to sample from.
                       -type: int, numpy array [num_betas]

            status:     If true prints the progress on console.
                       -type: bool


        :Returns:
            Mean estimated log partition function.
           -type: float
            Mean +3std estimated log partition function.
           -type: float
            Mean -3std estimated log partition function.
           -type: float

    '''
    # We transform the DBM to an RBM with restricted connections: the outer
    # layers (b1/o1 and b3/o3) are stacked into a single visible layer with
    # W1 and W2.T as its weights, and the middle layer (b2/o2) becomes the
    # hidden layer. This lets us reuse the standard RBM AIS estimator.
    rbm = RBM_MODEL.BinaryBinaryRBM(
        number_visibles=model.input_dim + model.hidden2_dim,
        number_hiddens=model.hidden1_dim,
        data=None,
        initial_weights=numx.vstack((model.W1, model.W2.T)),
        initial_visible_bias=numx.hstack((model.b1, model.b3)),
        initial_hidden_bias=model.b2,
        initial_visible_offsets=numx.hstack((model.o1, model.o3)),
        initial_hidden_offsets=model.o2)
    # Run AIS for the transformed DBM and return its (mean, +3std, -3std)
    # log partition estimates unchanged.
    return RBM_ESTIMATOR.annealed_importance_sampling(model=rbm,
                                                      num_chains=num_chains,
                                                      k=k,
                                                      betas=betas,
                                                      status=status)
Code example #3
0
                         desired_sparseness=None,
                         update_visible_offsets=0.0,
                         update_hidden_offsets=0.0,
                         offset_typ='00',
                         restrict_gradient=restrict,
                         restriction_norm='Cols',
                         use_hidden_states=False,
                         use_centered_gradient=False)

# Calculate reconstruction error (averaged over all samples)
RE_train = numx.mean(estimator.reconstruction_error(rbm, train_data))
RE_test = numx.mean(estimator.reconstruction_error(rbm, test_data))
print '%5d \t%0.5f \t%0.5f' % (max_epochs, RE_train, RE_test)

# Approximate partition function by AIS (tends to overestimate)
logZ = estimator.annealed_importance_sampling(rbm)[0]
# Mean log-likelihood of the data given the estimated log partition function.
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print 'AIS: \t%0.5f \t%0.5f' % (LL_train, LL_test)

# Approximate partition function by reverse AIS (tends to underestimate)
# Together with AIS above this brackets the true log partition function.
logZ = estimator.reverse_annealed_importance_sampling(rbm)[0]
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print 'reverse AIS \t%0.5f \t%0.5f' % (LL_train, LL_test)

# Reorder RBM features by average activity decreasingly
rbmReordered = vis.reorder_filter_by_hidden_activation(rbm, train_data)

# Display RBM parameters
vis.imshow_standard_rbm_parameters(rbmReordered, v1, v2, h1, h2)
Code example #4
0
# Stop the timer started before training.
measurer.end()

# Print end/training time
print("End-time: \t{}".format(measurer.get_end_time()))
print("Training time:\t{}".format(measurer.get_interval()))

# Calculate true partition function (exact, by factorizing over the
# hidden units; feasible only for small hidden layers).
logZ = estimator.partition_function_factorize_h(rbm,
                                                batchsize_exponent=h1,
                                                status=False)
print("True Partition: {} (LL train: {}, LL test: {})".format(
    logZ, numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data)),
    numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))))

# Approximate partition function by AIS (tends to overestimate)
logZ_approx_AIS = estimator.annealed_importance_sampling(rbm)[0]
print("AIS Partition: {} (LL train: {}, LL test: {})".format(
    logZ_approx_AIS,
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_AIS, train_data)),
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_AIS, test_data))))

# Approximate partition function by reverse AIS (tends to underestimate),
# so AIS and reverse AIS together bracket the true value printed above.
logZ_approx_rAIS = estimator.reverse_annealed_importance_sampling(rbm)[0]
print("reverse AIS Partition: {} (LL train: {}, LL test: {})".format(
    logZ_approx_rAIS,
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_rAIS, train_data)),
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_rAIS, test_data))))

# Reorder RBM features by average activity decreasingly
reordered_rbm = vis.reorder_filter_by_hidden_activation(rbm, train_data)
Code example #5
0
                         use_centered_gradient=False)

    # Calculate Log likelihood and reconstruction error for this epoch.
    RE_train = numx.mean(estimator.reconstruction_error(rbm, train_data))
    RE_test = numx.mean(estimator.reconstruction_error(rbm, test_data))
    # Exact log partition function by factorizing over the hidden units.
    logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1)
    LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
    LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
    print '%5d \t%0.5f \t%0.5f \t%0.5f \t%0.5f' % (epoch, RE_train, RE_test,
                                                   LL_train, LL_test)

# Calculate partition function and its AIS approximation.
# AIS tends to overestimate and reverse AIS to underestimate log Z, so the
# two approximations bracket the exact value. [0] selects the mean estimate.
logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1)
logZ_AIS = estimator.annealed_importance_sampling(rbm,
                                                  num_chains=100,
                                                  k=1,
                                                  betas=1000,
                                                  status=False)[0]
logZ_rAIS = estimator.reverse_annealed_importance_sampling(rbm,
                                                           num_chains=100,
                                                           k=1,
                                                           betas=1000,
                                                           status=False)[0]

# Calculate and print LL
print("")
print("\nTrue log partition: ", logZ, " ( LL_train: ",
      numx.mean(estimator.log_likelihood_v(rbm, logZ,
                                           train_data)), ",", "LL_test: ",
      numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data)), " )")
print("\nAIS  log partition: ", logZ_AIS, " ( LL_train: ",