def test_log_likelihood_v(self):
    print('RBM Estimator -> Performing log_likelihood_v test ...')
    sys.stdout.flush()
    numx.random.seed(42)
    ll = numx.mean(Estimator.log_likelihood_v(self.bbrbm, self.bbrbmTruelogZ,
                                              self.bbrbmData, 1.0))
    assert numx.all(numx.abs(ll - self.bbrbmTrueLL) < self.epsilon)
    # Test list input
    testList = []
    for i in range(self.bbrbmData.shape[0]):
        testList.append(self.bbrbmData[i].reshape(1, 4))
    ll = numx.mean(Estimator.log_likelihood_v(self.bbrbm, self.bbrbmTruelogZ,
                                              testList, 1.0))
    assert numx.all(numx.abs(ll - self.bbrbmTrueLL) < self.epsilon)
    print('successfully passed!')
    sys.stdout.flush()
def test_log_likelihood_h(self):
    print('RBM Estimator -> Performing log_likelihood_h test ...')
    sys.stdout.flush()
    numx.random.seed(42)
    hdata = numx.float64(self.bbrbm.probability_h_given_v(self.bbrbmData) < 0.5)
    ll = numx.mean(Estimator.log_likelihood_h(self.bbrbm, self.bbrbmTruelogZ,
                                              hdata, 1.0))
    assert numx.all(numx.abs(ll + 9.55929166739) < self.epsilon)
    # Test list input (must call log_likelihood_h here as well, not log_likelihood_v)
    testList = []
    for i in range(hdata.shape[0]):
        testList.append(hdata[i].reshape(1, 4))
    ll = numx.mean(Estimator.log_likelihood_h(self.bbrbm, self.bbrbmTruelogZ,
                                              testList, 1.0))
    assert numx.all(numx.abs(ll + 9.55929166739) < self.epsilon)
    print('successfully passed!')
    sys.stdout.flush()
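# Sketch (assumption, not part of the original test suite): for a small
# binary-binary RBM like self.bbrbm, the reference value self.bbrbmTruelogZ
# can be recomputed exactly by factorizing the partition sum over the hidden
# units, which is tractable when the hidden layer is small. A consistency
# check along these lines could look as follows.
def test_true_log_partition_consistency(self):
    trueLogZ = Estimator.partition_function_factorize_h(self.bbrbm)
    assert numx.abs(trueLogZ - self.bbrbmTruelogZ) < self.epsilon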
                  update_visible_offsets=0.0,
                  update_hidden_offsets=0.0,
                  offset_typ='00',
                  restrict_gradient=restrict,
                  restriction_norm='Cols',
                  use_hidden_states=False,
                  use_centered_gradient=False)

# Calculate reconstruction error on training and test data
RE_train = numx.mean(estimator.reconstruction_error(rbm, train_data))
RE_test = numx.mean(estimator.reconstruction_error(rbm, test_data))
print('%5d \t%0.5f \t%0.5f' % (max_epochs, RE_train, RE_test))

# Approximate the partition function by AIS (tends to overestimate logZ)
logZ = estimator.annealed_importance_sampling(rbm)[0]
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print('AIS: \t%0.5f \t%0.5f' % (LL_train, LL_test))

# Approximate the partition function by reverse AIS (tends to underestimate logZ)
logZ = estimator.reverse_annealed_importance_sampling(rbm)[0]
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print('reverse AIS \t%0.5f \t%0.5f' % (LL_train, LL_test))

# Reorder the RBM features by average hidden activation, in decreasing order
rbmReordered = vis.reorder_filter_by_hidden_activation(rbm, train_data)

# Display the RBM parameters
vis.imshow_standard_rbm_parameters(rbmReordered, v1, v2, h1, h2)
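# Sketch (assumption, reusing only the estimator calls shown above): because
# AIS tends to overestimate logZ (pushing the log-likelihood estimate down)
# while reverse AIS tends to underestimate it, the two results can be read
# as a rough lower/upper bracket around the true test log-likelihood.
logZ_ais = estimator.annealed_importance_sampling(rbm)[0]
logZ_rais = estimator.reverse_annealed_importance_sampling(rbm)[0]
LL_low = numx.mean(estimator.log_likelihood_v(rbm, logZ_ais, test_data))
LL_high = numx.mean(estimator.log_likelihood_v(rbm, logZ_rais, test_data))
print('Test LL bracket: \t%0.5f .. %0.5f' % (LL_low, LL_high))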
    'Epoch\tRecon. Error\tLog likelihood train\tLog likelihood test\tExpected End-Time')
for epoch in range(epochs):

    # Loop over all batches
    for b in range(0, train_data.shape[0], batch_size):
        batch = train_data[b:b + batch_size, :]
        trainer_pcd.train(data=batch,
                          epsilon=0.01,
                          update_visible_offsets=update_offsets,
                          update_hidden_offsets=update_offsets)

    # Calculate log-likelihood, reconstruction error and expected end time every 5th epoch
    if epoch == 0 or (epoch + 1) % 5 == 0:
        logZ = estimator.partition_function_factorize_h(rbm)
        ll_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
        ll_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
        re = numx.mean(estimator.reconstruction_error(rbm, train_data))
        print('{}\t\t{:.4f}\t\t\t{:.4f}\t\t\t\t{:.4f}\t\t\t{}'.format(
            epoch + 1, re, ll_train, ll_test,
            measurer.get_expected_end_time(epoch + 1, epochs)))
    else:
        print(epoch + 1)

measurer.end()

# Print end and training time
print("End-time: \t{}".format(measurer.get_end_time()))
print("Training time:\t{}".format(measurer.get_interval()))

# Calculate the true partition function
              reg_l1norm=0.0,
              reg_l2norm=0.0,
              reg_sparseness=0.0,
              desired_sparseness=0.0,
              update_visible_offsets=0.0,
              update_hidden_offsets=0.0,
              restrict_gradient=False,
              restriction_norm='Cols',
              use_hidden_states=False,
              use_centered_gradient=False)

# Calculate log-likelihood and reconstruction error
RE_train = numx.mean(estimator.reconstruction_error(rbm, train_data))
RE_test = numx.mean(estimator.reconstruction_error(rbm, test_data))
logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1)
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print('%5d \t%0.5f \t%0.5f \t%0.5f \t%0.5f' % (epoch, RE_train, RE_test,
                                               LL_train, LL_test))

# Calculate the exact partition function and its AIS approximations
logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1)
logZ_AIS = estimator.annealed_importance_sampling(rbm,
                                                  num_chains=100,
                                                  k=1,
                                                  betas=1000,
                                                  status=False)[0]
logZ_rAIS = estimator.reverse_annealed_importance_sampling(rbm,
                                                           num_chains=100,
                                                           k=1,
                                                           betas=1000,
# Measure the training time
measurer = MEASURE.Stopwatch()

# Train the model
print("Training")
print("Epoch\tRecon. Error\tLog likelihood \tExpected End-Time")
for epoch in range(1, epochs + 1):
    # Shuffle the training samples and loop over all batches
    train_data = numx.random.permutation(train_data)
    for b in range(0, train_data.shape[0], batch_size):
        batch = train_data[b:b + batch_size, :]
        trainer.train(data=batch, epsilon=0.1, regL2Norm=0.001)

    # Calculate log-likelihood, reconstruction error and expected end time every 10th epoch
    if epoch % 10 == 0:
        Z = ESTIMATOR.partition_function_factorize_h(rbm)
        LL = numx.mean(ESTIMATOR.log_likelihood_v(rbm, Z, train_data))
        RE = numx.mean(ESTIMATOR.reconstruction_error(rbm, train_data))
        print("%d\t\t%8.6f\t\t%8.4f\t\t%s" % (
            epoch, RE, LL, measurer.get_expected_end_time(epoch, epochs)))

measurer.end()

# Print end and training time
print()
print("End-time: \t{}".format(measurer.get_end_time()))
print("Training time:\t{}".format(measurer.get_interval()))

# Calculate and approximate the partition function
Z = ESTIMATOR.partition_function_factorize_h(rbm, batchsize_exponent=h1, status=False)
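# Sketch (assumption, mirroring the AIS calls used in the examples above):
# compare the exact log partition function with its AIS approximation; the
# two values should agree closely for a small, well-trained model.
Z_AIS = ESTIMATOR.annealed_importance_sampling(rbm, num_chains=100, k=1,
                                               betas=1000, status=False)[0]
print("Exact logZ: %0.5f \tAIS logZ: %0.5f" % (Z, Z_AIS))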