def test_reverse_annealed_importance_sampling(self):
    """Check that reverse AIS log-partition estimates converge to the true
    value as the temperature schedule (number of betas) is refined.

    The allowed error shrinks by a factor of 10 each time the number of
    intermediate distributions grows by a factor of 10.
    """
    sys.stdout.write('RBM Estimator -> Performing reverse_annealed_importance_sampling test ...')
    sys.stdout.flush()
    # Fixed seed so the stochastic AIS chains are reproducible.
    numx.random.seed(42)
    # (number of betas, absolute tolerance on log Z) — finer schedule, tighter bound.
    for num_betas, tolerance in ((100, 0.5), (1000, 0.05), (10000, 0.005)):
        LogZ = Estimator.reverse_annealed_importance_sampling(
            self.bbrbm, num_chains=100, k=1, betas=num_betas, status=False)
        assert numx.all(numx.abs(LogZ[0] - self.bbrbmTruelogZ) < tolerance)
    print(' successfully passed!')
    sys.stdout.flush()
# Exact log partition function by factorizing over the hidden units,
# then the resulting exact log-likelihood on train and test data.
logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1, status=False)
print("True Partition: {} (LL train: {}, LL test: {})".format(
    logZ,
    numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data)),
    numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))))

# Approximate partition function by AIS (tends to overestimate)
logZ_approx_AIS = estimator.annealed_importance_sampling(rbm)[0]
print("AIS Partition: {} (LL train: {}, LL test: {})".format(
    logZ_approx_AIS,
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_AIS, train_data)),
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_AIS, test_data))))

# Approximate partition function by reverse AIS (tends to underestimate)
logZ_approx_rAIS = estimator.reverse_annealed_importance_sampling(rbm)[0]
print("reverse AIS Partition: {} (LL train: {}, LL test: {})".format(
    logZ_approx_rAIS,
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_rAIS, train_data)),
    numx.mean(estimator.log_likelihood_v(rbm, logZ_approx_rAIS, test_data))))

# Reorder RBM features by average activity decreasingly
reordered_rbm = vis.reorder_filter_by_hidden_activation(rbm, train_data)

# Display RBM parameters
vis.imshow_standard_rbm_parameters(reordered_rbm, v1, v2, h1, h2)

# Sample some steps and show results
# NOTE(review): arguments to generate_samples (30 samples, 30 steps?, v1/v2
# image dims) are positional — confirm meaning against vis.generate_samples.
samples = vis.generate_samples(rbm, train_data[0:30], 30, 1, v1, v2, False, None)
vis.imshow_matrix(samples, 'Samples')
use_hidden_states=False, use_centered_gradient=False)

# NOTE(review): this chunk uses Python-2 print statements — it targets Python 2.

# Calculate reconstruction error
RE_train = numx.mean(estimator.reconstruction_error(rbm, train_data))
RE_test = numx.mean(estimator.reconstruction_error(rbm, test_data))
print '%5d \t%0.5f \t%0.5f' % (max_epochs, RE_train, RE_test)

# Approximate partition function by AIS (tends to overestimate)
logZ = estimator.annealed_importance_sampling(rbm)[0]
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print 'AIS: \t%0.5f \t%0.5f' % (LL_train, LL_test)

# Approximate partition function by reverse AIS (tends to underestimate)
logZ = estimator.reverse_annealed_importance_sampling(rbm)[0]
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print 'reverse AIS \t%0.5f \t%0.5f' % (LL_train, LL_test)

# Reorder RBM features by average activity decreasingly
rbmReordered = vis.reorder_filter_by_hidden_activation(rbm, train_data)

# Display RBM parameters
vis.imshow_standard_rbm_parameters(rbmReordered, v1, v2, h1, h2)

# Sample some steps and show results
samples = vis.generate_samples(rbm, train_data[0:30], 30, 1, v1, v2, False, None)
vis.imshow_matrix(samples, 'Samples')
# Exact log partition function (factorized over hidden units) and the
# corresponding exact log-likelihood, printed alongside reconstruction errors.
logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1)
LL_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
LL_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
print '%5d \t%0.5f \t%0.5f \t%0.5f \t%0.5f' % (epoch, RE_train, RE_test, LL_train, LL_test)

# Calculate partition function and its AIS approximation
logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1)
logZ_AIS = estimator.annealed_importance_sampling(
    rbm, num_chains=100, k=1, betas=1000, status=False)[0]
logZ_rAIS = estimator.reverse_annealed_importance_sampling(
    rbm, num_chains=100, k=1, betas=1000, status=False)[0]

# Calculate and print LL
# NOTE(review): chunk mixes a Python-2 print statement above with print()
# calls below — confirm the intended Python version for this script.
print("")
print("\nTrue log partition: ", logZ, " ( LL_train: ",
      numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data)), ",",
      "LL_test: ", numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data)), " )")
print("\nAIS log partition: ", logZ_AIS, " ( LL_train: ",
      numx.mean(estimator.log_likelihood_v(rbm, logZ_AIS, train_data)), ",",
      "LL_test: ", numx.mean(estimator.log_likelihood_v(rbm, logZ_AIS, test_data)), " )")
print("\nrAIS log partition: ", logZ_rAIS, " ( LL_train: ",
      numx.mean(estimator.log_likelihood_v(rbm, logZ_rAIS,