def test_reconstruction_error(self):
    sys.stdout.write('RBM Estimator -> Performing reconstruction_error test ...')
    sys.stdout.flush()
    numx.random.seed(42)
    rec = Estimator.reconstruction_error(self.bbrbm, self.bbrbmData, k=1, beta=1.0,
                                         use_states=True, absolut_error=False)
    assert numx.all(numx.abs(rec) < self.epsilon)
    rec = Estimator.reconstruction_error(self.bbrbm, self.bbrbmData, k=1, beta=1.0,
                                         use_states=False, absolut_error=False)
    assert numx.all(numx.abs(rec) < self.epsilon)
    rec = Estimator.reconstruction_error(self.bbrbm, self.bbrbmData, k=1, beta=1.0,
                                         use_states=True, absolut_error=True)
    assert numx.all(numx.abs(rec) < self.epsilon)
    rec = Estimator.reconstruction_error(self.bbrbm, self.bbrbmData, k=1, beta=1.0,
                                         use_states=False, absolut_error=True)
    assert numx.all(numx.abs(rec) < self.epsilon)
    rec = Estimator.reconstruction_error(self.bbrbm, self.bbrbmData, k=10, beta=1.0,
                                         use_states=False, absolut_error=False)
    assert numx.all(numx.abs(rec) < self.epsilon)
    # Test list input: the data given as a list of row vectors instead of a matrix
    testList = []
    for i in range(self.bbrbmData.shape[0]):
        testList.append(self.bbrbmData[i].reshape(1, 4))
    rec = Estimator.reconstruction_error(self.bbrbm, testList, k=10, beta=1.0,
                                         use_states=False, absolut_error=False)
    assert numx.all(numx.abs(rec) < self.epsilon)
    print(' successfully passed!')
    sys.stdout.flush()
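# For orientation, the quantity under test can be sketched as follows: run k
# steps of Gibbs sampling from the data and compare the reconstruction with
# the input. This is a minimal sketch, assuming a model object that exposes
# probability_h_given_v / probability_v_given_h and sample_h (illustrative
# names mirroring PyDeep's RBM interface); it is not the library code itself.
import numpy as np

def reconstruction_error_sketch(model, data, k=1, absolut_error=False):
    """Mean reconstruction error per sample after k Gibbs steps (sketch)."""
    v = data
    for _ in range(k):
        h = model.sample_h(model.probability_h_given_v(v))  # up pass (states)
        v = model.probability_v_given_h(h)                  # down pass (probs)
    diff = data - v
    if absolut_error:
        return np.mean(np.abs(diff), axis=1)  # mean absolute error per sample
    return np.mean(diff ** 2, axis=1)         # mean squared error per sample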
for epoch in range(epochs):
    # Loop over all batches
    for b in range(0, train_data.shape[0], batch_size):
        batch = train_data[b:b + batch_size, :]
        trainer_pcd.train(data=batch,
                          epsilon=0.01,
                          update_visible_offsets=update_offsets,
                          update_hidden_offsets=update_offsets)

    # Calculate log-likelihood, reconstruction error and expected end time every 5th epoch
    if epoch == 0 or (epoch + 1) % 5 == 0:
        logZ = estimator.partition_function_factorize_h(rbm)
        ll_train = numx.mean(estimator.log_likelihood_v(rbm, logZ, train_data))
        ll_test = numx.mean(estimator.log_likelihood_v(rbm, logZ, test_data))
        re = numx.mean(estimator.reconstruction_error(rbm, train_data))
        print('{}\t\t{:.4f}\t\t\t{:.4f}\t\t\t\t{:.4f}\t\t\t{}'.format(
            epoch + 1, re, ll_train, ll_test,
            measurer.get_expected_end_time(epoch + 1, epochs)))
    else:
        print(epoch + 1)

measurer.end()

# Print end/training time
print("End-time: \t{}".format(measurer.get_end_time()))
print("Training time:\t{}".format(measurer.get_interval()))

# Calculate the true partition function
logZ = estimator.partition_function_factorize_h(rbm, batchsize_exponent=h1)
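# Context for the evaluation above: for a small binary RBM the partition
# function can be computed exactly by enumerating one layer and factorizing
# over the other. A minimal sketch, assuming a weight matrix w (visible x
# hidden), visible bias b and hidden bias c as plain NumPy arrays; the names
# are illustrative, not PyDeep internals.
import itertools
import numpy as np

def exact_log_partition(w, b, c):
    """log Z = log sum_h exp(c^T h) * prod_i (1 + exp(b_i + (w h)_i))."""
    log_terms = []
    for h in itertools.product([0, 1], repeat=w.shape[1]):
        h = np.array(h, dtype=float)
        pre = b + w.dot(h)  # visible pre-activations given hidden state h
        log_terms.append(c.dot(h) + np.sum(np.logaddexp(0.0, pre)))
    log_terms = np.array(log_terms)
    m = np.max(log_terms)
    return m + np.log(np.sum(np.exp(log_terms - m)))  # stable log-sum-exp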
# Training with CD-1
k = 1
trainer_cd = trainer.CD(rbm)

# Train model, print status every 10th epoch
step = 10
print('Training')
print('Epoch\tRE train\tRE test')
for epoch in range(max_epochs + 1):
    # Shuffle training samples (optional)
    train_data = numx.random.permutation(train_data)

    # Print epoch and reconstruction errors every 'step' epochs.
    if epoch % step == 0:
        RE_train = numx.mean(estimator.reconstruction_error(rbm, train_data))
        RE_test = numx.mean(estimator.reconstruction_error(rbm, test_data))
        print('%5d \t%0.5f \t%0.5f' % (epoch, RE_train, RE_test))

    # Train one epoch with gradient restriction/clamping.
    # No weight decay, momentum or sparseness is used.
    for b in range(0, train_data.shape[0], batch_size):
        trainer_cd.train(data=train_data[b:(b + batch_size), :],
                         num_epochs=1,
                         epsilon=[eps, 0.0, eps, eps * 0.1],
                         k=k,
                         momentum=0.0,
                         reg_l1norm=0.0,
                         reg_l2norm=0.0,
                         reg_sparseness=0,
                         desired_sparseness=None)
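# For readers unfamiliar with the update performed inside trainer_cd.train,
# the core of CD-k for a Bernoulli-Bernoulli RBM can be sketched as below.
# A minimal sketch with illustrative parameter names (w, b, c); PyDeep's
# trainer additionally handles the regularization and offset options used above.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd_k_update(w, b, c, batch, k=1, epsilon=0.01, rng=np.random):
    """One CD-k parameter update (sketch)."""
    v0 = batch
    h0 = sigmoid(v0.dot(w) + c)  # positive-phase hidden probabilities
    v, h = v0, h0
    for _ in range(k):
        h_state = (rng.rand(*h.shape) < h).astype(float)  # sample hidden states
        v = sigmoid(h_state.dot(w.T) + b)                 # visible probabilities
        h = sigmoid(v.dot(w) + c)                         # hidden probabilities
    n = float(batch.shape[0])
    w += epsilon * (v0.T.dot(h0) - v.T.dot(h)) / n  # positive minus negative phase
    b += epsilon * np.mean(v0 - v, axis=0)
    c += epsilon * np.mean(h0 - h, axis=0)
    return w, b, c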
# Measuring time
measurer = MEASURE.Stopwatch()

# Train model
print('Training')
print('Epoch\tRecon. Error\tExpected End-Time')
for epoch in range(1, epochs + 1):
    train_data = numx.random.permutation(train_data)
    for b in range(0, train_data.shape[0], batch_size):
        batch = train_data[b:b + batch_size, :]
        trainer.train(data=batch, epsilon=0.1, regL2Norm=0.001)

    # Calculate reconstruction error and expected end time every 10th epoch
    if epoch % 10 == 0:
        RE = numx.mean(ESTIMATOR.reconstruction_error(rbm, train_data))
        print('%d\t\t%8.6f\t\t%s' % (epoch, RE,
                                     measurer.get_expected_end_time(epoch, epochs)))

measurer.end()

# Print end/training time
print()
print('End-time: \t%s' % measurer.get_end_time())
print('Training time:\t%s' % measurer.get_interval())

# Reorder RBM features by average activity, decreasing
reordered_rbm = STATISTICS.reorder_filter_by_hidden_activation(rbm, train_data)

# Display RBM parameters
VISUALIZATION.imshow_standard_rbm_parameters(reordered_rbm, v1, v2, h1, h2)
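# The last two calls sort the learned filters by activity and render them as
# image tiles. A minimal matplotlib sketch of the same idea, assuming a weight
# matrix w of shape (v1*v2, h1*h2); it mirrors, but is not, PyDeep's
# visualization code (the hidden bias is omitted from the activity estimate).
import numpy as np
import matplotlib.pyplot as plt

def show_filters(w, v1, v2, h1, h2, data=None):
    """Tile the columns of w (one filter per hidden unit) into an h1 x h2 grid."""
    if data is not None:
        # Stand-in for reorder_filter_by_hidden_activation: most active first.
        activity = np.mean(1.0 / (1.0 + np.exp(-data.dot(w))), axis=0)
        w = w[:, np.argsort(-activity)]
    grid = np.zeros((h1 * v1, h2 * v2))
    for j in range(w.shape[1]):
        r, c = divmod(j, h2)
        grid[r * v1:(r + 1) * v1, c * v2:(c + 1) * v2] = w[:, j].reshape(v1, v2)
    plt.imshow(grid, cmap='gray', interpolation='nearest')
    plt.axis('off')
    plt.show()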