def get_CRBM(how_long=4000): print "Creating data...." mean1 = np.array([-0.2, 0.3]) mean2 = np.array([0.5, -0.5]) cov1 = np.array([[0.02, 0.005], [0.001, 0.01]]) cov2 = np.array([[0.02, 0.0], [0.0, 0.02]]) dataset = np.random.multivariate_normal(mean1, cov1, 200).tolist() dataset += np.random.multivariate_normal(mean2, cov2, 200).tolist() r = CRBM(2, 4) # Train at the following learning rates: print "Training..." for i in range(how_long): print ' ' + str(i) + '...', r.train_epoch(dataset, (0.9, 0.9), 20) E = np.sum([r.energy(data) for data in dataset]) print 'Energy = ' + str(E) + '...', print 'Done' return r, dataset
    # Tail of a method whose `def` lies above this chunk: hand back the full
    # model parameter set as a 6-tuple.
    return self.pi, self.A, self.O, self.A_ijk, self.O_jl, self.O_jf


if __name__ == "__main__":
    # Experiment configuration. NOTE(review): exact semantics of `num_str`,
    # `itr` and `session_len` are not visible in this chunk -- presumably
    # iteration count and session length; confirm against the full script.
    num_str = '25'
    itr = 5
    feedback_tperiod = 5  # feedback is recorded once every this many steps (see array sizing below)
    session_len = 250
    it_str = str(itr)
    f_str = str(feedback_tperiod)

    # import initial transition and emission probability matrices
    prob_transition = CRBM.CRBM('transition')
    prob_emission = CRBM.CRBM('emission')
    prob_emission_f = CRBM.CRBM('emission_f')
    a_matrix = prob_transition.total_transition_probs()
    o_matrix = prob_emission._o_jk()
    o_f_matrix = prob_emission_f._o_jf()

    # Training Data Import
    TrainingD = trainingData.TrainingData()
    [training_input_seq, training_output_seq, feedback_seq_all] = TrainingD.io_sequence_generator()
    training_total_len = len(training_input_seq)
    # One feedback slot per `feedback_tperiod` input steps, plus one spare.
    training_output_f_seq = np.zeros((int(len(training_input_seq)//feedback_tperiod)+1,))
    output_f_time_stamp = np.zeros_like(training_output_f_seq)  # feedback time stamps
from scipy import *
import pickle

# Make the sibling Categorical_Boltzmann_Machines package importable.
sys.path.insert(0, '../Categorical_Boltzmann_Machines')

# Wide, low-precision numpy printing for eyeballing large probability matrices.
np.set_printoptions(linewidth=700)
np.set_printoptions(precision=3, edgeitems=25)

# prob_transition = CRBM.CRBM('transition')
# A_average = prob_transition.total_transition_probs()
# for i in arange(0, 1000, 5):
#     barchart(A_average[i])
#     view(azimuth=180, elevation=180)
#     show()

# prob_emission = CRBM.CRBM('emission')
# O_average = prob_emission._o_jk()
#
# prob_emission_f = CRBM.CRBM('emission_f')
# O_f_average = prob_emission_f._o_jf()

# TestD = testdata.TrainingData()
# [test_input_seq, test_output_seq, test_output_f_seq] = TestD.io_sequence_generator()

# NOTE(review): the "test" sequences are drawn from the *training* data
# generator (the testdata variant above is commented out) -- presumably
# intentional for this run; confirm against the experiment setup.
TrainingD = trainingData.TrainingData()
[test_input_seq, test_output_seq, test_output_f_seq] = TrainingD.io_sequence_generator()

# Load dataset metadata written by an earlier data-generation step.
with open('data_info.pickle', 'rb') as d_i:
    data_info = pickle.load(d_i)
#dataset = [[-1.0 + np.random.normal(0,0.15) + 0.02*n, 0.5 + np.random.normal(0,0.2)] for n in range(100)] XY = np.array(dataset) # And show it f = plt.figure(1) plt.scatter(XY[:,0], XY[:,1], 20, 'b', 'o') plt.title('Distribution') plt.draw() f.show() # Make a CRBM print "Creating RBM" rbm = CRBM(2,20) rbm.lo = -2 rbm.hi = 2 # Train it k=20 for i in range(10000): #for i in range(0): err = rbm.train_epoch(dataset, 0.1, k) if i%10 == 0: print "Epoch", i, ": Error =", err # Now reconstruct some stuff XY_rec = np.random.uniform(-2,2,(100,2))