class Experience(object):
    """Wrap one DBN training run: parameter-file handling, training,
    evaluation and persistence.

    NOTE(review): relies on module-level names imported elsewhere in this
    file (path, EXP_DIR, Queue, Observer, DBN).
    """

    def __init__(self, N, name=None, disp=False, noise=None):
        """Load or create the experience named *name*.

        N     -- dataset size passed to DataFeeder.
        name  -- experience name; auto-generated ('exp<i>') when None.
        disp  -- display flag forwarded to the Observer.
        noise -- noise parameter forwarded to the DBN.
        """
        # If no name, we create an experience: first unused 'exp<i>' slot.
        if name is None:
            num = 1
            fname = path.join(EXP_DIR, 'exp%i.data' % num)
            while path.exists(fname):
                num += 1
                fname = path.join(EXP_DIR, 'exp%i.data' % num)
            name = 'exp%i' % num
        self.name = name

        # If there exists no previous file, we create one from the
        # alphabetic-stem template (e.g. 'exp.data' for 'exp3'),
        # falling back to the default experience.
        fname = path.join(EXP_DIR, '%s.data' % name)
        if not path.exists(fname):
            e_name = ''.join(c for c in name if c.isalpha())
            e_name = path.join(EXP_DIR, '%s.data' % e_name)
            from shutil import copy
            if not path.exists(e_name):
                copy(path.join(EXP_DIR, 'default.data'), fname)
            else:
                copy(e_name, fname)

        # We load the experience parameters (a JSON dict).
        import json
        with open(fname) as f:
            params_exp = json.load(f)
        self.fname = fname
        self.params_exp = params_exp

        # Create the instance of the experience.
        self.queue = Queue()
        self.obs = lambda: Observer(name, self.queue, disp)
        self.dbn = DBN(params_exp['lay_shape'], self.queue, noise)

        # Load the model weights if they exist.
        self.exists = self.dbn.load(name)

        # Load the dataset and split between train and test set.
        from DataFeeder import DataFeeder
        self.data = DataFeeder(N, batch_s=1000)

    def pretrain(self, epochs=30, lr=0.1):
        """Greedy layer-wise pre-training.

        lr may be a number or a callable epoch -> learning rate.
        """
        # FIX: the original `type(lr) == float` test skipped wrapping for
        # integer rates (e.g. lr=1); wrap anything not already callable.
        l_r = lr if callable(lr) else (lambda e: lr)
        self.dbn.pretrain(self.data.X_trn, epochs=epochs, lr=l_r)
        for i in range(self.params_exp['N_layer']):
            self.params_exp['epochs_lay'][i] += epochs

    def fine_tune(self, epochs=30, lr=.1, cl=.1, lcost=False, dropout=False):
        """Supervised fine-tuning under an Observer process.

        lr and cl may be numbers or callables epoch -> value.
        """
        l_r = lr if callable(lr) else (lambda e: lr)
        c_l = cl if callable(cl) else (lambda e: cl)
        p = self.obs()
        p.start()
        # FIX: always signal the observer and join it, even when training
        # raises — otherwise a failure leaves the Observer process hanging.
        try:
            self.dbn.fine_tune(self.data, epochs=epochs, lr=l_r, cl=c_l,
                               lcost=lcost, dropout=dropout)
            self.params_exp['epochs_ft'] += epochs
        finally:
            self.queue.put(('end', ))
            p.join()

    def eval_perf(self):
        """Print silhouette scores of the learned code vs the raw input."""
        X_tst, y_tst = self.data.get_test_set()
        code = self.dbn.f_code(X_tst)
        from sklearn import metrics
        sil_c = metrics.silhouette_score(code, y_tst)
        sil_X = metrics.silhouette_score(X_tst, y_tst)
        # FIX: Python-2-only print statements replaced with calls that emit
        # the same text under both Python 2 and Python 3.
        print('Silhouette code y %s' % sil_c)
        print('Silhouette X y %s' % sil_X)

    def save(self):
        '''Save the experience so we can reload it later
        '''
        # Save the experience parameters
        import json
        with open(self.fname, 'w') as f:
            json.dump(self.params_exp, f)
        # Save the model weights
        self.dbn.save(self.name)
# Train a DBN on MNIST: greedy RBM pre-training followed by supervised fit.
import tensorflow.examples.tutorials.mnist.input_data as input_data
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from DBN import DBN

# Fetch MNIST with one-hot labels and pull out the train/test arrays.
dataset = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_train, Y_train = dataset.train.images, dataset.train.labels
X_test, Y_test = dataset.test.images, dataset.test.labels

# Network / optimisation configuration: three stacked binary RBMs
# (784 -> 500 -> 300 -> 100) topped by a 10-way output layer.
dbn_config = dict(
    n_units=[784, 500, 300, 100, 10],
    learning_rate_rbm=[0.001, 0.001, 0.001],
    batch_size_rbm=[100, 100, 100],
    n_epoch_rbm=[10, 10, 10],
    visible_unit_type_rbm=['bin', 'bin', 'bin'],
    weight_cost_rbm=0.0001,
    momentum_rbm=0.5,
    learning_rate_dbn=0.001,
    batch_size_dbn=100,
    n_epoch_dbn=100,
    dropout_dbn=[1, 1, 1],
)
dbn = DBN(**dbn_config)

# Layer-wise unsupervised pre-training, then supervised fine-tuning.
dbn.pretrain(X_train, X_test)
dbn.fit(X_train, Y_train, X_test, Y_test)
# ---------------------------------------------------------------------------
# Split features/targets, train the DBN, then plot predictions vs. reality.
# NOTE(review): relies on names defined earlier in this file (trainset,
# testset, DBN, hidden_units, input_length, output_length, device,
# epoch_pretrain, epoch_finetune, batch_size, loss_function, optimizer,
# scaler, plt, mean_squared_error).
# ---------------------------------------------------------------------------
# Last column is the target; everything before it is the feature vector.
x_train = trainset[:, :-1]
y_train = trainset[:, -1:]
x_test = testset[:, :-1]
y_test = testset[:, -1:]
print('x_train.shape:' + str(x_train.shape))
print('y_train.shape:' + str(y_train.shape))
print('x_test.shape:' + str(x_test.shape))
# FIX: label was missing the ':' that the three prints above all have.
print('y_test.shape:' + str(y_test.shape))

# Build model
dbn = DBN(hidden_units, input_length, output_length, device=device)

# Train model: unsupervised layer-wise pre-training, then supervised
# fine-tuning of the whole stack.
dbn.pretrain(x_train, epoch=epoch_pretrain, batch_size=batch_size)
dbn.finetune(x_train, y_train, epoch_finetune, batch_size, loss_function,
             optimizer(dbn.parameters()))

# Make prediction and plot
y_predict = dbn.predict(x_test, batch_size)
# Undo the target scaling so the plot and the MSE are in original units.
y_real = scaler.inverse_transform(y_test.reshape(-1, 1)).flatten()
y_predict = scaler.inverse_transform(y_predict.reshape(-1, 1)).flatten()
plt.figure(1)
plt.plot(y_real, label='real')
plt.plot(y_predict, label='prediction')
plt.xlabel('MSE Error: {}'.format(mean_squared_error(y_real, y_predict)))
plt.legend()
plt.title('Prediction result')
plt.show()
weight_decay=weight_decay)  # trailing argument of a call opened above this chunk

# Assemble the DBN.
# NOTE(review): vis, hidden_layers, sample_copies, sampler, optimizer,
# continuous_out, device, pre_trained, data, pretrain_epochs, finetune_lr,
# finetune_epochs, batch_size and test are defined earlier in the file.
dbn = DBN(n_visible=vis,
          hidden_layer_sizes=hidden_layers,
          sample_copies=sample_copies,
          sampler=sampler,
          optimizer=optimizer,
          continuous_output=continuous_out,
          device=device)
# Reuse previously saved weights instead of training from scratch.
if pre_trained:
    dbn.load_model('DBN.h5')

# -----------------------------------------------------------------------------
# Training
# -----------------------------------------------------------------------------
if not pre_trained:
    # Greedy layer-wise pre-training, then joint fine-tuning; persist weights.
    dbn.pretrain(input_data=data,
                 epochs=pretrain_epochs,
                 batch_size=batch_size,
                 test=test)
    dbn.finetune(input_data=data,
                 lr=finetune_lr,
                 epochs=finetune_epochs,
                 batch_size=batch_size)
    dbn.save_model('DBN.h5')

# -----------------------------------------------------------------------------
# Plotting
# -----------------------------------------------------------------------------
print('#########################################')
print('# Generating samples #')
print('#########################################')
# Top-most generative layer (presumably the deepest RBM) used for sampling.
top_RBM = dbn.gen_layers[-1]
class Experience(object):
    """Driver for a DBN experiment: config-file management, training,
    evaluation and persistence.

    NOTE(review): this duplicates the Experience class defined earlier in
    this file — only one definition survives at import time.  Depends on
    module-level names (path, EXP_DIR, Queue, Observer, DBN).
    """

    def __init__(self, N, name=None, disp=False, noise=None):
        """Open the experiment *name*, creating its config when missing.

        N     -- dataset size handed to DataFeeder.
        name  -- experiment name; when None the next free 'exp<i>' is used.
        disp  -- display flag passed on to the Observer.
        noise -- noise setting passed on to the DBN.
        """
        # Auto-generate a fresh 'exp<i>' name when none was supplied.
        if name is None:
            num = 1
            fname = path.join(EXP_DIR, 'exp%i.data' % num)
            while path.exists(fname):
                num += 1
                fname = path.join(EXP_DIR, 'exp%i.data' % num)
            name = 'exp%i' % num
        self.name = name

        # No config yet: seed it from the alphabetic-stem template
        # (e.g. 'exp.data' for 'exp3') or from default.data.
        fname = path.join(EXP_DIR, '%s.data' % name)
        if not path.exists(fname):
            e_name = ''.join(c for c in name if c.isalpha())
            e_name = path.join(EXP_DIR, '%s.data' % e_name)
            from shutil import copy
            if not path.exists(e_name):
                copy(path.join(EXP_DIR, 'default.data'), fname)
            else:
                copy(e_name, fname)

        # Load the experiment parameters (a JSON dict).
        import json
        with open(fname) as f:
            params_exp = json.load(f)
        self.fname = fname
        self.params_exp = params_exp

        # Build the run-time objects for this experiment.
        self.queue = Queue()
        self.obs = lambda: Observer(name, self.queue, disp)
        self.dbn = DBN(params_exp['lay_shape'], self.queue, noise)

        # Restore previously saved weights when available.
        self.exists = self.dbn.load(name)

        # Dataset, pre-split into train and test batches.
        from DataFeeder import DataFeeder
        self.data = DataFeeder(N, batch_s=1000)

    def pretrain(self, epochs=30, lr=0.1):
        """Layer-wise pre-training; *lr* is a number or an epoch->rate callable."""
        # FIX: the original `type(lr) == float` check left integer rates
        # (lr=1) unwrapped; wrap anything that is not already callable.
        if not callable(lr):
            lr_fn = lambda e: lr
        else:
            lr_fn = lr
        self.dbn.pretrain(self.data.X_trn, epochs=epochs, lr=lr_fn)
        for i in range(self.params_exp['N_layer']):
            self.params_exp['epochs_lay'][i] += epochs

    def fine_tune(self, epochs=30, lr=.1, cl=.1, lcost=False, dropout=False):
        """Supervised fine-tuning; *lr*/*cl* are numbers or epoch->value callables."""
        if not callable(lr):
            lr_fn = lambda e: lr
        else:
            lr_fn = lr
        if not callable(cl):
            cl_fn = lambda e: cl
        else:
            cl_fn = cl
        p = self.obs()
        p.start()
        # FIX: guarantee the observer is told to stop and is joined even if
        # training raises; otherwise the Observer process leaks.
        try:
            self.dbn.fine_tune(self.data, epochs=epochs, lr=lr_fn, cl=cl_fn,
                               lcost=lcost, dropout=dropout)
            self.params_exp['epochs_ft'] += epochs
        finally:
            self.queue.put(('end',))
            p.join()

    def eval_perf(self):
        """Report silhouette scores for the learned code and the raw input."""
        X_tst, y_tst = self.data.get_test_set()
        code = self.dbn.f_code(X_tst)
        from sklearn import metrics
        sil_c = metrics.silhouette_score(code, y_tst)
        sil_X = metrics.silhouette_score(X_tst, y_tst)
        # FIX: replaced Python-2-only print statements with calls that emit
        # the same text under both Python 2 and Python 3.
        print('Silhouette code y %s' % sil_c)
        print('Silhouette X y %s' % sil_X)

    def save(self):
        '''Save the experience so we can reload it later
        '''
        # Persist the experiment parameters ...
        import json
        with open(self.fname, 'w') as f:
            json.dump(self.params_exp, f)
        # ... and the model weights.
        self.dbn.save(self.name)