import os
import numpy as np
import matplotlib.pyplot as plt
from tfrbm import BBRBM  # assumed import path: a tfrbm-style BBRBM implementation

def make_BBRBM_kfold(train_data, valid_data, num_visible, num_hidden, num_epoches, k, save_path, layer_num):
    # Transform the data with a Bernoulli-Bernoulli restricted Boltzmann machine.
    bbrbm = BBRBM(n_visible=num_visible, n_hidden=num_hidden, learning_rate=0.001, momentum=0.9, use_tqdm=True)
    foldName = str(k) + '_fold/' + 'layer' + str(layer_num) + '/'
    createFolder(save_path + foldName)
    if os.listdir(save_path + foldName):
        # Reuse previously trained weights for this fold/layer if a checkpoint already exists.
        bbrbm.load_weights(filename=save_path + foldName, name='layer' + str(layer_num) + '_model')
    else:
        errs, errs_val = bbrbm.fit(train_data, valid_data, n_epoches=num_epoches, batch_size=20)
        # Save each error curve before plt.show(), otherwise the saved figure may be blank.
        plt.plot(errs)
        plt.savefig(save_path + foldName + 'train.png')
        plt.show()
        plt.figure()
        plt.plot(errs_val)
        plt.savefig(save_path + foldName + 'val.png')
        plt.show()
        bbrbm.save_weights(filename=save_path + foldName, name='layer' + str(layer_num) + '_model')
    # Encode the training data row by row; the validation data is encoded in one batch.
    transform_data_train = np.zeros([train_data.shape[0], num_hidden]).astype(np.float32)
    for i in range(train_data.shape[0]):
        transform_data_train[i, :] = bbrbm.transform(train_data[i, :].reshape(1, -1))
    transform_data_val = bbrbm.transform(valid_data)
    return transform_data_train, transform_data_val
def make_BBRBM_layer2(train, validation, num_visible, num_hidden, num_epoches, lr, save_path):
    bbrbm = BBRBM(n_visible=num_visible, n_hidden=num_hidden, learning_rate=lr, momentum=0.9, use_tqdm=True)
    if os.listdir(save_path):
        bbrbm.load_weights(filename=save_path, name='fusion_layer')
    else:
        errs, errs_val = bbrbm.fit(train, validation, n_epoches=num_epoches, batch_size=20)
        plt.plot(errs)
        plt.show()
        plt.plot(errs_val)
        plt.show()
        # bbrbm.save_weights(filename=save_path, name='fusion_layer')
    transform_train = bbrbm.transform(train)
    transform_valid = bbrbm.transform(validation)
    return transform_train, transform_valid
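# A minimal usage sketch (not part of the original script) showing how the two helpers
# above could be chained into a two-layer stacked RBM per fold. The arrays fold_train /
# fold_valid, the SAVE_PATH, the fold index, and all hyperparameter values below are
# hypothetical placeholders; only make_BBRBM_kfold and make_BBRBM_layer2 come from the
# code above.
SAVE_PATH = 'rbm_models/'   # hypothetical output directory
k = 0                       # hypothetical fold index

# Layer 1: encode the raw fold data with the first RBM.
h1_train, h1_valid = make_BBRBM_kfold(fold_train, fold_valid,
                                      num_visible=fold_train.shape[1], num_hidden=512,
                                      num_epoches=30, k=k, save_path=SAVE_PATH, layer_num=1)

# Layer 2: feed the layer-1 hidden activations into the fusion RBM.
h2_train, h2_valid = make_BBRBM_layer2(h1_train, h1_valid,
                                       num_visible=h1_train.shape[1], num_hidden=256,
                                       num_epoches=30, lr=0.001, save_path=SAVE_PATH + 'fusion/')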
print("train_data_list.shape", train_x.shape) print("label_list.shape", train_y.shape) test_x = train_x[7:] test_y = train_y[7:] print("test_data_list.shape", test_x.shape) print("label_list.shape", test_y.shape) # RBM bbrbm = BBRBM(n_visible=train_x.shape[1], n_hidden=640, learning_rate=0.01, momentum=0.95, use_tqdm=True) errs = bbrbm.fit(train_x, n_epoches=2, batch_size=10) doc_vec = bbrbm.transform(train_x.reshape(train_x.shape[0], -1)) # doc_vec = doc_vec.reshape(train_data_list.shape[0],10,-1) print("doc_vec.shape", doc_vec.shape) # tensorflow placeholders tf_x = tf.placeholder( tf.float32, [None, TIME_STEP * INPUT_SIZE]) # shape(batch, 64*10) image = tf.reshape( tf_x, [-1, TIME_STEP, INPUT_SIZE]) # (batch, height, width, channel) tf_y = tf.placeholder(tf.int32, [None, 2]) # input y # RNN lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=32) #lstm_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell, output_keep_prob=0.75) # Dropout层 outputs, (h_c, h_n) = tf.nn.dynamic_rnn( lstm_cell, # cell you have chosen
bb_n_epoches_1 = 10
bb_batch_size_1 = 128
errs_1 = bbrbm_1.fit(data_x=bb_input_data_1, n_epoches=bb_n_epoches_1,
                     batch_size=bb_batch_size_1, shuffle=True, verbose=True)
bb_filename_1 = 'pretrain_models/bbrbm_1.ckpt'
bb_name_1 = 'rbm'
bbrbm_1.save_weights(filename=bb_filename_1, name=bb_name_1)

# Begin pretraining the second Bernoulli-Bernoulli RBM on the hidden activations of the first.
bb_input_data_2 = bbrbm_1.transform(bb_input_data_1)
bb_input_data_2 = np.array(bb_input_data_2)
bb_n_visible_2 = bb_input_data_2.shape[1]
bb_n_hid_2 = 2048
bb_learning_rate_2 = 0.01
bb_momentum_2 = 0.95
bb_err_function_2 = 'mse'
bbrbm_2 = BBRBM(n_visible=bb_n_visible_2, n_hidden=bb_n_hid_2, learning_rate=bb_learning_rate_2,
                momentum=bb_momentum_2, err_function=bb_err_function_2, use_tqdm=False)
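# A hedged continuation sketch (assumed, not in the original excerpt): the second RBM would
# typically be trained and checkpointed the same way as the first one above. The epoch and
# batch-size values and the 'pretrain_models/bbrbm_2.ckpt' path are assumptions that mirror
# the layer-1 settings.
bb_n_epoches_2 = 10
bb_batch_size_2 = 128
errs_2 = bbrbm_2.fit(data_x=bb_input_data_2, n_epoches=bb_n_epoches_2,
                     batch_size=bb_batch_size_2, shuffle=True, verbose=True)
bbrbm_2.save_weights(filename='pretrain_models/bbrbm_2.ckpt', name='rbm')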