def continue_run_rand_nn(matrixA, matrixB, num_gene, num_pathway, layer1, layer2, layer3, path, dir_opt, RNA_seq_filename, input_num, epoch, batch_size, verbose):
    """Rebuild a previously trained RandNN from pickled weights and continue training.

    Args:
        matrixA, matrixB: connectivity/mask matrices passed to the model builder.
        num_gene: number of gene-level input units.
        num_pathway: width of the pathway layer (first hidden layer of the builder).
        layer1, layer2, layer3: widths of the remaining hidden layers.
        path: directory holding 'layer_bias_list.txt' and 'layer_weight_list.txt'
            (pickled lists, one entry per Keras layer).
        dir_opt, RNA_seq_filename: data-location arguments forwarded to LoadData.
        input_num, epoch, batch_size, verbose: training arguments forwarded to
            RunRandNN.train.

    Returns:
        (model, history, path) — the retrained model, its Keras history, and the
        output path reported by RunRandNN.train.
    """
    # RECONSTRUCT THE TRAINED MODEL
    # BUG FIX: original passed the undefined name `layer0`; the argument in this
    # positional slot is `num_pathway` (matches the parallel build_rand_nn call).
    model = RandNN().keras_rand_nn(matrixA, matrixB, num_gene, num_pathway, layer1, layer2, layer3)
    with open(path + '/layer_bias_list.txt', 'rb') as filebias:
        layer_bias_list = pickle.load(filebias)
    with open(path + '/layer_weight_list.txt', 'rb') as fileweight:
        layer_weight_list = pickle.load(fileweight)
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse', 'accuracy'])
    # One tiny fit call forces Keras to build the layer variables so that
    # set_weights below has shapes to write into.
    xTmp, yTmp = LoadData(dir_opt, RNA_seq_filename).load_train(0, 1)
    model.fit(xTmp, yTmp, epochs=1, validation_split=1, verbose=0)
    # Restore every layer's [weights, biases] pair from the pickled lists.
    num_layer = len(model.layers)
    for i in range(num_layer):
        each_layer_list = [layer_weight_list[i], layer_bias_list[i]]
        model.layers[i].set_weights(each_layer_list)
    # AUTO UPDATE WEIGHT
    model, history, num_layer, path = RunRandNN(
        model, dir_opt, RNA_seq_filename).train(input_num, epoch, batch_size, verbose)
    return model, history, path
def run_rand_nn(model, dir_opt, RNA_seq_filename, matrixA, matrixB, input_num, epoch, batch_size, verbose):
    """Train `model` through RunRandNN and return (model, history, path).

    `matrixA` and `matrixB` are accepted for interface compatibility but are
    not used here — training is delegated entirely to RunRandNN.train.
    """
    # AUTO UPDATE WEIGHT
    trainer = RunRandNN(model, dir_opt, RNA_seq_filename)
    model, history, _num_layer, path = trainer.train(input_num, epoch, batch_size, verbose)
    return model, history, path
def manual_test_rand_nn(matrixA, matrixB, num_gene, num_pathway, layer1, layer2, layer3, path, dir_opt, learning_rate):
    """Manually rebuild the model, restore pickled layer weights, and run the test split.

    Loads 'layer_list.txt' from `path` (a pickled per-layer weights list),
    writes it into a freshly built network, then evaluates via RunRandNN.test.
    """
    # MANUAL REBUILD THE MODEL
    input_model, gene_model, pathway_model, model = build_rand_nn(
        matrixA, matrixB, num_gene, num_pathway, layer1, layer2, layer3)
    with open(path + '/layer_list.txt', 'rb') as filelayer:
        layer_list = pickle.load(filelayer)
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=learning_rate), metrics=['mse', 'accuracy'])
    # A single throwaway fit builds the layer variables so set_weights works.
    xTmp, yTmp = LoadData(dir_opt).load_train(0, 1)
    model.fit(xTmp, yTmp, epochs=1, validation_split=1, verbose=0)
    for idx in range(len(model.layers)):
        model.get_layer(index=idx).set_weights(layer_list[idx])
    # PREDICT MODEL USING [xTe, yTe]
    verbose = 1
    y_pred, score = RunRandNN(model, dir_opt).test(verbose, path)
def manual_test_rand_nn(matrixA, matrixB, num_gene, num_pathway, layer1, layer2, layer3, path, dir_opt, RNA_seq_filename):
    """Rebuild a trained RandNN from pickled weights/biases and evaluate it on the test split.

    Args:
        matrixA, matrixB: connectivity/mask matrices passed to the model builder.
        num_gene: number of gene-level input units.
        num_pathway: width of the pathway layer (first hidden layer of the builder).
        layer1, layer2, layer3: widths of the remaining hidden layers.
        path: directory holding 'layer_bias_list.txt' and 'layer_weight_list.txt'
            (pickled lists, one entry per Keras layer).
        dir_opt, RNA_seq_filename: data-location arguments forwarded to LoadData
            and RunRandNN.

    Side effects only: prediction results are produced by RunRandNN.test;
    nothing is returned (matches the original contract).
    """
    # RECONSTRUCT TEST MODEL
    # BUG FIX: original passed the undefined name `layer0`; the argument in this
    # positional slot is `num_pathway` (matches the parallel build_rand_nn call).
    model = RandNN().keras_rand_nn(matrixA, matrixB, num_gene, num_pathway, layer1, layer2, layer3)
    with open(path + '/layer_bias_list.txt', 'rb') as filebias:
        layer_bias_list = pickle.load(filebias)
    with open(path + '/layer_weight_list.txt', 'rb') as fileweight:
        layer_weight_list = pickle.load(fileweight)
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse', 'accuracy'])
    # One tiny fit call forces Keras to build the layer variables so that
    # set_weights below has shapes to write into.
    xTmp, yTmp = LoadData(dir_opt, RNA_seq_filename).load_train(0, 1)
    model.fit(xTmp, yTmp, epochs=1, validation_split=1, verbose=0)
    # Restore every layer's [weights, biases] pair from the pickled lists.
    num_layer = len(model.layers)
    for i in range(num_layer):
        each_layer_list = [layer_weight_list[i], layer_bias_list[i]]
        model.layers[i].set_weights(each_layer_list)
    # PREDICT MODEL USING [xTe, yTe]
    verbose = 1
    y_pred, score = RunRandNN(model, dir_opt, RNA_seq_filename).test(verbose, path)
def auto_test_rand_nn(model, dir_opt, RNA_seq_filename, verbose, path):
    """Evaluate an already-trained model on the test split via RunRandNN.test."""
    runner = RunRandNN(model, dir_opt, RNA_seq_filename)
    y_pred, score = runner.test(verbose, path)
def auto_test_rand_nn(model, dir_opt, verbose, path):
    """Evaluate an already-trained model on the test split (no RNA_seq_filename variant)."""
    runner = RunRandNN(model, dir_opt)
    y_pred, score = runner.test(verbose, path)
def run_rand_nn(model, dir_opt, matrixA, matrixB, input_num, epoch, batch_size, verbose, learning_rate, end_epoch):
    """Train `model` through RunRandNN (learning-rate/end-epoch variant).

    `matrixA` and `matrixB` are accepted for interface compatibility but are
    not used here — training is delegated entirely to RunRandNN.train.

    Returns:
        (model, history, path) as produced by RunRandNN.train.
    """
    trainer = RunRandNN(model, dir_opt)
    model, history, path = trainer.train(input_num, epoch, batch_size, verbose, learning_rate, end_epoch)
    return model, history, path
def auto_test_rand_nn(model, dir_opt, RNA_seq_filename, verbose, path):
    """Run the test split directly on a model just produced by training."""
    # GET MODEL IMMEDIATELY FROM TRAINED MODEL
    runner = RunRandNN(model, dir_opt, RNA_seq_filename)
    y_pred, score = runner.test(verbose, path)