def move_and_save(this_run=i, new_folder=new_folder, old_folder=old_folder, dataset_folder=dataset_dict[dataset], stage_name=stage_name, AD_folder=AD_folder, move_dataset=args.move_dataset):
    """Ship this stage's best model and its training artifacts to the CHPC
    cluster over scp/ssh, and return the reloaded best model.

    Side effects, in order:
      1. On the very first run (``this_run == 0``) and when ``move_dataset``
         is 1, scp the whole dataset folder to the cluster.
      2. Parse ``training_nums.out`` from ``old_folder`` (the loss statistics
         are computed but not otherwise used here).
      3. Reload the checkpointed 'best_simple' model from ``old_folder``.
      4. Copy the model files, the training log and the training plot into
         ``new_folder``.
      5. scp the generated .batch file and ``new_folder`` to the cluster and
         ssh-create the remote AD directory.

    Returns:
        The Keras model loaded via ``hf.load_trained_CNN``.
    """
    remote = '[email protected]'

    ### Move dataset over?  Only once, on the first run.
    if move_dataset == 1 and this_run == 0:
        send_message(f'Moving dataset over: {dataset_folder}')
        os.system(f'scp -r {dataset_folder} {remote}:/scratch/shenghuahe/datasets/')

    send_message('Moving model files around, in order to ship over to chpc')

    # Locate and parse the training log written during this stage's run.
    nums_file = old_folder + 'training_nums.out'
    train_loss, val_loss, learning_rate = parse_training_nums(nums_file)

    # Loss statistics around the best-validation epoch (currently informational
    # only — nothing below consumes them).
    best_epoch = np.argmin(val_loss)
    lowest_val_loss = val_loss[best_epoch]
    train_loss_at_best_val = train_loss[best_epoch]
    lowest_train_loss = np.min(train_loss)

    tag = 'best'
    # Reload the checkpointed best model so it can be returned to the caller.
    model = hf.load_trained_CNN(version=1, name=tag + '_simple', folder=old_folder)

    ### Stage model files plus training artifacts inside the transfer folder.
    yaml_src = old_folder + tag + '_simple.yaml'
    h5_src = old_folder + tag + '_simple.h5'
    copyfile(yaml_src, new_folder + 'best_simple.yaml')
    copyfile(h5_src, new_folder + 'best_simple.h5')
    copyfile(nums_file, new_folder + 'training_nums.out')
    copyfile(old_folder + 'training_plot.png', new_folder + 'training_plot.png')

    ### Ship the generated .batch file and the whole transfer folder to CHPC.
    batch_file = generate_run_file()
    os.system(f'scp {batch_file} {remote}:/scratch/shenghuahe/batch_files')
    os.system(f'scp -r {new_folder} {remote}:/scratch/shenghuahe/models/{stage_name}')

    ### Prepare the AD directory locally and mirror it on the cluster.
    hf.generate_folder(AD_folder)
    os.system(f'ssh {remote} "mkdir /scratch/shenghuahe/{AD_folder}"')
    return model
# --- Stage-training driver (script-level code inside a larger training loop) ---
# Kick off training for this stage by shelling out to the training script.
send_message('Attempting to train with this command: ' + train_command + pretrained_model_addition)
os.system(train_command + pretrained_model_addition)
#### Find and move best model to transfer over ####
# Saving/Moving best model, a new training plot/data, visualization of filters
model_folder = mf + 'stage' + str(i) + '_model/'
# os.system(train_command)
# Re-run the training script in --just_return_name mode: it writes the
# generated model name to tmp.out, which is read back below.
os.system(train_command + ' --just_return_name True')
with open('/home/shenghua/DL-recon/dl-limitedview-prior/tmp.out', 'r') as infile:
    # NOTE(review): readline() keeps a trailing '\n' when present — presumably
    # tmp.out has none; verify against the writer side.
    model_name = infile.readline()
new_folder = model_folder
hf.generate_folder(new_folder)  # create the per-stage transfer folder
# Folder where this training run left its outputs.
old_folder = 'projection_results7/' + model_name + '/'
print('------------- stage ' + str(i) + ' training ends!!-----------------------')
# Eventual AD directory on turing
AD_folder = 'datasets/v' + save_dir
### Generate .batch file to train this model
# NOTE(review): the parameter list of this def continues beyond this chunk —
# the signature is incomplete here, hence the trailing comma.
def generate_run_file(model_folder_func='models/' + stage_name + '/',
                      nb_filters=nb_filters, depth=depth, file_name=stage_name,
                      new_folder=new_folder, dataset_name=dataset_name,
                      save_dir=AD_folder,
# --- Imports and module-level setup for the projection-training script ---
from keras.callbacks import Callback
import helper_functions as hf
import CNN_generator as cg
import numpy as np
np.random.seed(1337)  # fixed seed for reproducible runs
import argparse
import random
from keras.optimizers import SGD, Adam
import contextlib
import os
import contextlib  # NOTE(review): duplicate import (harmless)
from keras import backend as K

# All model outputs from this script land under this folder.
output_folder = 'projection_results4/'
hf.generate_folder(output_folder)
hf.generate_folder('models/')


class generateImageCallback(Callback):
    # Keras callback that records per-epoch losses and learning rates.
    # NOTE(review): the class body appears to continue beyond this chunk
    # (see the trailing commented-out lines) — incomplete here.

    def on_train_begin(self, logs={}):  # NOTE(review): mutable default arg
        # History buffers, reset at the start of every fit() call.
        self.losses = []
        self.val_losses = []
        self.lrs = []

    def on_epoch_end(self, epoch, logs={}):  # NOTE(review): mutable default arg
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        # NOTE(review): reads the module-level `model`, not self.model —
        # works only because this script defines `model` globally; verify.
        self.lrs.append(K.get_value(model.optimizer.lr))
        # save_predictions
        # image = self.generate_image()
# --- Model construction / training setup (script-level; chunk starts mid-call) ---
# NOTE(review): the line below is the tail of a model-builder call whose head
# (presumably a cg.* constructor assigned to `model`) lies before this chunk.
k2=f_dim2[i], k3=f_dim3[i], nb_filters=nb_filters[i], elu=elu[i], longer=args.longer, dropout=args.dropout)
# Build a descriptive model name encoding the hyperparameters of this run.
model.name = 'deartifact_' + str(lrelu[i]) + '_' + '{0:.3f}'.format(alpha[i]) + \
    '_' + str(f_dim1[i]) + '_' + str(f_dim2[i]) + '_' + str(f_dim3[i]) + \
    '_' + str(nb_filters[i]) + '_' + str(elu[i]) + '_' + str(lr[i]) + \
    '_' + args.loss + '_' + str(args.longer) + '_' + str(args.nb_epochs) + \
    '_' + str(args.normalize) + '_' + str(args.dropout) + \
    '_' + str(args.final_act) + '_' + str(args.relu) + \
    '_' + str(args.dataset) + '_regular'
print('model name: ' + model.name)
# Adam with the CLI-provided learning rate; remaining args are Keras defaults.
adam = Adam(lr=args.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss=args.loss, optimizer=adam)
hf.generate_folder(output_folder + model.name)

## Load dataset
X_train, X_test, Y_train, Y_test = hf.load_data(version=args.dataset,
    normalize_projection=args.normalize, normalize=False,
    normalize_simplistic=args.normalize_simplistic)
# X_train = X_train[1:100,:,:,:]
# Y_train = Y_train[1:100,:,:,:]

## Fix Y's of dataset to be cropped to middle square from output of model
cropped_output = model.output_shape
print('output shape : ' + str(cropped_output))


def fix_Ys(Y, cropped_output=cropped_output):
    # Crop each target image in Y down to the model's (smaller) output window.
    # NOTE(review): assumes Y is a 4-D batch with square spatial dims — verify.
    y_cropped = np.zeros((Y.shape[0], ) + cropped_output[1:])
    # Symmetric margin between input and output spatial size.
    dif = int((Y.shape[2] - cropped_output[2]) / 2)
    # NOTE(review): loop body continues beyond this chunk — incomplete here.
    for j in range(Y.shape[0]):
# --- Imports and module-level setup for the ProstateX2 training script ---
# NOTE(review): `Callback` is used below but never imported in this chunk —
# expects `from keras.callbacks import Callback`; if it is not provided
# elsewhere, this module raises NameError at class-definition time.
import helper_functions as hf
import CNN_generator as cg
import numpy as np
import argparse
import random
from keras.optimizers import SGD, Adam
import contextlib
import os
import math
import sys
from keras import backend as K
from multi_gpu import make_parallel

# All outputs from this script land under this folder.
output_folder = 'Prostatex2_Results/'
hf.generate_folder(output_folder)
hf.generate_folder('models/')


class generateImageCallback(Callback):
    # Keras callback recording per-epoch training history.
    # NOTE(review): the class body is cut off mid-method at the end of this
    # chunk — incomplete here.

    def on_train_begin(self, logs={}):  # NOTE(review): mutable default arg
        # History buffers, reset at the start of every fit() call.
        self.losses = []
        self.val_losses = []
        self.lrs = []

    def on_epoch_end(self, epoch, logs={}):  # NOTE(review): mutable default arg
        self.losses.append(logs.get('loss'))