# training hyperparameters
bin_label = None  # 0.5
cuda = True
losstype = 'BCElogit'
lr = 1e-4

# build the model name from the hyperparameters
model_name = 'model_unet'
if bin_label:
    model_name += '_labelBin{}'.format(bin_label)
model_name += '_{}_lr{}_B{}_W{}_spv{}_nw{}'.format(
    losstype, lr, batch_size, windows_size[0], samples_per_volume, num_workers)

# result directory and log file
resdir = "/network/lustre/iss01/cenir/analyse/irm/users/romain.valabregue/QCcnn/UNET_saved_pytorch/" + model_name
if not os.path.isdir(resdir):
    os.mkdir(resdir)
log = get_log_file(resdir + '/training.log')

transforms = None

# one csv file per input image / label map
data_parameters = {
    'image': {'csv_file': '/data/romain/data_exemple/file_ms.csv'},
    'label1': {'csv_file': '/data/romain/data_exemple/file_p1.csv'},
    'label2': {'csv_file': '/data/romain/data_exemple/file_p2.csv'},
    'label3': {'csv_file': '/data/romain/data_exemple/file_p3.csv'},
}
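# Worked example of the naming scheme above (illustration only; batch_size = 4,
# windows_size[0] = 64, samples_per_volume = 8 and num_workers = 2 are assumed values,
# and bin_label is None so the '_labelBin' suffix is skipped):
_example_name = 'model_unet' + '_{}_lr{}_B{}_W{}_spv{}_nw{}'.format('BCElogit', 1e-4, 4, 64, 8, 2)
# _example_name == 'model_unet_BCElogit_lr0.0001_B4_W64_spv8_nw2'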
def set_model(self, par_model, res_model_file=None, verbose=True, log_filename='training.log'):
    # read parameters (with defaults for the optional ones)
    network_name = par_model['network_name']
    losstype = par_model['losstype']
    lr = par_model['lr']
    in_size = par_model['in_size']
    self.cuda = par_model['cuda']
    self.max_epochs = par_model['max_epochs']
    optim_name = par_model['optim'] if 'optim' in par_model else 'Adam'
    self.validation_droupout = par_model['validation_droupout'] if 'validation_droupout' in par_model else False

    # build the network
    if network_name == 'unet_f':
        self.model = UNet(in_channels=1, dimensions=3, out_classes=1,
                          num_encoding_blocks=3, out_channels_first_layer=16,
                          normalization='batch', padding=True,
                          pooling_type='max',  # max avg AdaptiveMax AdaptiveAvg
                          upsampling_type='trilinear', residual=False,
                          dropout=False, monte_carlo_dropout=0.5)
    elif network_name == 'unet':
        self.model = SmallUnet(in_channels=1, out_channels=1)
    elif network_name == 'ConvN':
        conv_block = par_model['conv_block']
        dropout, drop_conv, batch_norm = par_model['dropout'], par_model['drop_conv'], par_model['batch_norm']
        linear_block = par_model['linear_block']
        output_fnc = par_model['output_fnc'] if 'output_fnc' in par_model else None
        self.model = ConvN_FC3(in_size=in_size, conv_block=conv_block, linear_block=linear_block,
                               dropout=dropout, drop_conv=drop_conv, batch_norm=batch_norm,
                               output_fnc=output_fnc)
        network_name += '_C{}_{}_Lin{}_{}_D{}_DC{}'.format(
            np.abs(conv_block[0]), conv_block[-1], linear_block[0], linear_block[-1], dropout, drop_conv)
        if output_fnc is not None:
            network_name += '_fnc_{}'.format(output_fnc)
        if batch_norm:
            network_name += '_BN'
        if self.validation_droupout:
            network_name += '_VD'

    # result name and directory
    self.res_name += '_Size{}_{}_Loss_{}_lr{}'.format(in_size[0], network_name, losstype, lr)
    if 'Adam' not in optim_name:  # only written if not the default Adam
        self.res_name += '_{}'.format(optim_name)
    self.res_dir += self.res_name + '/'

    if res_model_file is not None:  # to avoid handling batch size and num workers used for model training
        self.res_dir, self.res_name = get_parent_path(res_model_file)

    if not os.path.isdir(self.res_dir):
        os.mkdir(self.res_dir)
    self.log = get_log_file(self.res_dir + '/' + log_filename)
    self.log.info(self.log_string)

    # loss selection
    if losstype == 'MSE':
        self.loss = tnn.MSELoss()
    elif losstype == 'L1':
        self.loss = tnn.L1Loss()
    elif losstype == 'ssim':
        self.loss = SSIM3D()
    elif losstype == 'ssim_dist':
        self.loss = SSIM3D(distance=2)
    elif losstype == 'BCE':
        self.loss = tnn.BCELoss()
    elif losstype == 'BCElogit':
        self.loss = tnn.BCEWithLogitsLoss()

    if self.cuda:
        self.model = self.model.cuda()
        self.loss = self.loss.cuda()
        device = "cuda"
    else:
        device = 'cpu'

    if verbose:
        self.log.info(summary(self.model, (1, in_size[0], in_size[1], in_size[2]),
                              device=device, batch_size=1))

    # resume from existing weights if any are found
    self.ep_start, self.last_model_saved = load_existing_weights_if_exist(
        self.res_dir, self.model, log=self.log, device=device, res_model_file=res_model_file)

    # optimizer
    if "Adam" in optim_name:
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
    elif "SGD" in optim_name:
        self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=0.5)
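# Minimal usage sketch (assumed, not from the original repo): a par_model dict with the keys
# read by set_model above, passed to an already-constructed trainer object. 'trainer' is a
# hypothetical placeholder; it is assumed to define res_name, res_dir and log_string beforehand.
par_model = {
    'network_name': 'unet',   # selects the SmallUnet branch above
    'losstype': 'BCElogit',
    'lr': 1e-4,
    'in_size': [64, 64, 64],
    'cuda': True,
    'max_epochs': 50,
    # 'optim' and 'validation_droupout' are optional and default to 'Adam' / False
}
trainer.set_model(par_model)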
# get options for dataset selection
parser = get_cmd_select_data_option()

# options for model evaluation
parser.add_option("-n", "--out_name", action="store", dest="out_name", default='res_val',
                  help="name to be appended to the results")
parser.add_option("--val_number", action="store", dest="val_number", default=-1, type="int",
                  help="number to be prepended to the out name (default -1)")
parser.add_option("-w", "--saved_model", action="store", dest="saved_model", default='',
                  help="full path of the model's weights file")
parser.add_option("--use_gpu", action="store", dest="use_gpu", default=0, type="int",
                  help="0 means no gpu; 1 to 4 selects the gpu device (default 0)")

(options, args) = parser.parse_args()

log = get_log_file()
name, val_number = options.out_name, options.val_number
saved_model = options.saved_model
cuda = options.use_gpu > 0

if val_number < 0:
    out_name = name
    subdir = None  # 'eval_rrr__{}_{}'.format(name)
else:
    out_name = 'eval_num_{:04d}'.format(val_number)
    subdir = 'eval_{}_{}'.format(name, get_parent_path(saved_model)[1][:-3])
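# Usage sketch (the script name and paths are assumed placeholders, not from the original repo):
#   python eval_model.py -n res_val --val_number 3 -w /path/to/saved_model.pt --use_gpu 1
# With val_number >= 0 this gives out_name = 'eval_num_0003' and a subdir built from the out
# name plus the weights file name minus its last 3 characters (i.e. a '.pt' extension).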