def evaluate(eval_params, net_params, data_params, common_params, train_params):
    """Run a dice-score evaluation of a trained model and log the results.

    All settings are pulled from the five configuration dicts; predictions
    are written under ``<exp_dir>/<exp_name>/<save_predictions_dir>`` and
    per-class results go through a ``LogWriter``.
    """
    # Network / data configuration.
    num_classes = net_params['num_class']
    labels = data_params['labels']

    # Shared experiment configuration.
    device = common_params['device']
    log_dir = common_params['log_dir']
    exp_dir = common_params['exp_dir']
    exp_name = train_params['exp_name']

    # Evaluation-specific configuration.
    eval_model_path = eval_params['eval_model_path']
    data_dir = eval_params['data_dir']
    label_dir = eval_params['label_dir']
    volumes_txt_file = eval_params['volumes_txt_file']
    remap_config = eval_params['remap_config']
    save_predictions_dir = eval_params['save_predictions_dir']
    orientation = eval_params['orientation']
    data_id = eval_params['data_id']

    prediction_path = os.path.join(exp_dir, exp_name, save_predictions_dir)

    logWriter = LogWriter(num_classes, log_dir, exp_name, labels=labels)
    avg_dice_score, class_dist = eu.evaluate_dice_score(
        eval_model_path, num_classes, data_dir, label_dir, volumes_txt_file,
        remap_config, orientation, prediction_path, data_id, device, logWriter)
    logWriter.close()
def __init__(self, model, exp_name, device, num_class, optim=torch.optim.SGD,
             optim_args=None, loss_func=None, model_name='OneShotSegmentor',
             labels=None, num_epochs=10, log_nth=5, lr_scheduler_step_size=5,
             lr_scheduler_gamma=0.5, use_last_checkpoint=True,
             exp_dir='experiments', log_dir='logs'):
    """Set up a one-shot segmentation training session.

    Creates separate optimizers/schedulers for the model's conditioner and
    segmentor branches, prepares the experiment directory, wires up logging,
    and optionally resumes from the last checkpoint.

    FIX: ``optim_args`` and ``loss_func`` previously used mutable defaults
    (``{}`` and ``losses.DiceLoss()``) evaluated once at definition time;
    ``loss_func.cuda(device)`` below mutates the module in place, so the
    shared default instance leaked state across solver instances. Both are
    now created per call; passing them explicitly behaves exactly as before.
    """
    optim_args = {} if optim_args is None else optim_args
    if loss_func is None:
        loss_func = losses.DiceLoss()

    self.device = device
    self.model = model
    self.model_name = model_name
    self.labels = labels
    self.num_epochs = num_epochs

    # Move the loss to the GPU when one is available; .cuda() mutates the
    # module in place and returns it.
    if torch.cuda.is_available():
        self.loss_func = loss_func.cuda(device)
    else:
        self.loss_func = loss_func

    # One optimizer per branch; the per-group lr/momentum/weight_decay here
    # take precedence over anything supplied via optim_args.
    self.optim_c = optim(
        [{'params': model.conditioner.parameters(), 'lr': 1e-3,
          'momentum': 0.99, 'weight_decay': 0.0001}],
        **optim_args)
    self.optim_s = optim(
        [{'params': model.segmentor.parameters(), 'lr': 1e-3,
          'momentum': 0.99, 'weight_decay': 0.0001}],
        **optim_args)

    # NOTE(review): these schedules are hard-coded and ignore the
    # lr_scheduler_step_size / lr_scheduler_gamma parameters — confirm intended.
    self.scheduler_s = lr_scheduler.StepLR(self.optim_s, step_size=10, gamma=0.1)
    self.scheduler_c = lr_scheduler.StepLR(self.optim_c, step_size=10, gamma=0.001)

    exp_dir_path = os.path.join(exp_dir, exp_name)
    common_utils.create_if_not(exp_dir_path)
    common_utils.create_if_not(os.path.join(exp_dir_path, CHECKPOINT_DIR))
    self.exp_dir_path = exp_dir_path

    self.log_nth = log_nth
    self.logWriter = LogWriter(
        num_class, log_dir, exp_name, use_last_checkpoint, labels)

    self.use_last_checkpoint = use_last_checkpoint
    self.start_epoch = 1
    self.start_iteration = 1
    self.best_ds_mean = 0
    self.best_ds_mean_epoch = 0
    if use_last_checkpoint:
        self.load_checkpoint()
def __init__(self, model, exp_name, device, num_class, optim=torch.optim.SGD,
             optim_args=None, loss_func=None, model_name='quicknat',
             labels=None, num_epochs=10, log_nth=5, lr_scheduler_step_size=5,
             lr_scheduler_gamma=0.5, use_last_checkpoint=True,
             exp_dir='experiments', log_dir='logs', arch_file_path=None):
    """Set up a quicknat training session.

    Builds the loss, a single optimizer with a cosine-annealing schedule,
    the experiment directory (archiving architecture files), logging, and
    optionally resumes from the last checkpoint.

    FIX: ``optim_args`` and ``loss_func`` previously used mutable defaults
    (``{}`` and ``additional_losses.CombinedLoss()``) evaluated once at
    definition time; ``loss_func.cuda(device)`` mutates the module in place,
    so the shared default instance leaked state across solver instances.
    Both are now created per call; explicit arguments behave as before.
    (Commented-out SWA experiment code removed as dead code.)
    """
    optim_args = {} if optim_args is None else optim_args
    if loss_func is None:
        loss_func = additional_losses.CombinedLoss()

    self.device = device
    self.model = model
    self.model_name = model_name
    self.labels = labels
    self.num_epochs = num_epochs

    # Move the loss to the GPU when one is available.
    if torch.cuda.is_available():
        self.loss_func = loss_func.cuda(device)
    else:
        self.loss_func = loss_func

    self.optim = optim(model.parameters(), **optim_args)
    # Cosine annealing replaced an earlier StepLR schedule; note that the
    # lr_scheduler_step_size / lr_scheduler_gamma parameters are unused here.
    self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        self.optim, T_max=100)

    exp_dir_path = os.path.join(exp_dir, exp_name)
    common_utils.create_if_not(exp_dir_path)
    common_utils.create_if_not(os.path.join(exp_dir_path, CHECKPOINT_DIR))
    self.exp_dir_path = exp_dir_path

    # Snapshot the architecture source files into the experiment directory.
    self.save_architectural_files(arch_file_path)

    self.log_nth = log_nth
    self.logWriter = LogWriter(
        num_class, log_dir, exp_name, use_last_checkpoint, labels)

    self.use_last_checkpoint = use_last_checkpoint
    self.start_epoch = 1
    self.start_iteration = 1
    self.best_ds_mean = 0
    self.best_ds_mean_epoch = 0
    if use_last_checkpoint:
        self.load_checkpoint()
    # Debug trace of the resume state (kept: removing it would change stdout).
    print(self.best_ds_mean, self.best_ds_mean_epoch, self.start_epoch)
def evaluate(eval_params, net_params, data_params, common_params, train_params):
    """Evaluate a saved model's dice score and log the outcome.

    Channel handling (3-channel / 2-channel / thick-slice) is taken from the
    data configuration and forwarded to ``evaluate_dice_score``.
    """
    num_classes = net_params['num_class']
    labels = data_params['labels']
    multi_channel = data_params['use_3channel']
    use_2channel = data_params['use_2channel']
    thick_channel = data_params['thick_channel']

    device = common_params['device']
    log_dir = common_params['log_dir']
    exp_dir = common_params['exp_dir']
    exp_name = train_params['exp_name']

    eval_model_path = eval_params['eval_model_path']
    data_dir = eval_params['data_dir']
    label_dir = eval_params['label_dir']
    volumes_txt_file = eval_params['volumes_txt_file']
    save_predictions_dir = eval_params['save_predictions_dir']
    orientation = eval_params['orientation']

    # Predictions land under <exp_dir>/<exp_name>/<save_predictions_dir>.
    prediction_path = os.path.join(exp_dir, exp_name, save_predictions_dir)

    logWriter = LogWriter(num_classes, log_dir, exp_name, labels=labels)
    avg_dice_score, class_dist = evaluate_dice_score(
        eval_model_path, num_classes, data_dir, label_dir, volumes_txt_file,
        orientation, prediction_path, device, logWriter,
        multi_channel=multi_channel, use_2channel=use_2channel,
        thick_ch=thick_channel)
    logWriter.close()
def __init__(self, model, exp_name, device, num_class, optim=torch.optim.SGD,
             optim_args=None, loss_func=None, model_name='segmentor',
             labels=None, num_epochs=10, log_nth=5, lr_scheduler_step_size=5,
             lr_scheduler_gamma=0.5, use_last_checkpoint=True,
             exp_dir='experiments', log_dir='logs'):
    """Set up a segmentor training session.

    Builds the loss, optimizer, StepLR schedule, experiment directory,
    logging, and optionally resumes from the last checkpoint.

    FIX: ``optim_args`` and ``loss_func`` previously used mutable defaults
    (``{}`` and ``losses.CombinedLoss()``) evaluated once at definition
    time; ``loss_func.cuda(device)`` mutates the module in place, so the
    shared default instance leaked state across solver instances. Both are
    now created per call; explicit arguments behave exactly as before.
    """
    optim_args = {} if optim_args is None else optim_args
    if loss_func is None:
        loss_func = losses.CombinedLoss()

    self.device = device
    self.model = model
    self.model_name = model_name
    self.labels = labels
    self.num_epochs = num_epochs

    # Move the loss to the GPU when one is available.
    if torch.cuda.is_available():
        self.loss_func = loss_func.cuda(device)
    else:
        self.loss_func = loss_func

    self.optim = optim(model.parameters(), **optim_args)
    self.scheduler = lr_scheduler.StepLR(
        self.optim, step_size=lr_scheduler_step_size, gamma=lr_scheduler_gamma)

    exp_dir_path = os.path.join(exp_dir, exp_name)
    common_utils.create_if_not(exp_dir_path)
    common_utils.create_if_not(os.path.join(exp_dir_path, CHECKPOINT_DIR))
    self.exp_dir_path = exp_dir_path

    self.log_nth = log_nth
    self.logWriter = LogWriter(
        num_class, log_dir, exp_name, use_last_checkpoint, labels)

    self.use_last_checkpoint = use_last_checkpoint
    self.start_epoch = 1
    self.start_iteration = 1
    self.best_ds_mean = 0
    self.best_ds_mean_epoch = 0
    if use_last_checkpoint:
        self.load_checkpoint()
def evaluate(eval_params, net_params, data_params, common_params, train_params):
    """Evaluate a few-shot segmentation model across four cross-validation folds.

    For each fold, loads ``saved_models/<eval_model_path>_<fold>.pth.tar``,
    writes predictions under ``<exp_dir>/<exp_name>_<fold>/<save_predictions_dir>``,
    and logs the average dice score.

    FIX: the checkpoint path was previously re-assigned from its own already
    suffixed value inside the loop, so from the second fold onward it pointed
    at a nonsense nested path like
    ``saved_models/saved_models/<name>_fold1.pth.tar_fold2.pth.tar``.
    Each fold's path is now derived from the configured base name.
    """
    base_model_name = eval_params['eval_model_path']
    num_classes = net_params['num_class']
    labels = data_params['labels']
    data_dir = eval_params['data_dir']
    query_txt_file = eval_params['query_txt_file']
    support_txt_file = eval_params['support_txt_file']
    remap_config = eval_params['remap_config']
    device = common_params['device']
    log_dir = common_params['log_dir']
    exp_dir = common_params['exp_dir']
    exp_name = train_params['exp_name']
    save_predictions_dir = eval_params['save_predictions_dir']
    orientation = eval_params['orientation']

    logWriter = LogWriter(num_classes, log_dir, exp_name, labels=labels)

    folds = ['fold1', 'fold2', 'fold3', 'fold4']
    for fold in folds:
        # Fold-specific prediction directory: "<exp_dir>/<exp_name>_<fold>/...".
        prediction_path = os.path.join(exp_dir, exp_name) + "_" + fold
        prediction_path = os.path.join(prediction_path, save_predictions_dir)

        fold_model_path = os.path.join(
            'saved_models', base_model_name + '_' + fold + '.pth.tar')

        query_labels = get_lab_list('val', fold)
        # NOTE(review): len(fold) is the length of the string 'foldN' (always 5),
        # not a class count; this looks like it should be len(query_labels).
        # Preserved as-is pending confirmation.
        num_classes = len(fold)

        avg_dice_score = eu.evaluate_dice_score(
            fold_model_path, num_classes, query_labels, data_dir,
            query_txt_file, support_txt_file, remap_config, orientation,
            prediction_path, device, logWriter, fold=fold)
        logWriter.log(avg_dice_score)
    logWriter.close()
def evaluate(eval_params, net_params, data_params, common_params, train_params):
    """Evaluate a fine-tuned few-shot segmentor on the configured fold(s).

    Loads ``saved_models/finetuned_segmentor_<fold>.pth.tar`` for each fold
    in the hard-coded fold list, evaluates along the axial orientation, and
    logs the average dice score.

    Cleanup: large blocks of commented-out 2-view/3-view evaluation code and
    unused local constants (a second checkpoint path, the 'COR' orientation)
    were removed as dead code; runtime behavior is unchanged.
    """
    # Read the configured values up front (kept even where unused below, so a
    # missing config key still fails loudly here, as it did before).
    eval_model_path = eval_params['eval_model_path']
    num_classes = net_params['num_class']
    labels = data_params['labels']
    data_dir = eval_params['data_dir']
    label_dir = eval_params['label_dir']
    query_txt_file = eval_params['query_txt_file']
    support_txt_file = eval_params['support_txt_file']
    remap_config = eval_params['remap_config']
    device = common_params['device']
    log_dir = common_params['log_dir']
    exp_dir = common_params['exp_dir']
    exp_name = train_params['exp_name']
    save_predictions_dir = eval_params['save_predictions_dir']
    prediction_path = os.path.join(exp_dir, exp_name, save_predictions_dir)
    orientation = eval_params['orientation']

    logWriter = LogWriter(num_classes, log_dir, exp_name, labels=labels)

    # NOTE(review): the configured eval_model_path/orientation are overridden
    # by these hard-coded experiment values — confirm this is intentional.
    folds = ['fold4']
    eval_model_path1 = "finetuned_segmentor"
    orientaion1 = 'AXI'

    for fold in folds:
        eval_model_path = os.path.join(
            'saved_models', eval_model_path1 + '_' + fold + '.pth.tar')
        query_labels = _get_lab_list('val', fold)
        # NOTE(review): len(fold) is the length of the string 'foldN' (always 5),
        # not a class count; this looks like it should be len(query_labels).
        # Preserved as-is pending confirmation.
        num_classes = len(fold)

        avg_dice_score = eu.evaluate_dice_score(
            eval_model_path, num_classes, query_labels, data_dir,
            query_txt_file, support_txt_file, remap_config, orientaion1,
            prediction_path, device, logWriter, fold=fold)
        logWriter.log(avg_dice_score)
    logWriter.close()