def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands
    are run via this function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
            parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the log directory.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the log_directory) to resume training.
            This training state is saved every time a new best model is saved in the log directory.

    Returns:
        Float or pandas Dataframe:
            If "train" command: Returns floats: best loss score for both training and validation.
            If "test" command: Returns a pandas Dataframe of metrics computed for each subject of
            the testing sub-dataset and returns the prediction metrics before evaluation.
            If "segment" command: No return value.
    """
    command = copy.deepcopy(context["command"])
    log_directory = copy.deepcopy(context["log_directory"])
    if not os.path.isdir(log_directory):
        print('Creating log directory: {}'.format(log_directory))
        os.makedirs(log_directory)
    else:
        print('Log directory already exists: {}'.format(log_directory))

    # Define device
    cuda_available, device = imed_utils.define_device(context['gpu'])

    # Get subject lists
    train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
        context["split_dataset"], context['loader_parameters']['bids_path'], log_directory)

    # Loader params
    loader_params = copy.deepcopy(context["loader_parameters"])
    if command == "train":
        loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["training_validation"]
    else:
        loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["testing"]
    if "FiLMedUnet" in context and context["FiLMedUnet"]["applied"]:
        loader_params.update({"metadata_type": context["FiLMedUnet"]["metadata"]})

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params = copy.deepcopy(context["default_model"])
    model_params["folder_name"] = copy.deepcopy(context["model_name"])
    model_context_list = [model_name for model_name in MODEL_LIST
                          if model_name in context and context[model_name]["applied"]]
    if len(model_context_list) == 1:
        model_params["name"] = model_context_list[0]
        model_params.update(context[model_context_list[0]])
    elif 'Modified3DUNet' in model_context_list and 'FiLMedUnet' in model_context_list and \
            len(model_context_list) == 2:
        model_params["name"] = 'Modified3DUNet'
        for i in range(len(model_context_list)):
            model_params.update(context[model_context_list[i]])
    elif len(model_context_list) > 1:
        print('ERROR: Several models are selected in the configuration file: {}.'
              'Please select only one (i.e. only one where: "applied": true).'.format(model_context_list))
        exit()

    model_params['is_2d'] = False if "Modified3DUNet" in model_params['name'] else model_params['is_2d']
    # Get in_channel from contrast_lst
    if loader_params["multichannel"]:
        model_params["in_channel"] = len(loader_params["contrast_params"]["contrast_lst"])
    else:
        model_params["in_channel"] = 1
    # Get out_channel from target_suffix
    model_params["out_channel"] = len(loader_params["target_suffix"])
    # If multi-class output, then add background class
    if model_params["out_channel"] > 1:
        model_params.update({"out_channel": model_params["out_channel"] + 1})
    # Display for spec' check
    imed_utils.display_selected_model_spec(params=model_params)
    # Update loader params
    if 'object_detection_params' in context:
        object_detection_params = context['object_detection_params']
        object_detection_params.update({"gpu": context['gpu'],
                                        "log_directory": context['log_directory']})
        loader_params.update({"object_detection_params": object_detection_params})

    loader_params.update({"model_params": model_params})

    # TESTING PARAMS
    # Aleatoric uncertainty
    if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({'target_suffix': loader_params["target_suffix"],
                           'undo_transforms': undo_transforms,
                           'slice_axis': loader_params['slice_axis']})

    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])

    if command == 'train':
        # LOAD DATASET
        # Get Validation dataset
        ds_valid = imed_loader.load_dataset(**{**loader_params,
                                               **{'data_list': valid_lst,
                                                  'transforms_params': transform_valid_params,
                                                  'dataset_type': 'validation'}},
                                            device=device,
                                            cuda_available=cuda_available)
        # Get Training dataset
        ds_train = imed_loader.load_dataset(**{**loader_params,
                                               **{'data_list': train_lst,
                                                  'transforms_params': transform_train_params,
                                                  'dataset_type': 'training'}},
                                            device=device,
                                            cuda_available=cuda_available)
        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            # Normalize metadata before sending to the FiLM network
            results = imed_film.get_film_metadata_models(ds_train=ds_train,
                                                         metadata_type=model_params['metadata'],
                                                         debugging=context["debugging"])
            ds_train, train_onehotencoder, metadata_clustering_models = results
            ds_valid = imed_film.normalize_metadata(ds_valid, metadata_clustering_models,
                                                    context["debugging"], model_params['metadata'])

            model_params.update({"film_onehotencoder": train_onehotencoder,
                                 "n_metadata": len([ll for l in train_onehotencoder.categories_ for ll in l])})
            joblib.dump(metadata_clustering_models, "./" + log_directory + "/clustering_models.joblib")
            joblib.dump(train_onehotencoder, "./" + log_directory + "/one_hot_encoder.joblib")

        # Model directory
        path_model = os.path.join(log_directory, context["model_name"])
        if not os.path.isdir(path_model):
            print('Creating model directory: {}'.format(path_model))
            os.makedirs(path_model)
            if 'film_layers' in model_params and any(model_params['film_layers']):
                joblib.dump(train_onehotencoder, os.path.join(path_model, "one_hot_encoder.joblib"))
                if 'metadata_dict' in ds_train[0]['input_metadata'][0]:
                    metadata_dict = ds_train[0]['input_metadata'][0]['metadata_dict']
                    joblib.dump(metadata_dict, os.path.join(path_model, "metadata_dict.joblib"))
        else:
            print('Model directory already exists: {}'.format(path_model))

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            log_directory=log_directory,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # If command == train, then ds_valid is already loaded
            # Get Validation dataset
            ds_valid = imed_loader.load_dataset(**{**loader_params,
                                                   **{'data_list': valid_lst,
                                                      'transforms_params': transform_valid_params,
                                                      'dataset_type': 'validation'}},
                                                device=device,
                                                cuda_available=cuda_available)
        # Get Training dataset with no Data Augmentation
        ds_train = imed_loader.load_dataset(**{**loader_params,
                                               **{'data_list': train_lst,
                                                  'transforms_params': transform_valid_params,
                                                  'dataset_type': 'training'}},
                                            device=device,
                                            cuda_available=cuda_available)

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params["name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(log_directory, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(log_directory, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}

    if command == 'train':
        # Save config file within log_directory and log_directory/model_name
        # Done after the threshold_analysis to propagate this info in the config files
        with open(os.path.join(log_directory, "config_file.json"), 'w') as fp:
            json.dump(context, fp, indent=4)
        with open(os.path.join(log_directory, context["model_name"], context["model_name"] + ".json"), 'w') as fp:
            json.dump(context, fp, indent=4)

        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(**{**loader_params,
                                              **{'data_list': test_lst,
                                                 'transforms_params': transformation_dict,
                                                 'dataset_type': 'testing',
                                                 'requires_undo': True}},
                                           device=device,
                                           cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            clustering_path = os.path.join(log_directory, "clustering_models.joblib")
            metadata_clustering_models = joblib.load(clustering_path)
            ohe_path = os.path.join(log_directory, "one_hot_encoder.joblib")
            one_hot_encoder = joblib.load(ohe_path)
            ds_test = imed_film.normalize_metadata(ds_test, metadata_clustering_models,
                                                   context["debugging"], model_params['metadata'])
            model_params.update({"film_onehotencoder": one_hot_encoder,
                                 "n_metadata": len([ll for l in one_hot_encoder.categories_ for ll in l])})

        # RUN INFERENCE
        pred_metrics = imed_testing.test(model_params=model_params,
                                         dataset_test=ds_test,
                                         testing_params=testing_params,
                                         log_directory=log_directory,
                                         device=device,
                                         cuda_available=cuda_available,
                                         metric_fns=metric_fns,
                                         postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(bids_path=loader_params['bids_path'],
                                              log_directory=log_directory,
                                              target_suffix=loader_params["target_suffix"],
                                              eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics

    if command == 'segment':
        bids_ds = bids.BIDS(context["loader_parameters"]["bids_path"])
        df = bids_ds.participants.content
        subj_lst = df['participant_id'].tolist()
        bids_subjects = [s for s in bids_ds.get_subjects() if s.record["subject_id"] in subj_lst]

        # Add postprocessing to packaged model
        path_model = os.path.join(context['log_directory'], context['model_name'])
        path_model_config = os.path.join(path_model, context['model_name'] + ".json")
        model_config = imed_config_manager.load_json(path_model_config)
        model_config['postprocessing'] = context['postprocessing']
        with open(path_model_config, 'w') as fp:
            json.dump(model_config, fp, indent=4)

        options = None
        for subject in bids_subjects:
            fname_img = subject.record["absolute_path"]
            if 'film_layers' in model_params and any(model_params['film_layers']) and model_params['metadata']:
                subj_id = subject.record['subject_id']
                metadata = df[df['participant_id'] == subj_id][model_params['metadata']].values[0]
                options = {'metadata': metadata}
            pred = imed_inference.segment_volume(path_model,
                                                 fname_image=fname_img,
                                                 gpu_number=context['gpu'],
                                                 options=options)
            pred_path = os.path.join(context['log_directory'], "pred_masks")
            if not os.path.exists(pred_path):
                os.makedirs(pred_path)
            filename = subject.record['subject_id'] + "_" + subject.record['modality'] + "_pred" + ".nii.gz"
            nib.save(pred, os.path.join(pred_path, filename))
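# Usage sketch (illustrative, not part of the module): driving run_command from a JSON
# configuration. The path "config/config.json" is a hypothetical example; any config with
# the keys documented in the docstring above would work.
def _example_train_run():
    context = imed_config_manager.load_json("config/config.json")  # hypothetical config path
    context["command"] = "train"
    best_train_dice, best_train_loss, best_valid_dice, best_valid_loss = \
        run_command(context, thr_increment=0.1)
    return best_valid_loss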
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands
    are run via this function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
            parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the output path.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the output directory specified with flag
            "--path-output" or via the config file "output_path") to resume training. This training state is saved
            every time a new best model is saved in the output directory.

    Returns:
        float or pandas.DataFrame or None:
            * If "train" command: Returns floats: best loss score for both training and validation.
            * If "test" command: Returns a pandas DataFrame of metrics computed for each subject of
              the testing sub-dataset and returns the prediction metrics before evaluation.
            * If "segment" command: No return value.
    """
    command = copy.deepcopy(context["command"])
    path_output = set_output_path(context)

    # Create a log with the version of the Ivadomed software and the version of the Annexed dataset (if present)
    create_dataset_and_ivadomed_version_log(context)

    cuda_available, device = imed_utils.define_device(context['gpu_ids'][0])

    # BACKWARDS COMPATIBILITY: If bids_path is string, assign to list - Do this here so it propagates to all functions
    context['loader_parameters']['path_data'] = imed_utils.format_path_data(context['loader_parameters']['path_data'])

    # Loader params
    loader_params = set_loader_params(context, command == "train")

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params, loader_params = set_model_params(context, loader_params)

    if command == 'segment':
        run_segment_command(context, model_params)
        return

    # Get subject lists. "segment" command uses all participants of data path, hence no need to split
    train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
        context["split_dataset"], context['loader_parameters']['path_data'], path_output,
        context["loader_parameters"]['subject_selection'])

    # TESTING PARAMS
    # Aleatoric uncertainty
    if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({'target_suffix': loader_params["target_suffix"],
                           'undo_transforms': undo_transforms,
                           'slice_axis': loader_params['slice_axis']})

    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])

    # Check if multiple raters
    check_multiple_raters(command != "train", loader_params)

    if command == 'train':
        # Get Validation dataset
        ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device, 'validation')

        # Get Training dataset
        ds_train = get_dataset(loader_params, train_lst, transform_train_params, cuda_available, device, 'training')

        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            model_params, ds_train, ds_valid, train_onehotencoder = \
                film_normalize_data(context, model_params, ds_train, ds_valid, path_output)
        else:
            train_onehotencoder = None

        # Model directory
        create_path_model(context, model_params, ds_train, path_output, train_onehotencoder)

        save_config_file(context, path_output)

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            path_output=path_output,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # If command == train, then ds_valid is already loaded
            # Get Validation dataset
            ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device,
                                   'validation')
        # Get Training dataset with no Data Augmentation
        ds_train = get_dataset(loader_params, train_lst, transform_valid_params, cuda_available, device, 'training')

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params["name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(path_output, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(path_output, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}
        save_config_file(context, path_output)

    if command == 'train':
        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(**{**loader_params,
                                              **{'data_list': test_lst,
                                                 'transforms_params': transformation_dict,
                                                 'dataset_type': 'testing',
                                                 'requires_undo': True}},
                                           device=device,
                                           cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            ds_test, model_params = update_film_model_params(context, ds_test, model_params, path_output)

        # RUN INFERENCE
        pred_metrics = imed_testing.test(model_params=model_params,
                                         dataset_test=ds_test,
                                         testing_params=testing_params,
                                         path_output=path_output,
                                         device=device,
                                         cuda_available=cuda_available,
                                         metric_fns=metric_fns,
                                         postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(path_data=loader_params['path_data'],
                                              path_output=path_output,
                                              target_suffix=loader_params["target_suffix"],
                                              eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics
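# Usage sketch (illustrative): the refactored entry point with the "test" command, which
# returns a per-subject metrics DataFrame plus the pre-evaluation prediction metrics.
# "config/config.json" is again a hypothetical path.
def _example_test_run():
    context = imed_config_manager.load_json("config/config.json")  # hypothetical config path
    context["command"] = "test"
    df_results, pred_metrics = run_command(context)
    print(df_results.head())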
def test_inference(transforms_dict, test_lst, target_lst, roi_params, testing_params):
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True}
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['T2w'], "balance": {}},
        "path_data": [__data_testing_dir__],
        "target_suffix": target_lst,
        "roi_params": roi_params,
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": True
        },
        "slice_axis": SLICE_AXIS,
        "multichannel": False
    }
    loader_params.update({"model_params": model_params})

    # Get Testing dataset
    ds_test = imed_loader.load_dataset(**loader_params)
    test_loader = DataLoader(ds_test, batch_size=BATCH_SIZE,
                             shuffle=False, pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)

    # Undo transform
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    # Update testing_params
    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform
    })

    # Model
    model = imed_models.Unet()

    if cuda_available:
        model.cuda()
    model.eval()

    metric_fns = [imed_metrics.dice_score,
                  imed_metrics.hausdorff_score,
                  imed_metrics.precision_score,
                  imed_metrics.recall_score,
                  imed_metrics.specificity_score,
                  imed_metrics.intersection_over_union,
                  imed_metrics.accuracy_score]

    metric_mgr = imed_metrics.MetricManager(metric_fns)

    if not os.path.isdir(__output_dir__):
        os.makedirs(__output_dir__)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=__output_dir__,
                                                   cuda_available=cuda_available)

    metric_mgr(preds_npy, gt_npy)
    metrics_dict = metric_mgr.get_results()
    metric_mgr.reset()
    print(metrics_dict)
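# The MetricManager accumulation pattern used above, in isolation: a minimal sketch with
# toy binary arrays, assuming (as the test does) that each metric function takes a
# (prediction, ground-truth) pair of array-like samples.
def _example_metric_manager():
    import numpy as np
    toy_preds = np.array([[[0, 1], [1, 1]]])  # one toy 2D binary prediction
    toy_gts = np.array([[[0, 1], [0, 1]]])    # matching toy ground truth
    metric_mgr = imed_metrics.MetricManager([imed_metrics.dice_score,
                                             imed_metrics.precision_score])
    metric_mgr(toy_preds, toy_gts)   # accumulate one batch of samples
    print(metric_mgr.get_results())  # metrics averaged over everything accumulated so far
    metric_mgr.reset()               # clear state between evaluation runs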
def test_inference_target_suffix(download_data_testing_test_files, transforms_dict, test_lst, target_lst, roi_params,
                                 testing_params):
    """
    This test checks that the filenames of the predictions saved as NIfTI files in the pred_masks
    dir conform to the target_suffix, independent of the underscores it contains: _seg-axon-manual
    and _seg-axon_manual should yield the same filenames.
    (c.f: https://github.com/ivadomed/ivadomed/issues/1135)
    """
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True, "out_channel": 3}
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['SEM'], "balance": {}},
        "path_data": [str(Path(__data_testing_dir__, "microscopy_png"))],
        "bids_config": f"{path_repo_root}/ivadomed/config/config_bids.json",
        "target_suffix": target_lst,
        "extensions": [".png"],
        "roi_params": roi_params,
        "slice_filter_params": {"filter_empty_mask": False, "filter_empty_input": True},
        "patch_filter_params": {"filter_empty_mask": False, "filter_empty_input": False},
        "slice_axis": SLICE_AXIS,
        "multichannel": False
    }
    loader_params.update({"model_params": model_params})

    # Restructure the dataset: rename the ground truth files so the last hyphen of the
    # target suffix becomes an underscore (e.g. "-manual" -> "_manual")
    gt_path = f'{loader_params["path_data"][0]}/derivatives/labels/'
    for file_path in Path(gt_path).rglob('*.png'):
        src_filename = file_path.resolve()
        dst_filename = '_'.join(str(src_filename).rsplit('-', 1))
        src_filename.rename(Path(dst_filename))

    bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)

    ds_test = imed_loader.load_dataset(bids_df, **loader_params)
    test_loader = DataLoader(ds_test, batch_size=BATCH_SIZE,
                             shuffle=False, pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)

    # Undo transform
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    # Update testing_params
    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform
    })

    # Model
    model = imed_models.Unet(out_channel=model_params['out_channel'])

    if cuda_available:
        model.cuda()
    model.eval()

    if not __output_dir__.is_dir():
        __output_dir__.mkdir(parents=True, exist_ok=True)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=str(__output_dir__),
                                                   cuda_available=cuda_available)

    for x in __output_dir__.iterdir():
        if x.name.endswith('_pred.nii.gz'):
            assert x.name.rsplit('_', 1)[0].endswith(loader_params['contrast_params']['contrast_lst'][-1]), (
                'Incompatible filename(s) of the prediction(s) saved as NIfTI file(s)!')
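# The renaming trick above in isolation: rsplit on the *last* hyphen and rejoin with an
# underscore, so a "-manual" suffix becomes "_manual" while earlier hyphens (e.g. in
# "sub-01") are untouched. The filename is illustrative:
src = "sub-01_SEM_seg-axon-manual.png"
dst = '_'.join(src.rsplit('-', 1))
assert dst == "sub-01_SEM_seg-axon_manual.png"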
def test_inference_2d_microscopy(download_data_testing_test_files, transforms_dict, test_lst, target_lst, roi_params,
                                 testing_params):
    """
    This test checks if the number of NIfTI predictions equals the number of test subjects on 2d microscopy data.
    Used to catch a bug where the last slice of the last volume wasn't appended to the prediction
    (see: https://github.com/ivadomed/ivadomed/issues/823)
    Also tests the conversions to PNG predictions when source files are not NIfTI and checks if the number of PNG
    predictions is 2x the number of test subjects (2-class model, outputs 1 PNG per class per subject).
    """
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True, "out_channel": 3}
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['SEM'], "balance": {}},
        "path_data": [str(Path(__data_testing_dir__, "microscopy_png"))],
        "bids_config": f"{path_repo_root}/ivadomed/config/config_bids.json",
        "target_suffix": target_lst,
        "extensions": [".png"],
        "roi_params": roi_params,
        "slice_filter_params": {"filter_empty_mask": False, "filter_empty_input": True},
        "patch_filter_params": {"filter_empty_mask": False, "filter_empty_input": False},
        "slice_axis": SLICE_AXIS,
        "multichannel": False
    }
    loader_params.update({"model_params": model_params})

    bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)

    # Get Testing dataset
    ds_test = imed_loader.load_dataset(bids_df, **loader_params)
    test_loader = DataLoader(ds_test, batch_size=BATCH_SIZE,
                             shuffle=False, pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)

    # Undo transform
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    # Update testing_params
    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform
    })

    # Model
    model = imed_models.Unet(out_channel=model_params['out_channel'])

    if cuda_available:
        model.cuda()
    model.eval()

    if not __output_dir__.is_dir():
        __output_dir__.mkdir(parents=True, exist_ok=True)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=str(__output_dir__),
                                                   cuda_available=cuda_available)

    assert len([x for x in __output_dir__.iterdir() if x.name.endswith(".nii.gz")]) == len(test_lst)
    assert len([x for x in __output_dir__.iterdir() if x.name.endswith(".png")]) == 2 * len(test_lst)
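# Why the 2x factor in the PNG assertion above: out_channel=3 corresponds to a background
# channel plus 2 foreground classes, and one PNG is written per foreground class per
# subject. Toy arithmetic with illustrative values:
out_channel = 3
n_test_subjects = 4  # hypothetical test list size
expected_png = (out_channel - 1) * n_test_subjects
assert expected_png == 2 * n_test_subjects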
# Update loader params
if 'object_detection_params' in context:
    object_detection_params = context['object_detection_params']
    object_detection_params.update({"gpu_ids": context['gpu_ids'][0],
                                    "path_output": context['path_output']})
    loader_params.update({"object_detection_params": object_detection_params})

loader_params.update({"model_params": model_params})

# TESTING PARAMS
# Aleatoric uncertainty
if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
    transformation_dict = transform_train_params
else:
    transformation_dict = transform_test_params
undo_transforms = imed_transforms.UndoCompose(
    imed_transforms.Compose(transformation_dict, requires_undo=True))
testing_params = copy.deepcopy(context["training_parameters"])
testing_params.update({'uncertainty': context["uncertainty"]})
testing_params.update({'target_suffix': loader_params["target_suffix"],
                       'undo_transforms': undo_transforms,
                       'slice_axis': loader_params['slice_axis']})

if command == "train":
    imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
    imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
elif command == "test":
    imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])
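# Sketch of the test-time-augmentation idea behind the aleatoric branch above (toy tensors
# and a stand-in identity-plus-noise "model"; not ivadomed's actual implementation): the
# model is run n_it times under the random *training* transforms, and the spread of the
# resulting predictions is the aleatoric uncertainty estimate.
def _example_aleatoric_sketch():
    import torch
    x = torch.ones(1, 1, 4, 4)  # toy input volume
    n_it = 10                   # number of augmented forward passes
    preds = torch.stack([x + 0.1 * torch.randn_like(x)  # hypothetical augmented pass
                         for _ in range(n_it)])
    return preds.std(dim=0)     # per-voxel spread across the augmented passes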