def adjust_transforms(transforms, seg_pair, length=None, stride=None):
    """Adapt the transforms by adding a BoundingBoxCrop transform according to the specific parameters of an image.

    The dimensions of the crop are also adapted to fit the length and stride parameters if the 3D loader is used.

    Args:
        transforms (Compose): Preprocessing transforms.
        seg_pair (dict): Segmentation pair (input, gt and metadata).
        length (list or tuple): Patch size of the 3D loader.
        stride (list or tuple): Stride value of the 3D loader.

    Returns:
        Compose: Modified transforms.
    """
    resample_idx = [-1, -1, -1]
    if transforms is None:
        transforms = imed_transforms.Compose({})

    for i, img_type in enumerate(transforms.transform):
        transform_list = transforms.transform[img_type].transforms
        # Drop any previously inserted BoundingBoxCrop BEFORE locating Resample.
        # NOTE: the previous implementation popped from the list while enumerating
        # it, which skips the element right after the removed one — a Resample
        # placed immediately after a BoundingBoxCrop was missed, so the crop was
        # re-inserted at the wrong position.
        transform_list[:] = [transfo for transfo in transform_list
                             if transfo.__class__.__name__ != "BoundingBoxCrop"]
        for idx, transfo in enumerate(transform_list):
            if transfo.__class__.__name__ == "Resample":
                resample_idx[i] = idx

    # Compute this image's bounding box and store it in the pair's metadata
    resample_bounding_box(seg_pair, transforms)

    index_shape = []
    for i, img_type in enumerate(transforms.transform):
        x_min, x_max, y_min, y_max, z_min, z_max = seg_pair['input_metadata'][0]['bounding_box']
        size = [x_max - x_min, y_max - y_min, z_max - z_min]

        if length is not None and stride is not None:
            # Make sure the crop is at least as large as one 3D patch
            for idx, dim in enumerate(size):
                if dim < length[idx]:
                    size[idx] = length[idx]
            # Adjust size according to stride to avoid dimension mismatch
            size = resize_to_multiple(size, stride, length)

        index_shape.append(tuple(size))
        # Insert the crop right after Resample; when no Resample is present,
        # resample_idx stays -1 so the crop lands at position 0.
        transform_obj = imed_transforms.BoundingBoxCrop(size=size)
        transforms.transform[img_type].transforms.insert(resample_idx[i] + 1, transform_obj)

    for metadata in seg_pair['input_metadata']:
        # Every image type must have produced the same crop shape
        assert len(set(index_shape)) == 1
        metadata['index_shape'] = index_shape[0]

    return transforms
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands
    are run via this function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice.
            The parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the log directory.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the log_directory) for resume training.
            This training state is saved everytime a new best model is saved in the log directory.

    Returns:
        Float or pandas Dataframe:
        If "train" command: Returns floats: best loss score for both training and validation.
        If "test" command: Returns a pandas Dataframe: of metrics computed for each subject of the testing
            sub dataset and return the prediction metrics before evaluation.
        If "segment" command: No return value.
    """
    command = copy.deepcopy(context["command"])
    log_directory = copy.deepcopy(context["log_directory"])
    if not os.path.isdir(log_directory):
        print('Creating log directory: {}'.format(log_directory))
        os.makedirs(log_directory)
    else:
        print('Log directory already exists: {}'.format(log_directory))

    # Define device
    cuda_available, device = imed_utils.define_device(context['gpu'])

    # Get subject lists
    train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
        context["split_dataset"], context['loader_parameters']['bids_path'],
        log_directory)

    # Loader params
    loader_params = copy.deepcopy(context["loader_parameters"])
    # Pick the contrasts relevant to the current command (train vs test)
    if command == "train":
        loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["training_validation"]
    else:
        loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["testing"]
    if "FiLMedUnet" in context and context["FiLMedUnet"]["applied"]:
        loader_params.update({"metadata_type": context["FiLMedUnet"]["metadata"]})

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params = copy.deepcopy(context["default_model"])
    model_params["folder_name"] = copy.deepcopy(context["model_name"])
    # Models explicitly enabled ("applied": true) in the configuration
    model_context_list = [model_name for model_name in MODEL_LIST
                          if model_name in context and context[model_name]["applied"]]
    if len(model_context_list) == 1:
        model_params["name"] = model_context_list[0]
        model_params.update(context[model_context_list[0]])
    elif 'Modified3DUNet' in model_context_list and 'FiLMedUnet' in model_context_list and len(model_context_list) == 2:
        # FiLM can be combined with the 3D UNet; merge both parameter sets
        model_params["name"] = 'Modified3DUNet'
        for i in range(len(model_context_list)):
            model_params.update(context[model_context_list[i]])
    elif len(model_context_list) > 1:
        print('ERROR: Several models are selected in the configuration file: {}.'
              'Please select only one (i.e. only one where: "applied": true).'.format(model_context_list))
        exit()

    model_params['is_2d'] = False if "Modified3DUNet" in model_params['name'] else model_params['is_2d']
    # Get in_channel from contrast_lst
    if loader_params["multichannel"]:
        model_params["in_channel"] = len(loader_params["contrast_params"]["contrast_lst"])
    else:
        model_params["in_channel"] = 1
    # Get out_channel from target_suffix
    model_params["out_channel"] = len(loader_params["target_suffix"])
    # If multi-class output, then add background class
    if model_params["out_channel"] > 1:
        model_params.update({"out_channel": model_params["out_channel"] + 1})
    # Display for spec' check
    imed_utils.display_selected_model_spec(params=model_params)
    # Update loader params
    if 'object_detection_params' in context:
        object_detection_params = context['object_detection_params']
        object_detection_params.update({"gpu": context['gpu'],
                                        "log_directory": context['log_directory']})
        loader_params.update({"object_detection_params": object_detection_params})

    loader_params.update({"model_params": model_params})

    # TESTING PARAMS
    # Aleatoric uncertainty: test-time augmentation reuses the training transforms
    if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({'target_suffix': loader_params["target_suffix"],
                           'undo_transforms': undo_transforms,
                           'slice_axis': loader_params['slice_axis']})

    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])

    if command == 'train':
        # LOAD DATASET
        # Get Validation dataset
        ds_valid = imed_loader.load_dataset(**{**loader_params,
                                               **{'data_list': valid_lst,
                                                  'transforms_params': transform_valid_params,
                                                  'dataset_type': 'validation'}},
                                            device=device, cuda_available=cuda_available)
        # Get Training dataset
        ds_train = imed_loader.load_dataset(**{**loader_params,
                                               **{'data_list': train_lst,
                                                  'transforms_params': transform_train_params,
                                                  'dataset_type': 'training'}},
                                            device=device, cuda_available=cuda_available)
        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            # Normalize metadata before sending to the FiLM network
            results = imed_film.get_film_metadata_models(ds_train=ds_train,
                                                         metadata_type=model_params['metadata'],
                                                         debugging=context["debugging"])
            ds_train, train_onehotencoder, metadata_clustering_models = results
            ds_valid = imed_film.normalize_metadata(ds_valid, metadata_clustering_models,
                                                    context["debugging"], model_params['metadata'])

            model_params.update({"film_onehotencoder": train_onehotencoder,
                                 "n_metadata": len([ll for l in train_onehotencoder.categories_ for ll in l])})

            # Persist the encoders so "test"/"segment" can reproduce the normalization
            joblib.dump(metadata_clustering_models, "./" + log_directory + "/clustering_models.joblib")
            joblib.dump(train_onehotencoder, "./" + log_directory + "/one_hot_encoder.joblib")

        # Model directory
        path_model = os.path.join(log_directory, context["model_name"])
        if not os.path.isdir(path_model):
            print('Creating model directory: {}'.format(path_model))
            os.makedirs(path_model)
            if 'film_layers' in model_params and any(model_params['film_layers']):
                joblib.dump(train_onehotencoder, os.path.join(path_model, "one_hot_encoder.joblib"))
                if 'metadata_dict' in ds_train[0]['input_metadata'][0]:
                    metadata_dict = ds_train[0]['input_metadata'][0]['metadata_dict']
                    joblib.dump(metadata_dict, os.path.join(path_model, "metadata_dict.joblib"))
        else:
            print('Model directory already exists: {}'.format(path_model))

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            log_directory=log_directory,
            device=device, cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # If command == train, then ds_valid already load
            # Get Validation dataset
            ds_valid = imed_loader.load_dataset(**{**loader_params,
                                                   **{'data_list': valid_lst,
                                                      'transforms_params': transform_valid_params,
                                                      'dataset_type': 'validation'}},
                                                device=device, cuda_available=cuda_available)
        # Get Training dataset with no Data Augmentation
        ds_train = imed_loader.load_dataset(**{**loader_params,
                                               **{'data_list': train_lst,
                                                  'transforms_params': transform_valid_params,
                                                  'dataset_type': 'training'}},
                                            device=device, cuda_available=cuda_available)

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params["name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(log_directory, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(log_directory, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}

    if command == 'train':
        # Save config file within log_directory and log_directory/model_name
        # Done after the threshold_analysis to propagate this info in the config files
        with open(os.path.join(log_directory, "config_file.json"), 'w') as fp:
            json.dump(context, fp, indent=4)
        with open(os.path.join(log_directory, context["model_name"], context["model_name"] + ".json"), 'w') as fp:
            json.dump(context, fp, indent=4)

        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(**{**loader_params,
                                              **{'data_list': test_lst,
                                                 'transforms_params': transformation_dict,
                                                 'dataset_type': 'testing',
                                                 'requires_undo': True}},
                                           device=device, cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            # Reload the encoders fitted during training to normalize test metadata
            clustering_path = os.path.join(log_directory, "clustering_models.joblib")
            metadata_clustering_models = joblib.load(clustering_path)
            ohe_path = os.path.join(log_directory, "one_hot_encoder.joblib")
            one_hot_encoder = joblib.load(ohe_path)
            ds_test = imed_film.normalize_metadata(ds_test, metadata_clustering_models,
                                                   context["debugging"], model_params['metadata'])
            model_params.update({"film_onehotencoder": one_hot_encoder,
                                 "n_metadata": len([ll for l in one_hot_encoder.categories_ for ll in l])})

        # RUN INFERENCE
        pred_metrics = imed_testing.test(model_params=model_params,
                                         dataset_test=ds_test,
                                         testing_params=testing_params,
                                         log_directory=log_directory,
                                         device=device, cuda_available=cuda_available,
                                         metric_fns=metric_fns,
                                         postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(bids_path=loader_params['bids_path'],
                                              log_directory=log_directory,
                                              target_suffix=loader_params["target_suffix"],
                                              eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics

    if command == 'segment':
        bids_ds = bids.BIDS(context["loader_parameters"]["bids_path"])
        df = bids_ds.participants.content
        subj_lst = df['participant_id'].tolist()
        bids_subjects = [s for s in bids_ds.get_subjects() if s.record["subject_id"] in subj_lst]

        # Add postprocessing to packaged model
        path_model = os.path.join(context['log_directory'], context['model_name'])
        path_model_config = os.path.join(path_model, context['model_name'] + ".json")
        model_config = imed_config_manager.load_json(path_model_config)
        model_config['postprocessing'] = context['postprocessing']
        with open(path_model_config, 'w') as fp:
            json.dump(model_config, fp, indent=4)

        options = None
        for subject in bids_subjects:
            fname_img = subject.record["absolute_path"]
            if 'film_layers' in model_params and any(model_params['film_layers']) and model_params['metadata']:
                subj_id = subject.record['subject_id']
                metadata = df[df['participant_id'] == subj_id][model_params['metadata']].values[0]
                options = {'metadata': metadata}
            pred = imed_inference.segment_volume(path_model,
                                                 fname_image=fname_img,
                                                 gpu_number=context['gpu'],
                                                 options=options)
            pred_path = os.path.join(context['log_directory'], "pred_masks")
            if not os.path.exists(pred_path):
                os.makedirs(pred_path)
            filename = subject.record['subject_id'] + "_" + subject.record['modality'] + "_pred" + ".nii.gz"
            nib.save(pred, os.path.join(pred_path, filename))
def run_visualization(input, config, number, output, roi):
    """Utility function to visualize Data Augmentation transformations.

    Data augmentation is a key part of the Deep Learning training scheme. This script aims at facilitating the
    fine-tuning of data augmentation parameters. To do so, this script provides a step-by-step visualization of the
    transformations that are applied on data.

    This function applies a series of transformations (defined in a configuration file ``-c``) to ``-n`` 2D slices
    randomly extracted from an input image (``-i``), and save as png the resulting sample after each transform.

    For example::

        ivadomed_visualize_transforms -i t2s.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Provides a visualization of a series of three transformation on a randomly selected slice:

    .. image:: https://raw.githubusercontent.com/ivadomed/doc-figures/main/scripts/transforms_im.png
        :width: 600px
        :align: center

    And on a binary mask::

        ivadomed_visualize_transforms -i t2s_gmseg.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Gives:

    .. image:: https://raw.githubusercontent.com/ivadomed/doc-figures/main/scripts/transforms_gt.png
        :width: 600px
        :align: center

    Args:
        input (string): Image filename. Flag: ``--input``, ``-i``
        config (string): Configuration file filename. Flag: ``--config``, ``-c``
        number (int): Number of slices randomly extracted. Flag: ``--number``, ``-n``
        output (string): Folder path where the results are saved. Flag: ``--ofolder``, ``-o``
        roi (string): Filename of the region of interest. Only needed if ROICrop is part of the transformations.
            Flag: ``--roi``, ``-r``
    """
    # Load context
    context = imed_config_manager.ConfigurationManager(config).get_config()
    # Create output folder
    if not Path(output).is_dir():
        Path(output).mkdir(parents=True)

    # Slice extracted according to below axis
    axis = imed_utils.AXIS_DCT[context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.SLICE_AXIS]]
    # Get data
    input_img, input_data = get_data(input, axis)
    # Image or Mask
    is_mask = np.array_equal(input_data, input_data.astype(bool))
    # Get zooms
    zooms = imed_loader_utils.orient_shapes_hwd(input_img.header.get_zooms(), slice_axis=axis)
    # Get indexes
    indexes = random.sample(range(0, input_data.shape[2]), number)

    # Get training transforms
    training_transforms, _, _ = imed_transforms.get_subdatasets_transforms(context[ConfigKW.TRANSFORMATION])

    if TransformationKW.ROICROP in training_transforms:
        if roi and Path(roi).is_file():
            roi_img, roi_data = get_data(roi, axis)
        else:
            raise ValueError("\nPlease provide ROI image (-r) in order to apply ROICrop transformation.")

    # Compose transforms
    dict_transforms = {}
    stg_transforms = ""
    # FIX: keep, per slice index, the output of the previous transformation step so
    # that the "before" image of step k is the "after" image of step k-1 for the
    # SAME slice. A single shared `after` variable was previously reused, which was
    # unbound on the first use and mixed up slices whenever `number` > 1.
    prev_after = {}
    for transform_name in training_transforms:
        # We skip NumpyToTensor transform since that is only a change of data type
        if transform_name == "NumpyToTensor":
            continue
        # Update stg_transforms
        stg_transforms += transform_name + "_"

        # Add new transform to Compose
        dict_transforms.update({transform_name: training_transforms[transform_name]})
        composed_transforms = imed_transforms.Compose(dict_transforms)

        # Loop across slices
        for i in indexes:
            data = [input_data[:, :, i]]
            # Init metadata
            metadata = SampleMetadata({MetadataKW.ZOOMS: zooms, MetadataKW.DATA_TYPE: "gt" if is_mask else "im"})

            # Apply transformations to ROI
            if TransformationKW.CENTERCROP in training_transforms or \
                    (TransformationKW.ROICROP in training_transforms and Path(roi).is_file()):
                metadata.__setitem__(MetadataKW.CROP_PARAMS, {})

            # Apply transformations to image
            # NOTE(review): `data` holds one slice but `number` metadata copies are
            # passed — presumably Compose only consumes the first; confirm upstream.
            stack_im, _ = composed_transforms(sample=data,
                                              metadata=[metadata for _ in range(number)],
                                              data_type="im")

            # Plot before / after transformation
            fname_out = str(Path(output, stg_transforms + "slice" + str(i) + ".png"))
            logger.debug(f"Fname out: {fname_out}.")
            logger.debug(f"\t{dict(metadata)}")
            # rescale intensities: "before" is the raw slice on the first step,
            # otherwise this slice's output from the previous step
            if i not in prev_after:
                before = np.rot90(imed_maths.rescale_values_array(data[0], 0.0, 1.0))
            else:
                before = prev_after[i]
            if isinstance(stack_im[0], torch.Tensor):
                after = np.rot90(imed_maths.rescale_values_array(stack_im[0].numpy(), 0.0, 1.0))
            else:
                after = np.rot90(imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
            prev_after[i] = after
            # Plot
            imed_utils.plot_transformed_sample(before,
                                               after,
                                               list_title=["\n".join(stg_transforms[:-1].split("_")[:-1]),
                                                           "\n".join(stg_transforms[:-1].split("_"))],
                                               fname_out=fname_out,
                                               cmap="jet" if is_mask else "gray")
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands
    are run via this function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice.
            The parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the output path.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the output directory specified with flag
            "--path-output" or via the config file "output_path" argument) for resume training. This training state
            is saved everytime a new best model is saved in the output directory.

    Returns:
        float or pandas.DataFrame or None:
            * If "train" command: Returns floats: best loss score for both training and validation.
            * If "test" command: Returns a pandas Dataframe: of metrics computed for each subject of
              the testing sub-dataset and return the prediction metrics before evaluation.
            * If "segment" command: No return value.
    """
    command = copy.deepcopy(context["command"])
    path_output = set_output_path(context)

    # Create a log with the version of the Ivadomed software and the version of the Annexed dataset (if present)
    create_dataset_and_ivadomed_version_log(context)

    cuda_available, device = imed_utils.define_device(context['gpu_ids'][0])

    # BACKWARDS COMPATIBILITY: If bids_path is string, assign to list - Do this here so it propagates to all functions
    context['loader_parameters']['path_data'] = imed_utils.format_path_data(context['loader_parameters']['path_data'])

    # Loader params
    loader_params = set_loader_params(context, command == "train")

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params, loader_params = set_model_params(context, loader_params)

    if command == 'segment':
        run_segment_command(context, model_params)
        return

    # Get subject lists. "segment" command uses all participants of data path, hence no need to split
    train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
        context["split_dataset"],
        context['loader_parameters']['path_data'],
        path_output,
        context["loader_parameters"]['subject_selection'])

    # TESTING PARAMS
    # Aleatoric uncertainty: test-time augmentation reuses the training transforms
    if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({'target_suffix': loader_params["target_suffix"],
                           'undo_transforms': undo_transforms,
                           'slice_axis': loader_params['slice_axis']})

    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])

    # Check if multiple raters
    check_multiple_raters(command != "train", loader_params)

    if command == 'train':
        # Get Validation dataset
        ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device, 'validation')
        # Get Training dataset
        ds_train = get_dataset(loader_params, train_lst, transform_train_params, cuda_available, device, 'training')
        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            model_params, ds_train, ds_valid, train_onehotencoder = \
                film_normalize_data(context, model_params, ds_train, ds_valid, path_output)
        else:
            train_onehotencoder = None

        # Model directory
        create_path_model(context, model_params, ds_train, path_output, train_onehotencoder)

        save_config_file(context, path_output)

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            path_output=path_output,
            device=device, cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # If command == train, then ds_valid already load
            # Get Validation dataset
            ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device,
                                   'validation')
        # Get Training dataset with no Data Augmentation
        ds_train = get_dataset(loader_params, train_lst, transform_valid_params, cuda_available, device, 'training')

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params["name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(path_output, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(path_output, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}
        save_config_file(context, path_output)

    if command == 'train':
        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(**{**loader_params,
                                              **{'data_list': test_lst,
                                                 'transforms_params': transformation_dict,
                                                 'dataset_type': 'testing',
                                                 'requires_undo': True}},
                                           device=device, cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            ds_test, model_params = update_film_model_params(context, ds_test, model_params, path_output)

        # RUN INFERENCE
        pred_metrics = imed_testing.test(model_params=model_params,
                                         dataset_test=ds_test,
                                         testing_params=testing_params,
                                         path_output=path_output,
                                         device=device, cuda_available=cuda_available,
                                         metric_fns=metric_fns,
                                         postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(path_data=loader_params['path_data'],
                                              path_output=path_output,
                                              target_suffix=loader_params["target_suffix"],
                                              eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics
def test_inference(transforms_dict, test_lst, target_lst, roi_params, testing_params):
    """Run the 2D Unet inference pipeline end-to-end on the testing split and print the metrics."""
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True}

    # Loader configuration for the testing sub-dataset (model params included directly)
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['T2w'], "balance": {}},
        "path_data": [__data_testing_dir__],
        "target_suffix": target_lst,
        "roi_params": roi_params,
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": True
        },
        "slice_axis": SLICE_AXIS,
        "multichannel": False,
        "model_params": model_params,
    }

    # Testing dataset and its loader
    dataset_test = imed_loader.load_dataset(**loader_params)
    loader = DataLoader(dataset_test,
                        batch_size=BATCH_SIZE,
                        shuffle=False,
                        pin_memory=True,
                        collate_fn=imed_loader_utils.imed_collate,
                        num_workers=0)

    # Undo transform, used to map predictions back to native space
    undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    # Update testing_params
    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": undo_transforms
    })

    # Build the model in eval mode
    model = imed_models.Unet()
    if cuda_available:
        model.cuda()
    model.eval()

    metric_mgr = imed_metrics.MetricManager([imed_metrics.dice_score,
                                             imed_metrics.hausdorff_score,
                                             imed_metrics.precision_score,
                                             imed_metrics.recall_score,
                                             imed_metrics.specificity_score,
                                             imed_metrics.intersection_over_union,
                                             imed_metrics.accuracy_score])

    if not os.path.isdir(__output_dir__):
        os.makedirs(__output_dir__)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=__output_dir__,
                                                   cuda_available=cuda_available)

    metric_mgr(preds_npy, gt_npy)
    results = metric_mgr.get_results()
    metric_mgr.reset()
    print(results)
def run_visualization(input, config, number, output, roi):
    """Utility function to visualize Data Augmentation transformations.

    Data augmentation is a key part of the Deep Learning training scheme. This script aims at facilitating the
    fine-tuning of data augmentation parameters. To do so, this script provides a step-by-step visualization of the
    transformations that are applied on data.

    This function applies a series of transformations (defined in a configuration file ``-c``) to ``-n`` 2D slices
    randomly extracted from an input image (``-i``), and save as png the resulting sample after each transform.

    For example::

        ivadomed_visualize_transforms -i t2s.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Provides a visualization of a series of three transformation on a randomly selected slice:

    .. image:: ../../images/transforms_im.png
        :width: 600px
        :align: center

    And on a binary mask::

        ivadomed_visualize_transforms -i t2s_gmseg.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Gives:

    .. image:: ../../images/transforms_gt.png
        :width: 600px
        :align: center

    Args:
        input (string): Image filename. Flag: ``--input``, ``-i``
        config (string): Configuration file filename. Flag: ``--config``, ``-c``
        number (int): Number of slices randomly extracted. Flag: ``--number``, ``-n``
        output (string): Folder path where the results are saved. Flag: ``--ofolder``, ``-o``
        roi (string): Filename of the region of interest. Only needed if ROICrop is part of the transformations.
            Flag: ``--roi``, ``-r``
    """
    # Load context
    with open(config, "r") as fhandle:
        context = json.load(fhandle)
    # Create output folder
    if not os.path.isdir(output):
        os.makedirs(output)

    # Slice extracted according to below axis
    axis = imed_utils.AXIS_DCT[context["loader_parameters"]["slice_axis"]]
    # Get data
    input_img, input_data = get_data(input, axis)
    # Image or Mask
    is_mask = np.array_equal(input_data, input_data.astype(bool))
    # Get zooms
    zooms = imed_loader_utils.orient_shapes_hwd(input_img.header.get_zooms(), slice_axis=axis)
    # Get indexes
    indexes = random.sample(range(0, input_data.shape[2]), number)

    # Get training transforms
    training_transforms, _, _ = imed_transforms.get_subdatasets_transforms(context["transformation"])

    if "ROICrop" in training_transforms:
        if roi and os.path.isfile(roi):
            roi_img, roi_data = get_data(roi, axis)
        else:
            print("\nPlease provide ROI image (-r) in order to apply ROICrop transformation.")
            exit()

    # Compose transforms
    dict_transforms = {}
    stg_transforms = ""
    # FIX: keep, per slice index, the output of the previous transformation step so
    # that the "before" image of step k is the "after" image of step k-1 for the
    # SAME slice. A single shared `after` variable was previously reused, which was
    # unbound on the first use and mixed up slices whenever `number` > 1.
    prev_after = {}
    for transform_name in training_transforms:
        # We skip NumpyToTensor transform since that is only a change of data type
        if transform_name == "NumpyToTensor":
            continue
        # Update stg_transforms
        stg_transforms += transform_name + "_"

        # Add new transform to Compose
        dict_transforms.update({transform_name: training_transforms[transform_name]})
        composed_transforms = imed_transforms.Compose(dict_transforms)

        # Loop across slices
        for i in indexes:
            data = [input_data[:, :, i]]
            # Init metadata
            metadata = imed_loader_utils.SampleMetadata({
                "zooms": zooms,
                "data_type": "gt" if is_mask else "im"
            })

            # Apply transformations to ROI
            if "CenterCrop" in training_transforms or (
                    "ROICrop" in training_transforms and os.path.isfile(roi)):
                metadata.__setitem__('crop_params', {})

            # Apply transformations to image
            # NOTE(review): `data` holds one slice but `number` metadata copies are
            # passed — presumably Compose only consumes the first; confirm upstream.
            stack_im, _ = composed_transforms(sample=data,
                                              metadata=[metadata for _ in range(number)],
                                              data_type="im")

            # Plot before / after transformation
            fname_out = os.path.join(output, stg_transforms + "slice" + str(i) + ".png")
            print("Fname out: {}.".format(fname_out))
            print("\t{}".format(dict(metadata)))
            # rescale intensities: "before" is the raw slice on the first step,
            # otherwise this slice's output from the previous step
            if i not in prev_after:
                before = np.rot90(imed_maths.rescale_values_array(data[0], 0.0, 1.0))
            else:
                before = prev_after[i]
            after = np.rot90(imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
            prev_after[i] = after
            # Plot
            imed_utils.plot_transformed_sample(before,
                                               after,
                                               list_title=["\n".join(stg_transforms[:-1].split("_")[:-1]),
                                                           "\n".join(stg_transforms[:-1].split("_"))],
                                               fname_out=fname_out,
                                               cmap="jet" if is_mask else "gray")
def test_inference_target_suffix(download_data_testing_test_files, transforms_dict, test_lst,
                                 target_lst, roi_params, testing_params):
    """Check that prediction filenames saved in ``pred_masks`` conform to the target_suffix.

    The resulting filenames must be independent of underscore(s) in the target_suffix:
    ``_seg-axon-manual`` and ``_seg-axon_manual`` should yield the same filename(s).
    (c.f: https://github.com/ivadomed/ivadomed/issues/1135)
    """
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True, "out_channel": 3}
    # Loader configuration for the 2D microscopy testing sub-dataset
    # (model_params folded directly into the dict instead of a later update()).
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['SEM'], "balance": {}},
        "path_data": [str(Path(__data_testing_dir__, "microscopy_png"))],
        "bids_config": f"{path_repo_root}/ivadomed/config/config_bids.json",
        "target_suffix": target_lst,
        "extensions": [".png"],
        "roi_params": roi_params,
        "slice_filter_params": {"filter_empty_mask": False, "filter_empty_input": True},
        "patch_filter_params": {"filter_empty_mask": False, "filter_empty_input": False},
        "slice_axis": SLICE_AXIS,
        "multichannel": False,
        "model_params": model_params,
    }

    # Restructure the dataset on disk: turn the LAST '-' of every ground-truth
    # filename into '_' (e.g. *_seg-axon-manual.png -> *_seg-axon_manual.png),
    # which is the scenario issue #1135 describes.
    labels_dir = f'{loader_params["path_data"][0]}/derivatives/labels/'
    for gt_file in Path(labels_dir).rglob('*.png'):
        source = gt_file.resolve()
        head, sep, tail = str(source).rpartition('-')
        if sep:
            source.rename(Path(f"{head}_{tail}"))

    dataframe = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)
    ds_test = imed_loader.load_dataset(dataframe, **loader_params)
    test_loader = DataLoader(ds_test,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)

    # Undo transform, needed by run_inference to map predictions back to native space.
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform,
    })

    # Untrained model is fine here: only the output *filenames* are under test.
    model = imed_models.Unet(out_channel=model_params['out_channel'])
    if cuda_available:
        model.cuda()
    model.eval()

    if not __output_dir__.is_dir():
        __output_dir__.mkdir(parents=True, exist_ok=True)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=str(__output_dir__),
                                                   cuda_available=cuda_available)

    # Every saved prediction must end with "<contrast>_pred.nii.gz": stripping the
    # trailing "_pred.nii.gz" token must leave a name ending in the contrast.
    expected_contrast = loader_params['contrast_params']['contrast_lst'][-1]
    for pred in __output_dir__.iterdir():
        if pred.name.endswith('_pred.nii.gz'):
            assert pred.name.rsplit('_', 1)[0].endswith(expected_contrast), \
                'Incompatible filename(s) of the prediction(s) saved as NifTI file(s)!'
def test_inference_2d_microscopy(download_data_testing_test_files, transforms_dict, test_lst,
                                 target_lst, roi_params, testing_params):
    """Check the number of predictions produced on 2d microscopy data.

    The number of NifTI predictions must equal the number of test subjects. Used to
    catch a bug where the last slice of the last volume wasn't appended to the
    prediction (see: https://github.com/ivadomed/ivadomed/issues/823)
    Also tests the conversions to PNG predictions when source files are not Nifti and
    checks that the number of PNG predictions is 2x the number of test subjects
    (2-class model, outputs 1 PNG per class per subject).
    """
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True, "out_channel": 3}
    # Loader configuration for the 2D microscopy testing sub-dataset.
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['SEM'], "balance": {}},
        "path_data": [str(Path(__data_testing_dir__, "microscopy_png"))],
        "bids_config": f"{path_repo_root}/ivadomed/config/config_bids.json",
        "target_suffix": target_lst,
        "extensions": [".png"],
        "roi_params": roi_params,
        "slice_filter_params": {"filter_empty_mask": False, "filter_empty_input": True},
        "patch_filter_params": {"filter_empty_mask": False, "filter_empty_input": False},
        "slice_axis": SLICE_AXIS,
        "multichannel": False,
    }
    loader_params["model_params"] = model_params

    dataframe = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)

    # Build the testing dataset and its loader.
    ds_test = imed_loader.load_dataset(dataframe, **loader_params)
    test_loader = DataLoader(ds_test,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)

    # Undo transform, needed by run_inference to map predictions back to native space.
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform,
    })

    # Untrained model is fine here: only the *count* of output files is under test.
    model = imed_models.Unet(out_channel=model_params['out_channel'])
    if cuda_available:
        model.cuda()
    model.eval()

    if not __output_dir__.is_dir():
        __output_dir__.mkdir(parents=True, exist_ok=True)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=str(__output_dir__),
                                                   cuda_available=cuda_available)

    # One NifTI prediction per test subject; two PNGs per subject (one per class).
    n_nifti = sum(1 for f in __output_dir__.iterdir() if f.name.endswith(".nii.gz"))
    n_png = sum(1 for f in __output_dir__.iterdir() if f.name.endswith(".png"))
    assert n_nifti == len(test_lst)
    assert n_png == 2 * len(test_lst)
if 'object_detection_params' in context: object_detection_params = context['object_detection_params'] object_detection_params.update({ "gpu_ids": context['gpu_ids'][0], "path_output": context['path_output'] }) loader_params.update({"object_detection_params": object_detection_params}) loader_params.update({"model_params": model_params}) # TESTING PARAMS # Aleatoric uncertainty if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0: transformation_dict = transform_train_params else: transformation_dict = transform_test_params undo_transforms = imed_transforms.UndoCompose( imed_transforms.Compose(transformation_dict, requires_undo=True)) testing_params = copy.deepcopy(context["training_parameters"]) testing_params.update({'uncertainty': context["uncertainty"]}) testing_params.update({ 'target_suffix': loader_params["target_suffix"], 'undo_transforms': undo_transforms, 'slice_axis': loader_params['slice_axis'] }) if command == "train": imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"]) imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"]) elif command == "test": imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])