Code Example #1
def test_object_detection_inference():
    fname_images = ["testing_data/sub-unf01/anat/sub-unf01_T1w.nii.gz"]

    # Detection
    nib_detection, _ = imed_inference.segment_volume(folder_model="findcord_tumor", fname_images=fname_images)
    detection_file = "detection.nii.gz"
    nib.save(nib_detection[0], detection_file)

    # Segmentation
    imed_inference.segment_volume(folder_model="t2_tumor", fname_images=fname_images,
                                  options={'fname_prior': detection_file})
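
A minimal sketch reusing the detection model and test image from above; `segment_volume` returns one nibabel image per target class together with its target suffix (import paths are assumed):

import nibabel as nib
from ivadomed import inference as imed_inference  # import path assumed

# Save every returned prediction with its target suffix.
nib_list, target_list = imed_inference.segment_volume(
    folder_model="findcord_tumor",
    fname_images=["testing_data/sub-unf01/anat/sub-unf01_T1w.nii.gz"])
for nib_pred, target in zip(nib_list, target_list):
    nib.save(nib_pred, "prediction" + target + ".nii.gz")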
Code Example #2
def test_segment_volume_2d():
    model = imed_models.Unet(in_channel=1,
                             out_channel=1,
                             depth=2,
                             drop_rate=DROPOUT,
                             bn_momentum=BN)

    # temporary folder that will be deleted at the end of the test
    if not os.path.exists(PATH_MODEL):
        os.mkdir(PATH_MODEL)

    torch.save(model, os.path.join(PATH_MODEL, "model_test.pt"))
    config = {
        "loader_parameters": {
            "slice_filter_params": {
                "filter_empty_mask": False,
                "filter_empty_input": False
            },
            "roi_params": {
                "suffix": None,
                "slice_filter_roi": 10
            },
            "slice_axis": "axial"
        },
        "transformation": {
            "Resample": {
                "wspace": 0.75,
                "hspace": 0.75
            },
            "ROICrop": {
                "size": [48, 48]
            },
            "RandomTranslation": {
                "translate": [0.03, 0.03],
                "applied_to": ["im", "gt"],
                "dataset_type": ["training"]
            },
            "NumpyToTensor": {},
            "NormalizeInstance": {
                "applied_to": ["im"]
            }
        },
        "postprocessing": {},
        "training_parameters": {
            "batch_size": BATCH_SIZE
        }
    }

    PATH_CONFIG = os.path.join(PATH_MODEL, 'model_test.json')
    with open(PATH_CONFIG, 'w') as fp:
        json.dump(config, fp)

    nib_lst, _ = imed_inference.segment_volume(PATH_MODEL, [IMAGE_PATH],
                                               ROI_PATH)
    nib_img = nib_lst[0]
    assert np.squeeze(nib_img.get_fdata()).shape == nib.load(IMAGE_PATH).shape
    assert (nib_img.dataobj.max() <= 1.0) and (nib_img.dataobj.min() >= 0.0)
    assert nib_img.dataobj.dtype == 'float32'

    shutil.rmtree(PATH_MODEL)
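
This test relies on a packaged-model convention: the folder handed to `segment_volume` contains a `.pt` weights file and a `.json` configuration sharing one base name. A small sketch of that assumption:

import os

# Layout assumed for the folder passed as folder_model:
#   PATH_MODEL/
#       model_test.pt    - weights saved with torch.save
#       model_test.json  - loader / transformation / postprocessing config
assert os.path.exists(os.path.join(PATH_MODEL, "model_test.pt"))
assert os.path.exists(os.path.join(PATH_MODEL, "model_test.json"))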
Code Example #3
def test_segment_volume_2d(download_functional_test_files):
    model = imed_models.Unet(in_channel=1,
                             out_channel=1,
                             depth=2,
                             dropout_rate=DROPOUT,
                             bn_momentum=BN)

    if not PATH_MODEL.exists():
        PATH_MODEL.mkdir(parents=True, exist_ok=True)

    torch.save(model, Path(PATH_MODEL, "model_test.pt"))
    config = {
        "loader_parameters": {
            "slice_filter_params": {
                "filter_empty_mask": False,
                "filter_empty_input": False
            },
            "roi_params": {
                "suffix": "_seg-manual",
                "slice_filter_roi": 10
            },
            "slice_axis": "axial"
        },
        "transformation": {
            "Resample": {
                "wspace": 0.75,
                "hspace": 0.75
            },
            "ROICrop": {
                "size": [48, 48]
            },
            "RandomTranslation": {
                "translate": [0.03, 0.03],
                "applied_to": ["im", "gt"],
                "dataset_type": ["training"]
            },
            "NormalizeInstance": {
                "applied_to": ["im"]
            }
        },
        "postprocessing": {},
        "training_parameters": {
            "batch_size": BATCH_SIZE
        }
    }

    PATH_CONFIG = Path(PATH_MODEL, 'model_test.json')
    with PATH_CONFIG.open(mode='w') as fp:
        json.dump(config, fp)

    nib_lst, _ = imed_inference.segment_volume(
        str(PATH_MODEL), [str(PATH_IMAGE)],
        options={'fname_prior': str(PATH_ROI)})
    nib_img = nib_lst[0]
    assert np.squeeze(nib_img.get_fdata()).shape == nib.load(PATH_IMAGE).shape
    assert (nib_img.dataobj.max() <= 1.0) and (nib_img.dataobj.min() >= 0.0)
    assert nib_img.dataobj.dtype == 'float32'

    shutil.rmtree(PATH_MODEL)
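
Unlike Code Example #2, the ROI here is supplied through `options={'fname_prior': ...}`; a detection mask such as the one saved in Code Example #1 could serve as that prior. A minimal sketch with an assumed mask path:

# Sketch: reuse a previously saved detection mask as the ROI prior.
nib_lst, _ = imed_inference.segment_volume(
    str(PATH_MODEL), [str(PATH_IMAGE)],
    options={'fname_prior': 'detection.nii.gz'})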
Code Example #4
def generate_bounding_box_file(subject_list,
                               model_path,
                               path_output,
                               gpu_id=0,
                               slice_axis=0,
                               contrast_lst=None,
                               keep_largest_only=True,
                               safety_factor=None):
    """Creates json file containing the bounding box dimension for each images. The file has the following format:
    {"path/to/img.nii.gz": [[x1_min, x1_max, y1_min, y1_max, z1_min, z1_max],
    [x2_min, x2_max, y2_min, y2_max, z2_min, z2_max]]}
    where each list represents the coordinates of an object on the image (2 instance of a given object in this example).

    Args:
        subject_list (list): List of all subjects in the BIDS directory.
        model_path (string): Path to object detection model.
        path_output (string): Output path.
        gpu_id (int): GPU number, if a GPU is available.
        slice_axis (int): Slice axis (0: sagittal, 1: coronal, 2: axial).
        contrast_lst (list): Contrasts.
        keep_largest_only (bool): Whether to keep only the largest connected object of the prediction.
        safety_factor (list or tuple): Factors to multiply each dimension of the bounding box.

    Returns:
        dict: Dictionary mapping each image path to its bounding boxes.

    """
    bounding_box_dict = {}
    if safety_factor is None:
        safety_factor = [1.0, 1.0, 1.0]
    for subject in subject_list:
        if subject.record["modality"] in contrast_lst:
            subject_path = str(subject.record["absolute_path"])
            object_mask, _ = imed_inference.segment_volume(model_path,
                                                           [subject_path],
                                                           gpu_id=gpu_id)
            object_mask = object_mask[0]
            if keep_largest_only:
                object_mask = imed_postpro.keep_largest_object(object_mask)

            mask_path = os.path.join(path_output, "detection_mask")
            if not os.path.exists(mask_path):
                os.mkdir(mask_path)
            nib.save(object_mask,
                     os.path.join(mask_path,
                                  subject_path.split("/")[-1]))
            ras_orientation = nib.as_closest_canonical(object_mask)
            hwd_orientation = imed_loader_utils.orient_img_hwd(
                ras_orientation.get_fdata(), slice_axis)
            bounding_boxes = get_bounding_boxes(hwd_orientation)
            bounding_box_dict[subject_path] = [
                adjust_bb_size(bb, safety_factor) for bb in bounding_boxes
            ]

    file_path = os.path.join(path_output, 'bounding_boxes.json')
    with open(file_path, 'w') as fp:
        json.dump(bounding_box_dict, fp, indent=4)
    return bounding_box_dict
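
A hedged sketch of calling the function above; the subject list, model name, contrast, and 20% safety margin are illustrative assumptions:

# Hypothetical call: enlarge each bounding-box dimension by 20%.
# subject_list: BIDS subjects as described in the docstring (assumed available).
bb_dict = generate_bounding_box_file(subject_list,
                                     model_path="findcord_tumor",
                                     path_output="./output",
                                     contrast_lst=["T1w"],
                                     safety_factor=[1.2, 1.2, 1.2])
# bb_dict: {image_path: [[x_min, x_max, y_min, y_max, z_min, z_max], ...]}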
Code Example #5
def test_segment_volume_2d_no_prepro_transform(download_functional_test_files):
    model = imed_models.Unet(in_channel=1,
                             out_channel=1,
                             depth=2,
                             dropout_rate=DROPOUT,
                             bn_momentum=BN)

    if not PATH_MODEL.exists():
        PATH_MODEL.mkdir()

    torch.save(model, Path(PATH_MODEL, "model_test.pt"))
    config = {
        "loader_parameters": {
            "slice_filter_params": {
                "filter_empty_mask": False,
                "filter_empty_input": False
            },
            "roi_params": {
                "suffix": None,
                "slice_filter_roi": None
            },
            "slice_axis": "axial"
        },
        "transformation": {
            "NormalizeInstance": {
                "applied_to": ["im"]
            }
        },
        "postprocessing": {},
        "training_parameters": {
            "batch_size": BATCH_SIZE
        }
    }

    PATH_CONFIG = Path(PATH_MODEL, 'model_test.json')
    with PATH_CONFIG.open(mode='w') as fp:
        json.dump(config, fp)

    nib_lst, _ = imed_inference.segment_volume(str(PATH_MODEL),
                                               [str(PATH_IMAGE)])
    nib_img = nib_lst[0]
    assert np.squeeze(nib_img.get_fdata()).shape == nib.load(PATH_IMAGE).shape
    assert (nib_img.dataobj.max() <= 1.0) and (nib_img.dataobj.min() >= 0.0)
    assert nib_img.dataobj.dtype == 'float32'

    shutil.rmtree(PATH_MODEL)
Code Example #6
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands are run via this
    function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
            parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the log directory.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the log_directory) to resume
            training. This training state is saved every time a new best model is saved in the log
            directory.

    Returns:
        float or pandas.DataFrame:
        If "train" command: Returns the best dice and loss scores for both training and validation.
        If "test" command: Returns a pandas DataFrame of metrics computed for each subject of the testing
            sub-dataset, together with the prediction metrics computed before evaluation.
        If "segment" command: No return value.
    """
    command = copy.deepcopy(context["command"])
    log_directory = copy.deepcopy(context["log_directory"])
    if not os.path.isdir(log_directory):
        print('Creating log directory: {}'.format(log_directory))
        os.makedirs(log_directory)
    else:
        print('Log directory already exists: {}'.format(log_directory))

    # Define device
    cuda_available, device = imed_utils.define_device(context['gpu'])

    # Get subject lists
    train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
        context["split_dataset"], context['loader_parameters']['bids_path'],
        log_directory)

    # Loader params
    loader_params = copy.deepcopy(context["loader_parameters"])
    if command == "train":
        loader_params["contrast_params"]["contrast_lst"] = loader_params[
            "contrast_params"]["training_validation"]
    else:
        loader_params["contrast_params"]["contrast_lst"] = loader_params[
            "contrast_params"]["testing"]
    if "FiLMedUnet" in context and context["FiLMedUnet"]["applied"]:
        loader_params.update(
            {"metadata_type": context["FiLMedUnet"]["metadata"]})

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params = copy.deepcopy(context["default_model"])
    model_params["folder_name"] = copy.deepcopy(context["model_name"])
    model_context_list = [
        model_name for model_name in MODEL_LIST
        if model_name in context and context[model_name]["applied"]
    ]
    if len(model_context_list) == 1:
        model_params["name"] = model_context_list[0]
        model_params.update(context[model_context_list[0]])
    elif 'Modified3DUNet' in model_context_list and 'FiLMedUnet' in model_context_list and len(
            model_context_list) == 2:
        model_params["name"] = 'Modified3DUNet'
        for i in range(len(model_context_list)):
            model_params.update(context[model_context_list[i]])
    elif len(model_context_list) > 1:
        print(
            'ERROR: Several models are selected in the configuration file: {}.'
            'Please select only one (i.e. only one where: "applied": true).'.
            format(model_context_list))
        exit()

    model_params['is_2d'] = False if "Modified3DUNet" in model_params[
        'name'] else model_params['is_2d']
    # Get in_channel from contrast_lst
    if loader_params["multichannel"]:
        model_params["in_channel"] = len(
            loader_params["contrast_params"]["contrast_lst"])
    else:
        model_params["in_channel"] = 1
    # Get out_channel from target_suffix
    model_params["out_channel"] = len(loader_params["target_suffix"])
    # If multi-class output, then add background class
    if model_params["out_channel"] > 1:
        model_params.update({"out_channel": model_params["out_channel"] + 1})
    # Display selected model parameters for verification
    imed_utils.display_selected_model_spec(params=model_params)
    # Update loader params
    if 'object_detection_params' in context:
        object_detection_params = context['object_detection_params']
        object_detection_params.update({
            "gpu":
            context['gpu'],
            "log_directory":
            context['log_directory']
        })
        loader_params.update(
            {"object_detection_params": object_detection_params})

    loader_params.update({"model_params": model_params})

    # TESTING PARAMS
    # Aleatoric uncertainty
    if context['uncertainty'][
            'aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(
        imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({
        'target_suffix': loader_params["target_suffix"],
        'undo_transforms': undo_transforms,
        'slice_axis': loader_params['slice_axis']
    })
    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params,
                                              dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params,
                                              dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict,
                                              dataset_type=["testing"])

    if command == 'train':
        # LOAD DATASET
        # Get Validation dataset
        ds_valid = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': valid_lst,
                'transforms_params': transform_valid_params,
                'dataset_type': 'validation'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)
        # Get Training dataset
        ds_train = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': train_lst,
                'transforms_params': transform_train_params,
                'dataset_type': 'training'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            # Normalize metadata before sending to the FiLM network
            results = imed_film.get_film_metadata_models(
                ds_train=ds_train,
                metadata_type=model_params['metadata'],
                debugging=context["debugging"])
            ds_train, train_onehotencoder, metadata_clustering_models = results
            ds_valid = imed_film.normalize_metadata(
                ds_valid, metadata_clustering_models, context["debugging"],
                model_params['metadata'])
            model_params.update({
                "film_onehotencoder":
                train_onehotencoder,
                "n_metadata":
                len([ll for l in train_onehotencoder.categories_ for ll in l])
            })
            joblib.dump(metadata_clustering_models,
                        "./" + log_directory + "/clustering_models.joblib")
            joblib.dump(train_onehotencoder,
                        "./" + log_directory + "/one_hot_encoder.joblib")

        # Model directory
        path_model = os.path.join(log_directory, context["model_name"])
        if not os.path.isdir(path_model):
            print('Creating model directory: {}'.format(path_model))
            os.makedirs(path_model)
            if 'film_layers' in model_params and any(
                    model_params['film_layers']):
                joblib.dump(train_onehotencoder,
                            os.path.join(path_model, "one_hot_encoder.joblib"))
                if 'metadata_dict' in ds_train[0]['input_metadata'][0]:
                    metadata_dict = ds_train[0]['input_metadata'][0][
                        'metadata_dict']
                    joblib.dump(
                        metadata_dict,
                        os.path.join(path_model, "metadata_dict.joblib"))

        else:
            print('Model directory already exists: {}'.format(path_model))

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            log_directory=log_directory,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # If command == 'train', ds_valid is already loaded
            # Get Validation dataset
            ds_valid = imed_loader.load_dataset(**{
                **loader_params,
                **{
                    'data_list': valid_lst,
                    'transforms_params': transform_valid_params,
                    'dataset_type': 'validation'
                }
            },
                                                device=device,
                                                cuda_available=cuda_available)
        # Get Training dataset with no Data Augmentation
        ds_train = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': train_lst,
                'transforms_params': transform_valid_params,
                'dataset_type': 'training'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params[
            "name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(log_directory, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(
                                                  log_directory, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}

    if command == 'train':
        # Save config file within log_directory and log_directory/model_name
        # Done after the threshold_analysis to propagate this info into the config files
        with open(os.path.join(log_directory, "config_file.json"), 'w') as fp:
            json.dump(context, fp, indent=4)
        with open(
                os.path.join(log_directory, context["model_name"],
                             context["model_name"] + ".json"), 'w') as fp:
            json.dump(context, fp, indent=4)

        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': test_lst,
                'transforms_params': transformation_dict,
                'dataset_type': 'testing',
                'requires_undo': True
            }
        },
                                           device=device,
                                           cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            clustering_path = os.path.join(log_directory,
                                           "clustering_models.joblib")
            metadata_clustering_models = joblib.load(clustering_path)
            ohe_path = os.path.join(log_directory, "one_hot_encoder.joblib")
            one_hot_encoder = joblib.load(ohe_path)
            ds_test = imed_film.normalize_metadata(ds_test,
                                                   metadata_clustering_models,
                                                   context["debugging"],
                                                   model_params['metadata'])
            model_params.update({
                "film_onehotencoder":
                one_hot_encoder,
                "n_metadata":
                len([ll for l in one_hot_encoder.categories_ for ll in l])
            })

        # RUN INFERENCE
        pred_metrics = imed_testing.test(
            model_params=model_params,
            dataset_test=ds_test,
            testing_params=testing_params,
            log_directory=log_directory,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(
            bids_path=loader_params['bids_path'],
            log_directory=log_directory,
            target_suffix=loader_params["target_suffix"],
            eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics

    if command == 'segment':
        bids_ds = bids.BIDS(context["loader_parameters"]["bids_path"])
        df = bids_ds.participants.content
        subj_lst = df['participant_id'].tolist()
        bids_subjects = [
            s for s in bids_ds.get_subjects()
            if s.record["subject_id"] in subj_lst
        ]

        # Add postprocessing to packaged model
        path_model = os.path.join(context['log_directory'],
                                  context['model_name'])
        path_model_config = os.path.join(path_model,
                                         context['model_name'] + ".json")
        model_config = imed_config_manager.load_json(path_model_config)
        model_config['postprocessing'] = context['postprocessing']
        with open(path_model_config, 'w') as fp:
            json.dump(model_config, fp, indent=4)

        options = None
        for subject in bids_subjects:
            fname_img = subject.record["absolute_path"]
            if 'film_layers' in model_params and any(
                    model_params['film_layers']) and model_params['metadata']:
                subj_id = subject.record['subject_id']
                metadata = df[df['participant_id'] == subj_id][
                    model_params['metadata']].values[0]
                options = {'metadata': metadata}
            pred = imed_inference.segment_volume(path_model,
                                                 fname_image=fname_img,
                                                 gpu_number=context['gpu'],
                                                 options=options)
            pred_path = os.path.join(context['log_directory'], "pred_masks")
            if not os.path.exists(pred_path):
                os.makedirs(pred_path)
            filename = subject.record['subject_id'] + "_" + subject.record[
                'modality'] + "_pred" + ".nii.gz"
            nib.save(pred, os.path.join(pred_path, filename))
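
A minimal sketch of driving `run_command` from a configuration file; the config path is an assumption, and `load_json` is used as in the `segment` branch above:

# Sketch (config path assumed): load a config, pick a command, and run it.
context = imed_config_manager.load_json("config.json")
context["command"] = "train"
best_train_dice, best_train_loss, best_val_dice, best_val_loss = \
    run_command(context, thr_increment=0.1)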
Code Example #7
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    if (arguments.list_tasks is False
            and arguments.install_task is None
            and (arguments.i is None or arguments.task is None)):
        parser.error("You must specify either '-list-tasks', '-install-task', or both '-i' + '-task'.")

    # Deal with task
    if arguments.list_tasks:
        deepseg.models.display_list_tasks()

    if arguments.install_task is not None:
        for name_model in deepseg.models.TASKS[arguments.install_task]['models']:
            deepseg.models.install_model(name_model)
        exit(0)

    # Deal with input/output
    for file in arguments.i:
        if not os.path.isfile(file):
            parser.error("This file does not exist: {}".format(file))

    # Verify if the task is part of the "official" tasks, or if it is pointing to paths containing custom models
    if len(arguments.task) == 1 and arguments.task[0] in deepseg.models.TASKS:
        # Check if all input images are provided
        required_contrasts = deepseg.models.get_required_contrasts(arguments.task[0])
        n_contrasts = len(required_contrasts)
        # Get pipeline model names
        name_models = deepseg.models.TASKS[arguments.task[0]]['models']
    else:
        n_contrasts = len(arguments.i)
        name_models = arguments.task

    if len(arguments.i) != n_contrasts:
        parser.error(
            "{} input files found. Please provide all required input files for the task {}, i.e. contrasts: {}."
            .format(len(arguments.i), arguments.task, ', '.join(required_contrasts)))

    # Check modality order
    if len(arguments.i) > 1 and arguments.c is None:
        parser.error(
            "Please specify the order in which you put the contrasts in the input images (-i) with flag -c, e.g., "
            "-c t1 t2")

    # Run pipeline by iterating through the models
    fname_prior = None
    output_filenames = None
    for name_model in name_models:
        # Check if this is an official model
        if name_model in list(deepseg.models.MODELS.keys()):
            # If it is, check if it is installed
            path_model = deepseg.models.folder(name_model)
            if not deepseg.models.is_valid(path_model):
                printv("Model {} is not installed. Installing it now...".format(name_model))
                deepseg.models.install_model(name_model)
        # If it is not, check if this is a path to a valid model
        else:
            path_model = os.path.abspath(name_model)
            if not deepseg.models.is_valid(path_model):
                parser.error("The input model is invalid: {}".format(path_model))

        # Order input images
        if arguments.c is not None:
            input_filenames = []
            for required_contrast in deepseg.models.MODELS[name_model]['contrasts']:
                for provided_contrast, input_filename in zip(arguments.c, arguments.i):
                    if required_contrast == provided_contrast:
                        input_filenames.append(input_filename)
        else:
            input_filenames = arguments.i

        # Call segment_nifti
        options = {**vars(arguments), "fname_prior": fname_prior}
        nii_lst, target_lst = imed_inference.segment_volume(path_model, input_filenames, options=options)

        # Delete intermediate outputs
        if fname_prior and os.path.isfile(fname_prior) and arguments.r:
            logger.info("Remove temporary files...")
            os.remove(fname_prior)

        output_filenames = []
        # Save output seg
        for nii_seg, target in zip(nii_lst, target_lst):
            if 'o' in options and options['o'] is not None:
                # Handle output filenames given with or without an extension
                extension = ".nii.gz" if ".nii.gz" in options['o'] else ".nii" if ".nii" in options['o'] else ""
                if extension == "":
                    fname_seg = options['o'] + target if len(target_lst) > 1 else options['o']
                else:
                    fname_seg = options['o'].replace(extension, target + extension) if len(target_lst) > 1 \
                        else options['o']
            else:
                fname_seg = ''.join([sct.image.splitext(input_filenames[0])[0], target + '.nii.gz'])

            # If output folder does not exist, create it
            path_out = os.path.dirname(fname_seg)
            if not (path_out == '' or os.path.exists(path_out)):
                os.makedirs(path_out)

            nib.save(nii_seg, fname_seg)
            output_filenames.append(fname_seg)

        # Use the result of the current model as additional input of the next model
        fname_prior = fname_seg

    for output_filename in output_filenames:
        display_viewer_syntax([arguments.i[0], output_filename], colormaps=['gray', 'red'], opacities=['', '0.7'])
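
A sketch of invoking this entry point programmatically with the flags parsed above; the image path and task name are hypothetical, and the CLI wrapper is assumed to be sct_deepseg:

# Sketch: roughly equivalent to `sct_deepseg -i t2.nii.gz -task <task>` on the CLI.
main(argv=['-i', 't2.nii.gz', '-task', 'seg_sc_t2'])  # task name is hypothetical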
Code Example #8
File: main.py, Project: cakester/ivadomed
def run_segment_command(context, model_params):
    bids_ds = []
    path_data = imed_utils.format_path_data(
        context["loader_parameters"]["path_data"])
    for bids_folder in path_data:
        bids_ds.append(bids.BIDS(bids_folder))

    # Get the merged df from all dataset paths
    df = imed_loader_utils.merge_bids_datasets(path_data)
    subj_lst = df['participant_id'].tolist()

    # Append subjects from all BIDS datasets into a list
    bids_subjects = []
    for i_bids_folder in range(0, len(path_data)):
        bids_subjects += [
            s for s in bids_ds[i_bids_folder].get_subjects()
            if s.record["subject_id"] in subj_lst
        ]

    # Add postprocessing to packaged model
    path_model = os.path.join(context['path_output'], context['model_name'])
    path_model_config = os.path.join(path_model,
                                     context['model_name'] + ".json")
    model_config = imed_config_manager.load_json(path_model_config)
    model_config['postprocessing'] = context['postprocessing']
    with open(path_model_config, 'w') as fp:
        json.dump(model_config, fp, indent=4)

    options = None
    for subject in bids_subjects:
        if context['loader_parameters']['multichannel']:
            fname_img = []
            provided_contrasts = []
            contrasts = context['loader_parameters']['contrast_params'][
                'testing']
            # Keep contrast order
            for c in contrasts:
                for s in bids_subjects:
                    if subject.record['subject_id'] == s.record[
                            'subject_id'] and s.record['modality'] == c:
                        provided_contrasts.append(c)
                        fname_img.append(s.record['absolute_path'])
                        bids_subjects.remove(s)
            if len(fname_img) != len(contrasts):
                logger.warning(
                    "Missing contrast for subject {}. {} were provided but {} are required. Skipping "
                    "subject.".format(subject.record['subject_id'],
                                      provided_contrasts, contrasts))
                continue
        else:
            fname_img = [subject.record['absolute_path']]

        if 'film_layers' in model_params and any(
                model_params['film_layers']) and model_params['metadata']:
            subj_id = subject.record['subject_id']
            metadata = df[df['participant_id'] == subj_id][
                model_params['metadata']].values[0]
            options = {'metadata': metadata}
        pred_list, target_list = imed_inference.segment_volume(
            path_model,
            fname_images=fname_img,
            gpu_id=context['gpu_ids'][0],
            options=options)
        pred_path = os.path.join(context['path_output'], "pred_masks")
        if not os.path.exists(pred_path):
            os.makedirs(pred_path)

        for pred, target in zip(pred_list, target_list):
            filename = subject.record['subject_id'] + "_" + subject.record['modality'] + target + "_pred" + \
                        ".nii.gz"
            nib.save(pred, os.path.join(pred_path, filename))
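
For multichannel models, the loop above gathers one file per required contrast in the order given by contrast_params['testing']. A sketch of the resulting call with assumed file names:

# Sketch: one input image per contrast, ordered as in contrast_params['testing'].
pred_list, target_list = imed_inference.segment_volume(
    path_model,
    fname_images=["sub-01_T1w.nii.gz", "sub-01_T2w.nii.gz"],
    gpu_id=0,
    options=None)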
Code Example #9
def test_segment_volume_3d(download_functional_test_files, center_crop):
    model = imed_models.Modified3DUNet(in_channel=1,
                                       out_channel=1,
                                       base_n_filter=1)

    if not os.path.exists(PATH_MODEL):
        os.mkdir(PATH_MODEL)

    torch.save(model, os.path.join(PATH_MODEL, "model_test.pt"))
    config = {
        "Modified3DUNet": {
            "applied": True,
            "length_3D": LENGTH_3D,
            "stride_3D": LENGTH_3D,
            "attention": False
        },
        "loader_parameters": {
            "slice_filter_params": {
                "filter_empty_mask": False,
                "filter_empty_input": False
            },
            "roi_params": {
                "suffix": None,
                "slice_filter_roi": None
            },
            "slice_axis": "sagittal"
        },
        "transformation": {
            "Resample": {
                "wspace": 1,
                "hspace": 1,
                "dspace": 2
            },
            "CenterCrop": {
                "size": center_crop
            },
            "RandomTranslation": {
                "translate": [0.03, 0.03],
                "applied_to": ["im", "gt"],
                "dataset_type": ["training"]
            },
            "NumpyToTensor": {},
            "NormalizeInstance": {
                "applied_to": ["im"]
            }
        },
        "postprocessing": {},
        "training_parameters": {
            "batch_size": BATCH_SIZE
        }
    }

    PATH_CONFIG = os.path.join(PATH_MODEL, 'model_test.json')
    with open(PATH_CONFIG, 'w') as fp:
        json.dump(config, fp)

    nib_lst, _ = imed_inference.segment_volume(PATH_MODEL, [IMAGE_PATH])
    nib_img = nib_lst[0]
    assert np.squeeze(nib_img.get_fdata()).shape == nib.load(IMAGE_PATH).shape
    assert (nib_img.dataobj.max() <= 1.0) and (nib_img.dataobj.min() >= 0.0)
    assert nib_img.dataobj.dtype == 'float32'

    shutil.rmtree(PATH_MODEL)
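
The center_crop value arrives through the test's parameters; a sketch of how such a parametrization might look, with assumed crop sizes:

import pytest

# Sketch (crop sizes assumed): run the 3D test with different CenterCrop sizes.
@pytest.mark.parametrize("center_crop", [[176, 128, 160], [96, 96, 96]])
def test_segment_volume_3d(download_functional_test_files, center_crop):
    ...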
Code Example #10
def run_segment_command(context, model_params):
    # BidsDataframe of all image files
    # Derivatives are not indexed for the 'segment' command
    bids_df = imed_loader_utils.BidsDataframe(context['loader_parameters'],
                                              context['path_output'],
                                              derivatives=False)

    # Append subjects filenames into a list
    bids_subjects = sorted(bids_df.df['filename'].to_list())

    # Add postprocessing to packaged model
    path_model = os.path.join(context['path_output'], context['model_name'])
    path_model_config = os.path.join(path_model,
                                     context['model_name'] + ".json")
    model_config = imed_config_manager.load_json(path_model_config)
    model_config['postprocessing'] = context['postprocessing']
    with open(path_model_config, 'w') as fp:
        json.dump(model_config, fp, indent=4)
    options = None

    # Initialize a list of already seen subject ids for multichannel
    seen_subj_ids = []

    for subject in bids_subjects:
        if context['loader_parameters']['multichannel']:
            # Get subject_id for multichannel
            df_sub = bids_df.df.loc[bids_df.df['filename'] == subject]
            subj_id = re.sub(r'_' + df_sub['suffix'].values[0] + '.*', '',
                             subject)
            if subj_id not in seen_subj_ids:
                # if subj_id has not been seen yet
                fname_img = []
                provided_contrasts = []
                contrasts = context['loader_parameters']['contrast_params'][
                    'testing']
                # Keep contrast order
                for c in contrasts:
                    df_tmp = bids_df.df[
                        bids_df.df['filename'].str.contains(subj_id)
                        & bids_df.df['suffix'].str.contains(c)]
                    if not df_tmp.empty:
                        provided_contrasts.append(c)
                        fname_img.append(df_tmp['path'].values[0])
                seen_subj_ids.append(subj_id)
                if len(fname_img) != len(contrasts):
                    logger.warning(
                        "Missing contrast for subject {}. {} were provided but {} are required. Skipping "
                        "subject.".format(subj_id, provided_contrasts,
                                          contrasts))
                    continue
            else:
                # subj_id already processed: leave fname_img empty to skip this file
                fname_img = []
        else:
            fname_img = bids_df.df[bids_df.df['filename'] ==
                                   subject]['path'].to_list()

        if 'film_layers' in model_params and any(
                model_params['film_layers']) and model_params['metadata']:
            metadata = bids_df.df[bids_df.df['filename'] == subject][
                model_params['metadata']].values[0]
            options = {'metadata': metadata}

        if fname_img:
            pred_list, target_list = imed_inference.segment_volume(
                path_model,
                fname_images=fname_img,
                gpu_id=context['gpu_ids'][0],
                options=options)
            pred_path = os.path.join(context['path_output'], "pred_masks")
            if not os.path.exists(pred_path):
                os.makedirs(pred_path)

            for pred, target in zip(pred_list, target_list):
                filename = subject.split('.')[0] + target + "_pred" + \
                           ".nii.gz"
                nib.save(pred, os.path.join(pred_path, filename))
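
The multichannel branch derives the subject id by stripping the suffix from the BIDS filename; a standalone sketch with assumed example values:

import re

# Sketch: strip '_<suffix>...' from a BIDS filename to recover the subject id.
filename = "sub-01_T1w.nii.gz"   # assumed example
suffix = "T1w"
subj_id = re.sub(r'_' + suffix + '.*', '', filename)
assert subj_id == "sub-01"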