Example #1
def _test_Crop(im_seg, crop_transform):
    im, seg = im_seg
    metadata_ = SampleMetadata({'data_shape': im[0].shape, 'crop_params': {}})
    metadata_in = [metadata_ for _ in im] if isinstance(im, list) else {}
    if crop_transform.__class__.__name__ == "ROICrop":
        _, metadata_in = crop_transform(seg, metadata_in)
        for metadata in metadata_in:
            assert crop_transform.__class__.__name__ in metadata["crop_params"]

    # Apply transform
    do_im, do_metadata = crop_transform(im, metadata_in)
    do_seg, do_seg_metadata = crop_transform(seg, metadata_in)
    crop_transform_size = (crop_transform.size[:2] if len(do_im[0].shape) == 2
                           else crop_transform.size)

    # Loop and check
    for idx, i in enumerate(im):
        # Check data shape
        assert list(do_im[idx].shape) == crop_transform_size
        assert list(do_seg[idx].shape) == crop_transform_size
        # Check metadata
        assert do_metadata[idx]['crop_params'][crop_transform.__class__.__name__] == \
               do_seg_metadata[idx]['crop_params'][crop_transform.__class__.__name__]

    # Apply undo transform
    undo_im, _ = crop_transform.undo_transform(do_im, do_metadata)
    undo_seg, _ = crop_transform.undo_transform(do_seg, do_seg_metadata)

    # Check data type and shape
    _check_dtype(im, [undo_im])
    _check_shape(im, [undo_im])
    _check_dtype(seg, [undo_seg])
    _check_shape(seg, [undo_seg])

    # Loop and check
    for idx, i in enumerate(im):
        # Check data consistency
        fh, fw, fd, _, _, _ = do_metadata[idx]['crop_params'][
            crop_transform.__class__.__name__]
        th, tw, td = crop_transform.size
        if not td:
            assert np.array_equal(i[fh:fh + th, fw:fw + tw],
                                  undo_im[idx][fh:fh + th, fw:fw + tw])
            assert np.array_equal(seg[idx][fh:fh + th, fw:fw + tw],
                                  undo_seg[idx][fh:fh + th, fw:fw + tw])
            # Plot for debugging
            if DEBUGGING:
                plot_transformed_sample(seg[idx], undo_seg[idx],
                                        ['raw', 'undo'])
                plot_transformed_sample(i, undo_im[idx], ['raw', 'undo'])
        else:
            assert np.array_equal(
                i[fh:fh + th, fw:fw + tw, fd:fd + td],
                undo_im[idx][fh:fh + th, fw:fw + tw, fd:fd + td])
            assert np.array_equal(
                seg[idx][fh:fh + th, fw:fw + tw, fd:fd + td],
                undo_seg[idx][fh:fh + th, fw:fw + tw, fd:fd + td])
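
# Hypothetical wrapper showing how the _test_Crop helper above is meant to be
# driven (a sketch only: the pytest parametrization and the CenterCrop / ROICrop
# constructor arguments are assumptions, not code from the actual test suite).
import pytest
from ivadomed import transforms as imed_transforms

@pytest.mark.parametrize("crop_transform", [
    imed_transforms.CenterCrop(size=[64, 48, 0]),  # depth 0, i.e. a 2D crop
    imed_transforms.ROICrop(size=[64, 48, 0]),
])
def test_Crop(im_seg, crop_transform):
    _test_Crop(im_seg, crop_transform)
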
def test_Clahe(im_seg, clahe):
    im, seg = im_seg
    metadata_in = [SampleMetadata({}) for _ in im] if isinstance(im, list) else SampleMetadata({})

    # Transform on Numpy
    do_im, metadata_do = clahe(im.copy(), metadata_in)

    _check_dtype(im, [do_im])
    _check_shape(im, [do_im])

    if DEBUGGING and len(im[0].shape) == 2:
        plot_transformed_sample(im[0], do_im[0], ['raw', 'do'])
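
# The _check_dtype / _check_shape helpers used throughout these tests are not
# shown in this excerpt. A minimal sketch consistent with how they are called
# above (ref is the original list of arrays, list_mov a list of transformed
# lists); this is an assumption, not the actual ivadomed implementation:
def _check_shape(ref, list_mov):
    for mov in list_mov:
        for idx, r in enumerate(ref):
            assert mov[idx].shape == r.shape


def _check_dtype(ref, list_mov):
    for mov in list_mov:
        for idx, r in enumerate(ref):
            assert mov[idx].dtype == r.dtype
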
Example #3
def _test_Resample(im_seg, resample_transform, native_resolution, is_2D=False):
    im, seg = im_seg
    metadata_ = SampleMetadata({
        'zooms': native_resolution,
        'data_shape': im[0].shape if len(im[0].shape) == 3 else list(im[0].shape) + [1],
        'data_type': 'im'
    })
    metadata_in = [metadata_ for _ in im] if isinstance(im, list) else SampleMetadata({})

    # Resample input data
    do_im, do_metadata = resample_transform(sample=im, metadata=metadata_in)
    # Undo Resample on input data
    undo_im, _ = resample_transform.undo_transform(sample=do_im,
                                                   metadata=do_metadata)

    # Resampler for label data
    resample_transform.interpolation_order = 0
    metadata_ = SampleMetadata({
        'zooms': native_resolution,
        'data_shape': seg[0].shape if len(seg[0].shape) == 3 else list(seg[0].shape) + [1],
        'data_type': 'gt'
    })
    metadata_in = [metadata_ for _ in seg] if isinstance(seg, list) else SampleMetadata({})
    # Resample label data
    do_seg, do_metadata = resample_transform(sample=seg, metadata=metadata_in)
    # Undo Resample on label data
    undo_seg, _ = resample_transform.undo_transform(sample=do_seg,
                                                    metadata=do_metadata)

    # Check data type and shape
    _check_dtype(im, [undo_im])
    _check_shape(im, [undo_im])
    _check_dtype(seg, [undo_seg])
    _check_shape(seg, [undo_seg])

    # Check data content and data shape between input data and undo
    for idx, i in enumerate(im):
        # Plot for debugging
        if DEBUGGING and is_2D:
            plot_transformed_sample(im[idx], undo_im[idx], ['raw', 'undo'])
            plot_transformed_sample(seg[idx], undo_seg[idx], ['raw', 'undo'])
        # Data consistency
        assert dice_score(undo_seg[idx], seg[idx]) > 0.8
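
# Hypothetical driver for the _test_Resample helper above (a sketch only; the
# Resample constructor arguments, the fixture wiring and the native resolution
# values below are assumptions, not code from the actual test suite).
from ivadomed import transforms as imed_transforms

def test_Resample_2d(im_seg):
    resample = imed_transforms.Resample(hspace=0.75, wspace=0.75)
    _test_Resample(im_seg, resample, native_resolution=(0.5, 0.5, 3.0),
                   is_2D=True)
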
Example #4
def test_RandomAffine(im_seg, transform):
    im, seg = im_seg
    metadata_in = [SampleMetadata({}) for _ in im] if isinstance(im, list) else SampleMetadata({})

    # Transform on Numpy
    do_im, metadata_do = transform(im.copy(), metadata_in)
    do_seg, metadata_do = transform(seg.copy(), metadata_do)

    if DEBUGGING and len(im[0].shape) == 2:
        plot_transformed_sample(im[0], do_im[0], ['raw', 'do'])
        plot_transformed_sample(seg[0], do_seg[0], ['raw', 'do'])

    # Transform on Numpy
    undo_im, _ = transform.undo_transform(do_im, metadata_do)
    undo_seg, _ = transform.undo_transform(do_seg, metadata_do)

    if DEBUGGING and len(im[0].shape) == 2:
        # TODO: ERROR for image but not for seg.....
        plot_transformed_sample(im[0], undo_im[0], ['raw', 'undo'])
        plot_transformed_sample(seg[0], undo_seg[0], ['raw', 'undo'])

    # Check data type and shape
    _check_dtype(im, [do_im, undo_im])
    _check_shape(im, [do_im, undo_im])
    _check_dtype(seg, [undo_seg, do_seg])
    _check_shape(seg, [undo_seg, do_seg])

    # Loop and check
    for idx, i in enumerate(im):
        # Data consistency
        assert dice_score(undo_seg[idx], seg[idx]) > 0.85

def test_ElasticTransform(im_seg, elastic_transform):
    im, seg = im_seg
    metadata_in = [SampleMetadata({}) for _ in im] if isinstance(im, list) else SampleMetadata({})

    # Transform on Numpy
    do_im, metadata_do = elastic_transform(im.copy(), metadata_in)
    do_seg, metadata_do = elastic_transform(seg.copy(), metadata_do)

    if DEBUGGING and len(im[0].shape) == 2:
        plot_transformed_sample(im[0], do_im[0], ['raw', 'do'])
        plot_transformed_sample(seg[0], do_seg[0], ['raw', 'do'])

    _check_dtype(im, [do_im])
    _check_shape(im, [do_im])
    _check_dtype(seg, [do_seg])
    _check_shape(seg, [do_seg])

def test_DilateGT(im_seg, dilate_transform):
    im, seg = im_seg
    metadata_in = [SampleMetadata({}) for _ in im] if isinstance(im, list) else SampleMetadata({})

    # Transform on Numpy
    do_seg, metadata_do = dilate_transform(seg.copy(), metadata_in)

    if DEBUGGING and len(im[0].shape) == 2:
        plot_transformed_sample(seg[0], do_seg[0], ['raw', 'do'])

    # Check data shape and type
    _check_shape(ref=seg, list_mov=[do_seg])

    # Check data augmentation
    for idx, i in enumerate(seg):
        # data aug
        assert np.sum((do_seg[idx] > 0).astype(int)) >= np.sum(i)
        # same number of objects
        assert label((do_seg[idx] > 0).astype(int))[1] == label(i)[1]
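
# Note: the label() calls above count connected components. These examples
# appear to rely on a labelling function such as scipy.ndimage.label, whose
# second return value is the number of objects found; the actual import is not
# shown in this excerpt, so this is an assumption.
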
def test_RandomReverse(im_seg, reverse_transform):
    im, seg = im_seg
    metadata_in = [SampleMetadata({}) for _ in im] if isinstance(im, list) else SampleMetadata({})

    # Transform on Numpy
    do_im, metadata_do = reverse_transform(im.copy(), metadata_in)
    do_seg, metadata_do = reverse_transform(seg.copy(), metadata_do)

    # Transform on Numpy
    undo_im, _ = reverse_transform.undo_transform(do_im, metadata_do)
    undo_seg, _ = reverse_transform.undo_transform(do_seg, metadata_do)

    if DEBUGGING and len(im[0].shape) == 2:
        plot_transformed_sample(seg[0], do_seg[0], ['raw', 'do'])
        plot_transformed_sample(seg[0], undo_seg[0], ['raw', 'undo'])

    _check_dtype(im, [do_im])
    _check_shape(im, [do_im])
    _check_dtype(seg, [do_seg])
    _check_shape(seg, [do_seg])
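
# The im_seg fixture shared by all of these tests is not shown in this
# excerpt. A minimal illustration of the structure the calls above assume
# (a list of 2D image slices plus matching binary masks, both as numpy
# arrays); this is a sketch, not the actual ivadomed fixture:
import numpy as np
import pytest


@pytest.fixture
def im_seg():
    im = [np.random.rand(96, 96).astype(np.float32) for _ in range(2)]
    seg = [(i > 0.9).astype(np.float32) for i in im]
    return im, seg
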
def run_visualization(input, config, number, output, roi):
    """Utility function to visualize Data Augmentation transformations.

    Data augmentation is a key part of the deep learning training scheme. This script aims to facilitate the
    fine-tuning of data augmentation parameters by providing a step-by-step visualization of the
    transformations applied to the data.

    This function applies a series of transformations (defined in a configuration file
    ``-c``) to ``-n`` 2D slices randomly extracted from an input image (``-i``), and saves the resulting
    sample as a PNG after each transform.

    For example::

        ivadomed_visualize_transforms -i t2s.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Provides a visualization of a series of three transformations on a randomly selected slice:

    .. image:: https://raw.githubusercontent.com/ivadomed/doc-figures/main/scripts/transforms_im.png
        :width: 600px
        :align: center

    And on a binary mask::

        ivadomed_visualize_transforms -i t2s_gmseg.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Gives:

    .. image:: https://raw.githubusercontent.com/ivadomed/doc-figures/main/scripts/transforms_gt.png
        :width: 600px
        :align: center

    Args:
         input (string): Image filename. Flag: ``--input``, ``-i``
         config (string): Configuration file filename. Flag: ``--config``, ``-c``
         number (int): Number of slices randomly extracted. Flag: ``--number``, ``-n``
         output (string): Folder path where the results are saved. Flag: ``--ofolder``, ``-o``
         roi (string): Filename of the region of interest. Only needed if ROICrop is part of the transformations.
                       Flag: ``--roi``, ``-r``
    """
    # Load context
    context = imed_config_manager.ConfigurationManager(config).get_config()

    # Create output folder
    if not Path(output).is_dir():
        Path(output).mkdir(parents=True)

    # Slices are extracted along the axis below
    axis = imed_utils.AXIS_DCT[context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.SLICE_AXIS]]
    # Get data
    input_img, input_data = get_data(input, axis)
    # Image or Mask
    is_mask = np.array_equal(input_data, input_data.astype(bool))
    # Get zooms
    zooms = imed_loader_utils.orient_shapes_hwd(input_img.header.get_zooms(), slice_axis=axis)
    # Get indexes
    indexes = random.sample(range(0, input_data.shape[2]), number)

    # Get training transforms
    training_transforms, _, _ = imed_transforms.get_subdatasets_transforms(context[ConfigKW.TRANSFORMATION])

    if TransformationKW.ROICROP in training_transforms:
        if roi and Path(roi).is_file():
            roi_img, roi_data = get_data(roi, axis)
        else:
            raise ValueError("\nPlease provide ROI image (-r) in order to apply ROICrop transformation.")

    # Compose transforms
    dict_transforms = {}
    stg_transforms = ""
    for transform_name in training_transforms:
        # Skip the NumpyToTensor transform since it is only a change of data type
        if transform_name == "NumpyToTensor":
            continue

        # Update stg_transforms
        stg_transforms += transform_name + "_"

        # Add new transform to Compose
        dict_transforms.update({transform_name: training_transforms[transform_name]})
        composed_transforms = imed_transforms.Compose(dict_transforms)

        # Loop across slices
        for i in indexes:
            data = [input_data[:, :, i]]
            # Init metadata
            metadata = SampleMetadata({MetadataKW.ZOOMS: zooms, MetadataKW.DATA_TYPE: "gt" if is_mask else "im"})

            # Apply transformations to ROI
            if TransformationKW.CENTERCROP in training_transforms or \
                    (TransformationKW.ROICROP in training_transforms and Path(roi).is_file()):
                metadata.__setitem__(MetadataKW.CROP_PARAMS, {})

            # Apply transformations to image
            stack_im, _ = composed_transforms(sample=data,
                                              metadata=[metadata for _ in range(number)],
                                              data_type="im")

            # Plot before / after transformation
            fname_out = str(Path(output, stg_transforms + "slice" + str(i) + ".png"))
            logger.debug(f"Fname out: {fname_out}.")
            logger.debug(f"\t{dict(metadata)}")
            # rescale intensities
            if len(stg_transforms[:-1].split("_")) == 1:
                before = np.rot90(imed_maths.rescale_values_array(data[0], 0.0, 1.0))
            else:
                before = after
            if isinstance(stack_im[0], torch.Tensor):
                after = np.rot90(imed_maths.rescale_values_array(stack_im[0].numpy(), 0.0, 1.0))
            else:
                after = np.rot90(imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
            # Plot
            imed_utils.plot_transformed_sample(before,
                                               after,
                                               list_title=["\n".join(stg_transforms[:-1].split("_")[:-1]),
                                                           "\n".join(stg_transforms[:-1].split("_"))],
                                               fname_out=fname_out,
                                               cmap="jet" if is_mask else "gray")
Example #9
def run_visualization(input, config, number, output, roi):
    """Utility function to visualize Data Augmentation transformations.

    Data augmentation is a key part of the deep learning training scheme. This script aims to facilitate the
    fine-tuning of data augmentation parameters by providing a step-by-step visualization of the
    transformations applied to the data.

    This function applies a series of transformations (defined in a configuration file
    ``-c``) to ``-n`` 2D slices randomly extracted from an input image (``-i``), and saves the resulting
    sample as a PNG after each transform.

    For example::

        ivadomed_visualize_transforms -i t2s.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Provides a visualization of a series of three transformations on a randomly selected slice:

    .. image:: ../../images/transforms_im.png
        :width: 600px
        :align: center

    And on a binary mask::

        ivadomed_visualize_transforms -i t2s_gmseg.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Gives:

    .. image:: ../../images/transforms_gt.png
        :width: 600px
        :align: center

    Args:
         input (string): Image filename. Flag: ``--input``, ``-i``
         config (string): Configuration file filename. Flag: ``--config``, ``-c``
         number (int): Number of slices randomly extracted. Flag: ``--number``, ``-n``
         output (string): Folder path where the results are saved. Flag: ``--ofolder``, ``-o``
         roi (string): Filename of the region of interest. Only needed if ROICrop is part of the transformations.
                       Flag: ``--roi``, ``-r``
    """
    # Load context
    with open(config, "r") as fhandle:
        context = json.load(fhandle)
    # Create output folder
    if not os.path.isdir(output):
        os.makedirs(output)

    # Slices are extracted along the axis below
    axis = imed_utils.AXIS_DCT[context["loader_parameters"]["slice_axis"]]
    # Get data
    input_img, input_data = get_data(input, axis)
    # Image or Mask
    is_mask = np.array_equal(input_data, input_data.astype(bool))
    # Get zooms
    zooms = imed_loader_utils.orient_shapes_hwd(input_img.header.get_zooms(),
                                                slice_axis=axis)
    # Get indexes
    indexes = random.sample(range(0, input_data.shape[2]), number)

    # Get training transforms
    training_transforms, _, _ = imed_transforms.get_subdatasets_transforms(
        context["transformation"])

    if "ROICrop" in training_transforms:
        if roi and os.path.isfile(roi):
            roi_img, roi_data = get_data(roi, axis)
        else:
            print(
                "\nPlease provide ROI image (-r) in order to apply ROICrop transformation."
            )
            exit()

    # Compose transforms
    dict_transforms = {}
    stg_transforms = ""
    for transform_name in training_transforms:
        # Skip the NumpyToTensor transform since it is only a change of data type
        if transform_name == "NumpyToTensor":
            continue

        # Update stg_transforms
        stg_transforms += transform_name + "_"

        # Add new transform to Compose
        dict_transforms.update(
            {transform_name: training_transforms[transform_name]})
        composed_transforms = imed_transforms.Compose(dict_transforms)

        # Loop across slices
        for i in indexes:
            data = [input_data[:, :, i]]
            # Init metadata
            metadata = imed_loader_utils.SampleMetadata({
                "zooms": zooms,
                "data_type": "gt" if is_mask else "im"
            })

            # Apply transformations to ROI
            if "CenterCrop" in training_transforms or (
                    "ROICrop" in training_transforms and os.path.isfile(roi)):
                metadata.__setitem__('crop_params', {})

            # Apply transformations to image
            stack_im, _ = composed_transforms(
                sample=data,
                metadata=[metadata for _ in range(number)],
                data_type="im")

            # Plot before / after transformation
            fname_out = os.path.join(
                output, stg_transforms + "slice" + str(i) + ".png")
            print("Fname out: {}.".format(fname_out))
            print("\t{}".format(dict(metadata)))
            # rescale intensities
            if len(stg_transforms[:-1].split("_")) == 1:
                before = np.rot90(
                    imed_maths.rescale_values_array(data[0], 0.0, 1.0))
            else:
                before = after
            after = np.rot90(
                imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
            # Plot
            imed_utils.plot_transformed_sample(
                before,
                after,
                list_title=[
                    "\n".join(stg_transforms[:-1].split("_")[:-1]),
                    "\n".join(stg_transforms[:-1].split("_"))
                ],
                fname_out=fname_out,
                cmap="jet" if is_mask else "gray")