Example #1
0
    def get_pair_metadata(self, slice_index=0, coord=None):
        """Return dictionary containing input and gt metadata.

        Args:
            slice_index (int): Index of 2D slice if 2D model is used, else 0.
            coord (tuple or list): Coordinates of subvolume in volume if 3D model is used, else None.

        Returns:
            dict: Input and gt metadata.
        """
        gt_meta_dict = []
        for gt in self.gt_handle:
            if gt is None:
                # Temporarily append null metadata for a null gt; replaced below
                # with metadata from another existing class of the same subject.
                gt_meta_dict.append(None)
                continue
            gt_meta_dict.append(
                imed_loader_utils.SampleMetadata({
                    "zooms": imed_loader_utils.orient_shapes_hwd(
                        gt.header.get_zooms(), self.slice_axis),
                    "data_shape": imed_loader_utils.orient_shapes_hwd(
                        gt.header.get_data_shape(), self.slice_axis),
                    "gt_filenames": self.metadata[0]["gt_filenames"],
                    "bounding_box": self.metadata[0]["bounding_box"]
                    if 'bounding_box' in self.metadata[0] else None,
                    "data_type": 'gt',
                    "crop_params": {}
                }))

        # Replace null metadata with metadata from another existing class of the
        # same subject. The fallback is computed at most once (hoisted out of
        # the loop) instead of re-filtering the list for every null entry.
        fallback_meta = None
        for idx, gt_metadata in enumerate(gt_meta_dict):
            if gt_metadata is None:
                if fallback_meta is None:
                    # Still raises IndexError when every class is null (same
                    # behavior as before).
                    fallback_meta = list(filter(None, gt_meta_dict))[0]
                gt_meta_dict[idx] = fallback_meta

        input_meta_dict = []
        for handle in self.input_handle:
            input_meta_dict.append(
                imed_loader_utils.SampleMetadata({
                    "zooms": imed_loader_utils.orient_shapes_hwd(
                        handle.header.get_zooms(), self.slice_axis),
                    "data_shape": imed_loader_utils.orient_shapes_hwd(
                        handle.header.get_data_shape(), self.slice_axis),
                    "data_type": 'im',
                    "crop_params": {}
                }))

        dreturn = {
            "input_metadata": input_meta_dict,
            "gt_metadata": gt_meta_dict,
        }

        # Propagate per-channel metadata (plus slice_index / coord) into the
        # corresponding input metadata entries.
        for idx, metadata in enumerate(self.metadata):  # loop across channels
            metadata["slice_index"] = slice_index
            metadata["coord"] = coord
            self.metadata[idx] = metadata
            for metadata_key in metadata.keys():  # loop across input metadata
                dreturn["input_metadata"][idx][metadata_key] = metadata[
                    metadata_key]

        return dreturn
Example #2
0
    def __getitem__(self, index):
        """Get samples.

        Warning: For now, this method only supports one gt / roi.

        Args:
            index (int): Sample index.

        Returns:
            dict: Dictionary containing image and label tensors as well as metadata.
        """
        # Row of the dataframe describing this sample (subject, slice, dataset keys).
        line = self.dataframe.iloc[index]
        # For the HeMIS strategy: per-contrast multipliers. With all ones
        # (non-HeMIS case) the multiplications below are no-ops.
        missing_modalities = self.cst_matrix[index]

        input_metadata = []
        input_tensors = []

        # Inputs
        with h5py.File(self.path_hdf5, "r") as f:
            for i, ct in enumerate(self.cst_lst):
                # NOTE(review): self.status[ct] presumably means the contrast is
                # already materialized in the dataframe; otherwise the slice is
                # read from the HDF5 file — confirm against dataset builder.
                if self.status[ct]:
                    input_tensor = line[ct] * missing_modalities[i]
                else:
                    input_tensor = f[line[ct]][
                        line['Slices']] * missing_modalities[i]

                input_tensors.append(input_tensor)
                # Input metadata: copy every HDF5 attribute of this input dataset.
                metadata = imed_loader_utils.SampleMetadata({
                    key: value
                    for key, value in f['{}/inputs/{}'.format(
                        line['Subjects'], ct)].attrs.items()
                })
                metadata['slice_index'] = line["Slices"]
                metadata['missing_mod'] = missing_modalities
                metadata['crop_params'] = {}
                input_metadata.append(metadata)

            # GT
            gt_img = []
            gt_metadata = []
            for idx, gt in enumerate(self.gt_lst):
                if self.status['gt/' + gt]:
                    gt_data = line['gt/' + gt]
                else:
                    gt_data = f[line['gt/' + gt]][line['Slices']]

                # Ground-truth masks are cast to uint8 labels.
                gt_data = gt_data.astype(np.uint8)
                gt_img.append(gt_data)
                gt_metadata.append(
                    imed_loader_utils.SampleMetadata({
                        key: value
                        for key, value in f[line['gt/' + gt]].attrs.items()
                    }))
                gt_metadata[idx]['crop_params'] = {}

            # ROI (optional; only the first entry of roi_lst is used)
            roi_img = []
            roi_metadata = []
            if self.roi_lst:
                if self.status['roi/' + self.roi_lst[0]]:
                    roi_data = line['roi/' + self.roi_lst[0]]
                else:
                    roi_data = f[line['roi/' +
                                      self.roi_lst[0]]][line['Slices']]

                roi_data = roi_data.astype(np.uint8)
                roi_img.append(roi_data)

                roi_metadata.append(
                    imed_loader_utils.SampleMetadata({
                        key: value
                        for key, value in f[line[
                            'roi/' + self.roi_lst[0]]].attrs.items()
                    }))
                roi_metadata[0]['crop_params'] = {}

            # Run transforms on ROI
            # ROI goes first because params of ROICrop are needed for the following transforms
            stack_roi, metadata_roi = self.transform(sample=roi_img,
                                                     metadata=roi_metadata,
                                                     data_type="roi")
            # Update metadata_input with metadata_roi (e.g. crop parameters)
            metadata_input = imed_loader_utils.update_metadata(
                metadata_roi, input_metadata)

            # Run transforms on images
            stack_input, metadata_input = self.transform(
                sample=input_tensors, metadata=metadata_input, data_type="im")
            # Update metadata_gt with metadata_input
            metadata_gt = imed_loader_utils.update_metadata(
                metadata_input, gt_metadata)

            # Run transforms on ground truths
            stack_gt, metadata_gt = self.transform(sample=gt_img,
                                                   metadata=metadata_gt,
                                                   data_type="gt")
            data_dict = {
                'input': stack_input,
                'gt': stack_gt,
                'roi': stack_roi,
                'input_metadata': metadata_input,
                'gt_metadata': metadata_gt,
                'roi_metadata': metadata_roi
            }

            return data_dict
Example #3
0
def run_visualization(input, config, number, output, roi):
    """Utility function to visualize Data Augmentation transformations.

    Data augmentation is a key part of the Deep Learning training scheme. This script aims at facilitating the
    fine-tuning of data augmentation parameters. To do so, this script provides a step-by-step visualization of the
    transformations that are applied on data.

    This function applies a series of transformations (defined in a configuration file
    ``-c``) to ``-n`` 2D slices randomly extracted from an input image (``-i``), and save as png the resulting sample
    after each transform.

    For example::

        ivadomed_visualize_transforms -i t2s.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Provides a visualization of a series of three transformation on a randomly selected slice:

    .. image:: ../../images/transforms_im.png
        :width: 600px
        :align: center

    And on a binary mask::

        ivadomed_visualize_transforms -i t2s_gmseg.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Gives:

    .. image:: ../../images/transforms_gt.png
        :width: 600px
        :align: center

    Args:
         input (string): Image filename. Flag: ``--input``, ``-i``
         config (string): Configuration file filename. Flag: ``--config``, ``-c``
         number (int): Number of slices randomly extracted. Flag: ``--number``, ``-n``
         output (string): Folder path where the results are saved. Flag: ``--ofolder``, ``-o``
         roi (string): Filename of the region of interest. Only needed if ROICrop is part of the transformations.
                       Flag: ``--roi``, ``-r``
    """
    # Load context
    with open(config, "r") as fhandle:
        context = json.load(fhandle)
    # Create output folder
    if not os.path.isdir(output):
        os.makedirs(output)

    # Slice extracted according to below axis
    axis = imed_utils.AXIS_DCT[context["loader_parameters"]["slice_axis"]]
    # Get data
    input_img, input_data = get_data(input, axis)
    # Image or Mask
    is_mask = np.array_equal(input_data, input_data.astype(bool))
    # Get zooms
    zooms = imed_loader_utils.orient_shapes_hwd(input_img.header.get_zooms(),
                                                slice_axis=axis)
    # Get indexes
    indexes = random.sample(range(0, input_data.shape[2]), number)

    # Get training transforms
    training_transforms, _, _ = imed_transforms.get_subdatasets_transforms(
        context["transformation"])

    if "ROICrop" in training_transforms:
        if roi and os.path.isfile(roi):
            roi_img, roi_data = get_data(roi, axis)
        else:
            print(
                "\nPlease provide ROI image (-r) in order to apply ROICrop transformation."
            )
            exit()

    # Compose transforms
    dict_transforms = {}
    stg_transforms = ""
    # Latest transformed sample per slice index, so that "before" for transform
    # k on slice i is the output of transforms 1..k-1 on that SAME slice.
    # (Previously `before = after` reused the value from the preceding
    # inner-loop iteration, i.e. a different slice, whenever -n > 1.)
    prev_after = {}
    for transform_name in training_transforms:
        # We skip NumpyToTensor transform since that's only a change of data type
        if transform_name == "NumpyToTensor":
            continue

        # Update stg_transforms
        stg_transforms += transform_name + "_"

        # Add new transform to Compose; the composition is cumulative, so each
        # outer iteration applies transforms 1..k to the original slice.
        dict_transforms.update(
            {transform_name: training_transforms[transform_name]})
        composed_transforms = imed_transforms.Compose(dict_transforms)

        # Loop across slices
        for i in indexes:
            data = [input_data[:, :, i]]
            # Init metadata
            metadata = imed_loader_utils.SampleMetadata({
                "zooms": zooms,
                "data_type": "gt" if is_mask else "im"
            })

            # Crop-based transforms need an (initially empty) crop_params entry
            if "CenterCrop" in training_transforms or (
                    "ROICrop" in training_transforms and os.path.isfile(roi)):
                metadata.__setitem__('crop_params', {})

            # Apply transformations to image
            # NOTE(review): `metadata` is repeated `number` times although
            # `data` holds a single slice — looks like it should match
            # len(data); confirm against Compose's expected API.
            stack_im, _ = composed_transforms(
                sample=data,
                metadata=[metadata for _ in range(number)],
                data_type="im")

            # Plot before / after transformation
            fname_out = os.path.join(
                output, stg_transforms + "slice" + str(i) + ".png")
            print("Fname out: {}.".format(fname_out))
            print("\t{}".format(dict(metadata)))
            # rescale intensities
            if len(stg_transforms[:-1].split("_")) == 1:
                # First transform: "before" is the raw slice.
                before = np.rot90(
                    imed_maths.rescale_values_array(data[0], 0.0, 1.0))
            else:
                # Later transforms: "before" is the previous composed output
                # for this specific slice.
                before = prev_after[i]
            after = np.rot90(
                imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
            prev_after[i] = after
            # Plot
            imed_utils.plot_transformed_sample(
                before,
                after,
                list_title=[
                    "\n".join(stg_transforms[:-1].split("_")[:-1]),
                    "\n".join(stg_transforms[:-1].split("_"))
                ],
                fname_out=fname_out,
                cmap="jet" if is_mask else "gray")