Code example #1
def test_tensorboard_save():
    inp = torch.tensor(np.zeros((1, 1, 15, 15)))
    gt = torch.tensor(np.zeros((1, 1, 15, 15)))
    pred = torch.tensor(np.zeros((1, 1, 15, 15)))
    dpath = Path(__tmp_dir__, "test_tensorboard_save")
    dpath.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(log_dir=str(dpath))
    imed_visualize.save_img(writer, 1, "Training", inp, pred, gt)
    writer.flush()

    summary_iterators = [EventAccumulator(str(dname)).Reload() for dname in dpath.iterdir()]
    input_retrieve = pred_retrieve = gt_retrieve = None
    for summary in summary_iterators:
        if summary.Tags()['images'] == ['Training/Input', 'Training/Predictions', 'Training/Ground Truth']:
            # Each image event stores the PNG-encoded bytes at index 2
            input_retrieve = np.array(Image.open(io.BytesIO(summary.Images('Training/Input')[0][2])))
            pred_retrieve = np.array(Image.open(io.BytesIO(summary.Images('Training/Predictions')[0][2])))
            gt_retrieve = np.array(Image.open(io.BytesIO(summary.Images('Training/Ground Truth')[0][2])))

    assert np.allclose(imed_math.rescale_values_array(input_retrieve[:, :, 0], 0, 1), inp[0, 0, :, :])
    assert np.allclose(imed_math.rescale_values_array(pred_retrieve[:, :, 0], 0, 1), pred[0, 0, :, :])
    assert np.allclose(imed_math.rescale_values_array(gt_retrieve[:, :, 0], 0, 1), gt[0, 0, :, :])
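The snippet above omits its imports. A plausible set is sketched below: the io, pathlib, NumPy, Torch, Pillow, and TensorBoard imports are standard, while the ivadomed module paths are only inferred from the aliases and may differ between versions; __tmp_dir__ is assumed to be provided by the test module.

import io
from pathlib import Path

import numpy as np
import torch
from PIL import Image
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from torch.utils.tensorboard import SummaryWriter

from ivadomed import maths as imed_math           # assumed module path
from ivadomed import visualize as imed_visualize  # assumed module path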
Code example #2
def test_tensorboard_save():
    inp = torch.tensor(np.zeros((1, 1, 15, 15)))
    gt = torch.tensor(np.zeros((1, 1, 15, 15)))
    pred = torch.tensor(np.zeros((1, 1, 15, 15)))
    dpath = "test_tensorboard_save"
    os.makedirs(dpath, exist_ok=True)
    writer = SummaryWriter(log_dir=dpath)
    imed_utils.save_tensorboard_img(writer, 1, "Training", inp, pred, gt)
    writer.flush()

    summary_iterators = [
        EventAccumulator(os.path.join(dpath, dname)).Reload()
        for dname in os.listdir(dpath)
    ]
    input_retrieve = pred_retrieve = gt_retrieve = None
    for summary in summary_iterators:
        if summary.Tags()['images'] == [
                'Training/Input', 'Training/Predictions',
                'Training/Ground Truth'
        ]:
            # Each image event stores the PNG-encoded bytes at index 2
            input_retrieve = np.array(
                Image.open(
                    io.BytesIO(summary.Images('Training/Input')[0][2])))
            pred_retrieve = np.array(
                Image.open(
                    io.BytesIO(summary.Images('Training/Predictions')[0][2])))
            gt_retrieve = np.array(
                Image.open(
                    io.BytesIO(summary.Images('Training/Ground Truth')[0][2])))

    assert np.allclose(
        imed_math.rescale_values_array(input_retrieve[:, :, 0], 0, 1),
        inp[0, 0, :, :])
    assert np.allclose(
        imed_math.rescale_values_array(pred_retrieve[:, :, 0], 0, 1),
        pred[0, 0, :, :])
    assert np.allclose(
        imed_math.rescale_values_array(gt_retrieve[:, :, 0], 0, 1),
        gt[0, 0, :, :])
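Both versions of the test rely on the same write-then-read-back pattern: write an image summary with SummaryWriter, reload the event files with EventAccumulator, and decode the stored PNG bytes. A minimal, self-contained sketch of that pattern (the tag and log directory names are illustrative):

import io

import numpy as np
from PIL import Image
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from torch.utils.tensorboard import SummaryWriter

logdir = "tb_roundtrip_demo"  # illustrative scratch directory
writer = SummaryWriter(log_dir=logdir)
img = np.random.rand(15, 15).astype(np.float32)
# dataformats="HW" marks the array as a single-channel 2D image
writer.add_image("demo/input", img, global_step=1, dataformats="HW")
writer.flush()

acc = EventAccumulator(logdir)
acc.Reload()
# Each image event exposes the stored PNG bytes as encoded_image_string
png_bytes = acc.Images("demo/input")[0].encoded_image_string
decoded = np.array(Image.open(io.BytesIO(png_bytes)))
print(decoded.shape)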
Code example #3
def run_visualization(input, config, number, output, roi):
    """Utility function to visualize Data Augmentation transformations.

    Data augmentation is a key part of the Deep Learning training scheme. This script aims to facilitate the
    fine-tuning of data augmentation parameters by providing a step-by-step visualization of the transformations
    applied to the data.

    This function applies a series of transformations (defined in a configuration file
    ``-c``) to ``-n`` 2D slices randomly extracted from an input image (``-i``), and saves the resulting sample as a
    PNG after each transform.

    For example::

        ivadomed_visualize_transforms -i t2s.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Provides a visualization of a series of three transformations on a randomly selected slice:

    .. image:: https://raw.githubusercontent.com/ivadomed/doc-figures/main/scripts/transforms_im.png
        :width: 600px
        :align: center

    And on a binary mask::

        ivadomed_visualize_transforms -i t2s_gmseg.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Gives:

    .. image:: https://raw.githubusercontent.com/ivadomed/doc-figures/main/scripts/transforms_gt.png
        :width: 600px
        :align: center

    Args:
         input (string): Image filename. Flag: ``--input``, ``-i``
         config (string): Configuration file filename. Flag: ``--config``, ``-c``
         number (int): Number of slices randomly extracted. Flag: ``--number``, ``-n``
         output (string): Folder path where the results are saved. Flag: ``--ofolder``, ``-o``
         roi (string): Filename of the region of interest. Only needed if ROICrop is part of the transformations.
                       Flag: ``--roi``, ``-r``
    """
    # Load context
    context = imed_config_manager.ConfigurationManager(config).get_config()

    # Create output folder
    if not Path(output).is_dir():
        Path(output).mkdir(parents=True)

    # Slices are extracted along the axis below
    axis = imed_utils.AXIS_DCT[context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.SLICE_AXIS]]
    # Get data
    input_img, input_data = get_data(input, axis)
    # Image or Mask
    is_mask = np.array_equal(input_data, input_data.astype(bool))
    # Get zooms
    zooms = imed_loader_utils.orient_shapes_hwd(input_img.header.get_zooms(), slice_axis=axis)
    # Get indexes
    indexes = random.sample(range(0, input_data.shape[2]), number)

    # Get training transforms
    training_transforms, _, _ = imed_transforms.get_subdatasets_transforms(context[ConfigKW.TRANSFORMATION])

    if TransformationKW.ROICROP in training_transforms:
        if roi and Path(roi).is_file():
            roi_img, roi_data = get_data(roi, axis)
        else:
            raise ValueError("\nPlease provide ROI image (-r) in order to apply ROICrop transformation.")

    # Compose transforms
    dict_transforms = {}
    stg_transforms = ""
    for transform_name in training_transforms:
        # We skip the NumpyToTensor transform since it's only a change of data type
        if transform_name == "NumpyToTensor":
            continue

        # Update stg_transforms
        stg_transforms += transform_name + "_"

        # Add new transform to Compose
        dict_transforms.update({transform_name: training_transforms[transform_name]})
        composed_transforms = imed_transforms.Compose(dict_transforms)

        # Loop across slices
        for i in indexes:
            data = [input_data[:, :, i]]
            # Init metadata
            metadata = SampleMetadata({MetadataKW.ZOOMS: zooms, MetadataKW.DATA_TYPE: "gt" if is_mask else "im"})

            # Apply transformations to ROI
            if TransformationKW.CENTERCROP in training_transforms or \
                    (TransformationKW.ROICROP in training_transforms and Path(roi).is_file()):
                metadata[MetadataKW.CROP_PARAMS] = {}

            # Apply transformations to image
            stack_im, _ = composed_transforms(sample=data,
                                              metadata=[metadata for _ in range(number)],
                                              data_type="im")

            # Plot before / after transformation
            fname_out = str(Path(output, stg_transforms + "slice" + str(i) + ".png"))
            logger.debug(f"Fname out: {fname_out}.")
            logger.debug(f"\t{dict(metadata)}")
            # Rescale intensities
            if len(stg_transforms[:-1].split("_")) == 1:
                before = np.rot90(imed_maths.rescale_values_array(data[0], 0.0, 1.0))
            else:
                # "Before" is the previous transform's output for this same slice
                before = after_by_slice[i]
            if isinstance(stack_im[0], torch.Tensor):
                after = np.rot90(imed_maths.rescale_values_array(stack_im[0].numpy(), 0.0, 1.0))
            else:
                after = np.rot90(imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
            after_by_slice[i] = after
            # Plot
            imed_utils.plot_transformed_sample(before,
                                               after,
                                               list_title=["\n".join(stg_transforms[:-1].split("_")[:-1]),
                                                           "\n".join(stg_transforms[:-1].split("_"))],
                                               fname_out=fname_out,
                                               cmap="jet" if is_mask else "gray")
Code example #4
def run_visualization(input, config, number, output, roi):
    """Utility function to visualize Data Augmentation transformations.

    Data augmentation is a key part of the Deep Learning training scheme. This script aims to facilitate the
    fine-tuning of data augmentation parameters by providing a step-by-step visualization of the transformations
    applied to the data.

    This function applies a series of transformations (defined in a configuration file
    ``-c``) to ``-n`` 2D slices randomly extracted from an input image (``-i``), and saves the resulting sample as a
    PNG after each transform.

    For example::

        ivadomed_visualize_transforms -i t2s.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Provides a visualization of a series of three transformations on a randomly selected slice:

    .. image:: ../../images/transforms_im.png
        :width: 600px
        :align: center

    And on a binary mask::

        ivadomed_visualize_transforms -i t2s_gmseg.nii.gz -n 1 -c config.json -r t2s_seg.nii.gz

    Gives:

    .. image:: ../../images/transforms_gt.png
        :width: 600px
        :align: center

    Args:
         input (string): Image filename. Flag: ``--input``, ``-i``
         config (string): Configuration file filename. Flag: ``--config``, ``-c``
         number (int): Number of slices randomly extracted. Flag: ``--number``, ``-n``
         output (string): Folder path where the results are saved. Flag: ``--ofolder``, ``-o``
         roi (string): Filename of the region of interest. Only needed if ROICrop is part of the transformations.
                       Flag: ``--roi``, ``-r``
    """
    # Load context
    with open(config, "r") as fhandle:
        context = json.load(fhandle)
    # Create output folder
    if not os.path.isdir(output):
        os.makedirs(output)

    # Slices are extracted along the axis below
    axis = imed_utils.AXIS_DCT[context["loader_parameters"]["slice_axis"]]
    # Get data
    input_img, input_data = get_data(input, axis)
    # Image or Mask
    is_mask = np.array_equal(input_data, input_data.astype(bool))
    # Get zooms
    zooms = imed_loader_utils.orient_shapes_hwd(input_img.header.get_zooms(),
                                                slice_axis=axis)
    # Get indexes
    indexes = random.sample(range(0, input_data.shape[2]), number)

    # Get training transforms
    training_transforms, _, _ = imed_transforms.get_subdatasets_transforms(
        context["transformation"])

    if "ROICrop" in training_transforms:
        if roi and os.path.isfile(roi):
            roi_img, roi_data = get_data(roi, axis)
        else:
            print(
                "\nPlease provide ROI image (-r) in order to apply ROICrop transformation."
            )
            exit()

    # Compose transforms
    dict_transforms = {}
    stg_transforms = ""
    for transform_name in training_transforms:
        # We skip the NumpyToTensor transform since it's only a change of data type
        if transform_name == "NumpyToTensor":
            continue

        # Update stg_transforms
        stg_transforms += transform_name + "_"

        # Add new transform to Compose
        dict_transforms.update(
            {transform_name: training_transforms[transform_name]})
        composed_transforms = imed_transforms.Compose(dict_transforms)

        # Loop across slices
        for i in indexes:
            data = [input_data[:, :, i]]
            # Init metadata
            metadata = imed_loader_utils.SampleMetadata({
                "zooms": zooms,
                "data_type": "gt" if is_mask else "im"
            })

            # Apply transformations to ROI
            if "CenterCrop" in training_transforms or (
                    "ROICrop" in training_transforms and os.path.isfile(roi)):
                metadata['crop_params'] = {}

            # Apply transformations to image
            stack_im, _ = composed_transforms(
                sample=data,
                metadata=[metadata for _ in range(number)],
                data_type="im")

            # Plot before / after transformation
            fname_out = os.path.join(
                output, stg_transforms + "slice" + str(i) + ".png")
            print("Fname out: {}.".format(fname_out))
            print("\t{}".format(dict(metadata)))
            # Rescale intensities
            if len(stg_transforms[:-1].split("_")) == 1:
                before = np.rot90(
                    imed_maths.rescale_values_array(data[0], 0.0, 1.0))
            else:
                # "Before" is the previous transform's output for this same slice
                before = after_by_slice[i]
            after = np.rot90(
                imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
            after_by_slice[i] = after
            # Plot
            imed_utils.plot_transformed_sample(
                before,
                after,
                list_title=[
                    "\n".join(stg_transforms[:-1].split("_")[:-1]),
                    "\n".join(stg_transforms[:-1].split("_"))
                ],
                fname_out=fname_out,
                cmap="jet" if is_mask else "gray")
Code example #5
File: test_transforms.py  Project: vs74/ivadomed
def create_test_image(width,
                      height,
                      depth=0,
                      num_contrasts=1,
                      noise_max=10.0,
                      num_objs=1,
                      rad_max=30,
                      num_seg_classes=1,
                      random_position=False):
    """Create test image.

    Create test image and its segmentation with a given number of objects, classes, and maximum radius.
    Compatible with both 2D (depth=0) and 3D images.

    Args:
        width (int): width of the image
        height (int): height of the image
        depth (int): depth of the image; if 0, 2D images are returned
        num_contrasts (int): number of contrasts
        noise_max (float): noise drawn from the uniform distribution [0, noise_max)
        num_objs (int): number of objects
        rad_max (int): maximum radius of objects
        num_seg_classes (int): number of classes
        random_position (bool): if False, the object is located at the center of the image; otherwise, randomly located.

    Returns:
        list, list: image and segmentation; each is a list of num_contrasts elements of shape (height, width, depth).

    Adapted from: https://github.com/Project-MONAI/MONAI/blob/master/monai/data/synthetic.py#L17
    """
    assert num_contrasts >= 1

    depth_ = depth if depth >= 1 else 2 * rad_max + 1
    assert (height > 2 * rad_max) and (width > 2 * rad_max) and (depth_ > 2 * rad_max)

    image = np.zeros((height, width, depth_))

    for i in range(num_objs):
        if random_position:
            x = np.random.randint(rad_max, height - rad_max)
            y = np.random.randint(rad_max, width - rad_max)
            z = np.random.randint(rad_max, depth_ - rad_max)
        else:
            # Cast to int so the ogrid ranges below are integer-valued
            x = int(np.rint(height / 2))
            y = int(np.rint(width / 2))
            z = int(np.rint(depth_ / 2))
        rad = np.random.randint(5, rad_max)
        spy, spx, spz = np.ogrid[-x:height - x, -y:width - y, -z:depth_ - z]
        # Squared distance against squared radius defines a ball of radius rad
        sphere = (spx * spx + spy * spy + spz * spz) <= rad * rad

        if num_seg_classes > 1:
            image[sphere] = np.ceil(np.random.random() * num_seg_classes)
        else:
            image[sphere] = np.random.random() * 0.5 + 0.5

    seg = np.ceil(image).astype(np.int32)

    if depth == 0:
        _, _, z_slice = center_of_mass(seg.astype(int))
        z_slice = int(round(z_slice))
        seg = seg[:, :, z_slice]

    list_im, list_seg = [], []
    for _ in range(num_contrasts):
        norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
        noisy_image = imed_maths.rescale_values_array(np.maximum(image, norm))

        if depth == 0:
            noisy_image = noisy_image[:, :, z_slice]

        list_im.append(noisy_image)
        list_seg.append(seg)

    return list_im, list_seg
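A hedged usage sketch, assuming the function above and NumPy are in scope: generate a 2D test image with two objects and its segmentation, then sanity-check shapes and labels (sizes chosen only to satisfy the radius asserts):

list_im, list_seg = create_test_image(width=100, height=100, depth=0,
                                      num_contrasts=1, num_objs=2,
                                      rad_max=10, num_seg_classes=2,
                                      random_position=True)
im, seg = list_im[0], list_seg[0]
assert im.shape == seg.shape == (100, 100)
assert set(np.unique(seg)) <= {0, 1, 2}  # background plus up to two classes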