Example #1
def test_random_gamma(tensor_1channel_1slice: torch.Tensor) -> None:
    random.seed(0)
    transformed_1 = RandomGamma(scale=(0.3, 3))(tensor_1channel_1slice.clone())
    assert transformed_1.shape == tensor_1channel_1slice.shape

    tensor_img = torch.ones([2, 3, *image_size])
    transformed_2 = RandomGamma(scale=(0.3, 3))(tensor_img)
    # The same gamma transform should be applied to all channels and Z slices.
    assert transformed_2.shape == torch.Size([2, 3, *image_size])
    assert torch.isclose(transformed_2[0], transformed_2[1]).all()
    assert torch.isclose(transformed_2[0, 1], transformed_2[0, 2]).all() and \
           torch.isclose(transformed_2[0, 0], transformed_2[0, 2]).all()
Example #2
def test_custom_tf_on_various_input(
        use_different_transformation_per_channel: bool) -> None:
    """
    Tests that we can run a transformation pipeline with our custom transforms on various
    types of input: PIL image, 3D tensor, and 4D tensor. Also tests that
    use_different_transformation_per_channel has the correct behavior. The transforms are
    tested individually in test_image_transforms.py.
    """
    pipeline = ImageTransformationPipeline([
        ElasticTransform(sigma=4, alpha=34, p_apply=1),
        AddGaussianNoise(p_apply=1, std=0.05),
        RandomGamma(scale=(0.3, 3))
    ], use_different_transformation_per_channel)

    # Test PIL image input
    transformed = pipeline(test_image_as_pil)
    assert transformed.shape == test_2d_image_as_CHW_tensor.shape

    # Test image as [C, H, W] tensor
    transformed = pipeline(test_2d_image_as_CHW_tensor)
    assert transformed.shape == test_2d_image_as_CHW_tensor.shape

    # Test image as [1, 1, H, W]
    transformed = pipeline(test_2d_image_as_ZCHW_tensor)
    assert isinstance(transformed, torch.Tensor)
    assert transformed.shape == torch.Size([1, 1, *image_size])

    # Test with a fake scan [C, Z, H, W] -> [25, 34, 32, 32]
    transformed = pipeline(test_4d_scan_as_tensor)
    assert isinstance(transformed, torch.Tensor)
    assert transformed.shape == test_4d_scan_as_tensor.shape

    # The same transformation should be applied to all slices and channels exactly when
    # use_different_transformation_per_channel is False.
    assert torch.isclose(
        transformed[0, 0],
        transformed[1, 1]).all() != use_different_transformation_per_channel
Example #3
def test_random_gamma_image(image_as_tensor: torch.Tensor) -> None:
    human_readable_transformed = to_pil_image(
        RandomGamma(scale=(2, 3))(image_as_tensor).squeeze(0))
    expected_pil_image = Image.open(
        full_ml_test_data_path() /
        "gamma_transformed_image_and_contour.png").convert("RGB")
    assert expected_pil_image == human_readable_transformed
Example #4
def test_random_gamma() -> None:
    # Invalid input: the transform expects a 4-dimensional tensor
    with pytest.raises(ValueError):
        RandomGamma(scale=(0.3, 3))(invalid_test_tensor)

    random.seed(0)
    transformed_1 = RandomGamma(scale=(0.3, 3))(
        test_tensor_1channel_1slice.clone())
    assert transformed_1.shape == test_tensor_1channel_1slice.shape

    tensor_img = torch.ones([2, 3, *image_size])
    transformed_2 = RandomGamma(scale=(0.3, 3))(tensor_img)
    # The same gamma transform should be applied to all channels and Z slices.
    assert transformed_2.shape == torch.Size([2, 3, *image_size])
    assert torch.isclose(transformed_2[0], transformed_2[1]).all()
    assert torch.isclose(transformed_2[0, 1], transformed_2[0, 2]).all() and \
           torch.isclose(transformed_2[0, 0], transformed_2[0, 2]).all()

    human_readable_transformed = to_pil_image(
        RandomGamma(scale=(2, 3))(test_image_as_tensor).squeeze(0))
    expected_pil_image = Image.open(
        full_ml_test_data_path() /
        "gamma_transformed_image_and_contour.png").convert("RGB")
    assert expected_pil_image == human_readable_transformed
Example #5
def create_transforms_from_config(
        config: CfgNode,
        apply_augmentations: bool,
        expand_channels: bool = True) -> ImageTransformationPipeline:
    """
    Defines the image transformation pipeline from a config file. It was designed for chest X-ray
    images but can be used for other types of image data; the types and strengths of the
    augmentations to apply are expected to be defined in the config. Channel expansion is needed
    for grayscale images.
    :param config: config node (loaded from a YAML file) defining the strength and type of
    augmentations to apply.
    :param apply_augmentations: if True, return a transformation pipeline with augmentations.
    Otherwise, disable augmentations, i.e. only resize and center crop the image.
    :param expand_channels: if True, the expand-channels transformation from
    InnerEye.ML.augmentations.image_transforms will be added to the transformations passed
    through the config. This is needed for single-channel images such as CXR.
    """
    transforms: List[Any] = []
    if expand_channels:
        transforms.append(ExpandChannels())
    if apply_augmentations:
        if config.augmentation.use_random_affine:
            transforms.append(
                RandomAffine(
                    degrees=config.augmentation.random_affine.max_angle,
                    translate=(
                        config.augmentation.random_affine.max_horizontal_shift,
                        config.augmentation.random_affine.max_vertical_shift),
                    shear=config.augmentation.random_affine.max_shear))
        if config.augmentation.use_random_crop:
            transforms.append(
                RandomResizedCrop(scale=config.augmentation.random_crop.scale,
                                  size=config.preprocess.resize))
        else:
            transforms.append(Resize(size=config.preprocess.resize))
        if config.augmentation.use_random_horizontal_flip:
            transforms.append(
                RandomHorizontalFlip(
                    p=config.augmentation.random_horizontal_flip.prob))
        if config.augmentation.use_gamma_transform:
            transforms.append(
                RandomGamma(scale=config.augmentation.gamma.scale))
        if config.augmentation.use_random_color:
            transforms.append(
                ColorJitter(
                    brightness=config.augmentation.random_color.brightness,
                    contrast=config.augmentation.random_color.contrast,
                    saturation=config.augmentation.random_color.saturation))
        if config.augmentation.use_elastic_transform:
            transforms.append(
                ElasticTransform(
                    alpha=config.augmentation.elastic_transform.alpha,
                    sigma=config.augmentation.elastic_transform.sigma,
                    p_apply=config.augmentation.elastic_transform.p_apply))
        transforms.append(CenterCrop(config.preprocess.center_crop_size))
        if config.augmentation.use_random_erasing:
            transforms.append(
                RandomErasing(scale=config.augmentation.random_erasing.scale,
                              ratio=config.augmentation.random_erasing.ratio))
        if config.augmentation.add_gaussian_noise:
            transforms.append(
                AddGaussianNoise(
                    p_apply=config.augmentation.gaussian_noise.p_apply,
                    std=config.augmentation.gaussian_noise.std))
    else:
        transforms += [
            Resize(size=config.preprocess.resize),
            CenterCrop(config.preprocess.center_crop_size)
        ]
    pipeline = ImageTransformationPipeline(transforms)
    return pipeline
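
A minimal usage sketch for the function above, assuming config is a yacs.config.CfgNode: the field names mirror the attributes read in the function body, while the concrete values are illustrative assumptions, not project defaults.

from yacs.config import CfgNode

# Hypothetical config: field names follow the function above, values are
# illustrative only.
config = CfgNode()
config.preprocess = CfgNode({"resize": 256, "center_crop_size": 224})
config.augmentation = CfgNode()
config.augmentation.use_random_affine = False
config.augmentation.use_random_crop = False
config.augmentation.use_random_horizontal_flip = False
config.augmentation.use_gamma_transform = True
config.augmentation.gamma = CfgNode({"scale": (0.5, 1.5)})
config.augmentation.use_random_color = False
config.augmentation.use_elastic_transform = False
config.augmentation.use_random_erasing = False
config.augmentation.add_gaussian_noise = False

# With these flags the resulting pipeline is:
# ExpandChannels -> Resize -> RandomGamma -> CenterCrop
pipeline = create_transforms_from_config(config, apply_augmentations=True)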
Example #6
def test_create_transform_pipeline_from_config() -> None:
    """
    Tests that the pipeline returned by create_cxr_transforms_from_config applies the expected
    transformations.
    """
    transformation_pipeline = create_cxr_transforms_from_config(
        cxr_augmentation_config, apply_augmentations=True)
    fake_cxr_as_array = np.ones([256, 256]) * 255.
    fake_cxr_as_array[100:150, 100:200] = 1
    fake_cxr_image = PIL.Image.fromarray(fake_cxr_as_array).convert("L")

    all_transforms = [
        ExpandChannels(),
        RandomAffine(degrees=180, translate=(0, 0), shear=40),
        RandomResizedCrop(scale=(0.4, 1.0), size=256),
        RandomHorizontalFlip(p=0.5),
        RandomGamma(scale=(0.5, 1.5)),
        ColorJitter(saturation=0, brightness=0.2, contrast=0.2),
        ElasticTransform(sigma=4, alpha=34, p_apply=0.4),
        CenterCrop(size=224),
        RandomErasing(scale=(0.15, 0.4), ratio=(0.33, 3)),
        AddGaussianNoise(std=0.05, p_apply=0.5)
    ]

    np.random.seed(3)
    torch.manual_seed(3)
    random.seed(3)

    transformed_image = transformation_pipeline(fake_cxr_image)
    assert isinstance(transformed_image, torch.Tensor)
    # Expected pipeline
    image = np.ones([256, 256]) * 255.
    image[100:150, 100:200] = 1
    image = PIL.Image.fromarray(image).convert("L")
    # In the pipeline the image is converted to tensor before applying the transformations. Do the same here.
    image = ToTensor()(image).reshape([1, 1, 256, 256])

    np.random.seed(3)
    torch.manual_seed(3)
    random.seed(3)

    expected_transformed = image
    for t in all_transforms:
        expected_transformed = t(expected_transformed)
    # The pipeline takes a [C, Z, H, W] input and returns a [C, Z, H, W] output, but the
    # transforms in the list expect and return [Z, C, H, W], so we need to permute dimensions
    # before comparing.
    expected_transformed = torch.transpose(expected_transformed, 1,
                                           0).squeeze(1)
    assert torch.isclose(expected_transformed, transformed_image).all()

    # Test the evaluation pipeline
    transformation_pipeline = create_cxr_transforms_from_config(
        cxr_augmentation_config, apply_augmentations=False)
    transformed_image = transformation_pipeline(image)
    assert isinstance(transformed_image, torch.Tensor)
    all_transforms = [ExpandChannels(), Resize(size=256), CenterCrop(size=224)]
    expected_transformed = image
    for t in all_transforms:
        expected_transformed = t(expected_transformed)
    expected_transformed = torch.transpose(expected_transformed, 1,
                                           0).squeeze(1)
    assert torch.isclose(expected_transformed, transformed_image).all()
Example #7
def test_invalid_tensors(invalid_test_tensor: torch.Tensor) -> None:
    # Invalid input: both transforms expect a 4-dimensional tensor
    with pytest.raises(ValueError):
        ExpandChannels()(invalid_test_tensor)
    with pytest.raises(ValueError):
        RandomGamma(scale=(0.3, 3))(invalid_test_tensor)
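
For context, the invalid_test_tensor fixture assumed by these tests can be any tensor that is not 4-dimensional; a hypothetical minimal fixture might look like this:

import pytest
import torch

@pytest.fixture
def invalid_test_tensor() -> torch.Tensor:
    # Only 3 dimensions, so both transforms above should raise ValueError.
    return torch.ones([1, 256, 256])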