Example no. 1
def test_random_apply(device):
    tensor, _ = _create_data(26, 34, device=device)
    tensor = tensor.to(dtype=torch.float32) / 255.0

    transforms = T.RandomApply([
        T.RandomHorizontalFlip(),
        T.ColorJitter(),
    ], p=0.4)
    s_transforms = T.RandomApply(torch.nn.ModuleList([
        T.RandomHorizontalFlip(),
        T.ColorJitter(),
    ]), p=0.4)

    scripted_fn = torch.jit.script(s_transforms)
    torch.manual_seed(12)
    transformed_tensor = transforms(tensor)
    torch.manual_seed(12)
    transformed_tensor_script = scripted_fn(tensor)
    assert_equal(transformed_tensor,
                 transformed_tensor_script,
                 msg="{}".format(transforms))

    if device == "cpu":
        # Can't check this twice, otherwise
        # "Can't redefine method: forward on class: __torch__.torchvision.transforms.transforms.RandomApply"
        transforms = T.RandomApply([
            T.ColorJitter(),
        ], p=0.3)
        with pytest.raises(
                RuntimeError,
                match="Module 'RandomApply' has no attribute 'transforms'"):
            torch.jit.script(transforms)
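
A minimal sketch of the pattern this test exercises, assuming a torchvision version in which RandomApply is an nn.Module: wrapping the transforms in torch.nn.ModuleList registers them as submodules, which TorchScript requires, while a plain Python list leaves them invisible to the compiler and produces the "has no attribute 'transforms'" error asserted above.

import torch
import torchvision.transforms as T

# nn.ModuleList registers the transforms as submodules, so
# torch.jit.script can see and compile them; a plain list cannot.
scriptable = T.RandomApply(
    torch.nn.ModuleList([T.RandomHorizontalFlip(), T.ColorJitter()]),
    p=0.4,
)
scripted = torch.jit.script(scriptable)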
Example no. 2
def check_functional_vs_PIL_vs_scripted(fn, fn_pil, fn_t, config, device, dtype, tol=2.0 + 1e-10, agg_method="max"):

    script_fn = torch.jit.script(fn)
    torch.manual_seed(15)
    tensor, pil_img = _create_data(26, 34, device=device)
    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)

    if dtype is not None:
        tensor = F.convert_image_dtype(tensor, dtype)
        batch_tensors = F.convert_image_dtype(batch_tensors, dtype)

    out_fn_t = fn_t(tensor, **config)
    out_pil = fn_pil(pil_img, **config)
    out_scripted = script_fn(tensor, **config)
    assert out_fn_t.dtype == out_scripted.dtype
    assert out_fn_t.size()[1:] == out_pil.size[::-1]

    rgb_tensor = out_fn_t

    if out_fn_t.dtype != torch.uint8:
        rgb_tensor = F.convert_image_dtype(out_fn_t, torch.uint8)

    # Check that the max difference does not exceed 2 in the [0, 255] range.
    # Exact matching is not possible due to the incompatibility between
    # convert_image_dtype and PIL results.
    _assert_approx_equal_tensor_to_pil(rgb_tensor.float(), out_pil, tol=tol, agg_method=agg_method)

    atol = 1e-6
    if out_fn_t.dtype == torch.uint8 and "cuda" in torch.device(device).type:
        atol = 1.0
    assert out_fn_t.allclose(out_scripted, atol=atol)

    # FIXME: fn will be scripted again in _test_fn_on_batch. We could avoid that.
    _test_fn_on_batch(batch_tensors, fn, scripted_fn_atol=atol, **config)
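
For context, here is a plausible re-implementation of the approximate tensor-vs-PIL comparison used above. The real helper lives in torchvision's test utilities, so the name and body below are assumptions, not the actual code:

import numpy as np
import torch

def approx_equal_tensor_to_pil(tensor, pil_img, tol, agg_method="max"):
    # Hypothetical sketch: aggregate the absolute differences between a
    # CxHxW tensor and a PIL image ("max" or "mean") and compare to tol.
    pil_tensor = torch.as_tensor(np.array(pil_img).transpose((2, 0, 1))).float()
    diff = (tensor.cpu().float() - pil_tensor).abs()
    agg = diff.max() if agg_method == "max" else diff.mean()
    return agg.item() <= tol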
Example no. 3
    def test_rect_rotations(self, device, height, width, dt, angle, fn):
        # Tests on rectangular images
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        out_pil_img = F.affine(
            pil_img, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
        )
        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

        out_tensor = fn(
            tensor, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
        ).cpu()

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
        # Tolerance: less than 3% of pixels may differ
        assert ratio_diff_pixels < 0.03, "{}: {}\n{} vs \n{}".format(
            angle, ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
        )
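
A worked example of the mismatch metric, under the assumption of a 3-channel uint8 image: the elementwise mismatch count is divided by 3 so that a pixel differing in all channels counts once, then normalized by height and width.

import torch

a = torch.zeros(3, 26, 34, dtype=torch.uint8)
b = a.clone()
b[:, 0, :5] = 1  # pretend 5 pixels differ in all 3 channels

num_diff_pixels = (a != b).sum().item() / 3.0
ratio = num_diff_pixels / a.shape[-1] / a.shape[-2]
assert abs(ratio - 5 / (26 * 34)) < 1e-12  # 5 of 884 pixels, well under 3%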
Example no. 4
def test_x_crop(fn, method, out_length, size, device):
    meth_kwargs = fn_kwargs = {"size": size}
    scripted_fn = torch.jit.script(fn)

    tensor, pil_img = _create_data(height=20, width=20, device=device)
    transformed_t_list = fn(tensor, **fn_kwargs)
    transformed_p_list = fn(pil_img, **fn_kwargs)
    assert len(transformed_t_list) == len(transformed_p_list)
    assert len(transformed_t_list) == out_length
    for transformed_tensor, transformed_pil_img in zip(transformed_t_list, transformed_p_list):
        _assert_equal_tensor_to_pil(transformed_tensor, transformed_pil_img)

    transformed_t_list_script = scripted_fn(tensor.detach().clone(), **fn_kwargs)
    assert len(transformed_t_list) == len(transformed_t_list_script)
    assert len(transformed_t_list_script) == out_length
    for transformed_tensor, transformed_tensor_script in zip(transformed_t_list, transformed_t_list_script):
        assert_equal(transformed_tensor, transformed_tensor_script)

    # test for class interface
    fn = method(**meth_kwargs)
    scripted_fn = torch.jit.script(fn)
    output = scripted_fn(tensor)
    assert len(output) == len(transformed_t_list_script)

    # test on batch of tensors
    batch_tensors = _create_data_batch(height=23, width=34, channels=3, num_samples=4, device=device)
    torch.manual_seed(12)
    transformed_batch_list = fn(batch_tensors)

    for i in range(len(batch_tensors)):
        img_tensor = batch_tensors[i, ...]
        torch.manual_seed(12)
        transformed_img_list = fn(img_tensor)
        for transformed_img, transformed_batch in zip(transformed_img_list, transformed_batch_list):
            assert_equal(transformed_img, transformed_batch[i, ...])
Example no. 5
    def test_rotate_interpolation_type(self):
        tensor, _ = _create_data(26, 26)
        # assert changed type warning
        with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
            res1 = F.rotate(tensor, 45, interpolation=2)
            res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
            assert_equal(res1, res2)
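
The integer code 2 follows PIL's resampling constants, which is why res1 and res2 match. A sketch of the mapping (the conversion helper itself is internal to torchvision, so treat this table as illustrative):

from torchvision.transforms import InterpolationMode

# PIL constants: NEAREST=0, BILINEAR=2, BICUBIC=3; torchvision maps the
# deprecated integer values onto InterpolationMode and emits a UserWarning.
legacy_codes = {
    0: InterpolationMode.NEAREST,
    2: InterpolationMode.BILINEAR,
    3: InterpolationMode.BICUBIC,
}
assert legacy_codes[2] is InterpolationMode.BILINEAR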
Example no. 6
    def test_rotate_deprecation_resample(self):
        tensor, _ = _create_data(26, 26)
        # assert the deprecation warning and the non-backward-compatible mapping of resample onto interpolation
        with pytest.warns(UserWarning, match=r"Argument resample is deprecated and will be removed"):
            res1 = F.rotate(tensor, 45, resample=2)
            res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
            assert_equal(res1, res2)
Example no. 7
def test_pad(device, dt, pad, config):
    script_fn = torch.jit.script(F.pad)
    tensor, pil_img = _create_data(7, 8, device=device)
    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    if dt is not None:
        # Trivially cast the uint8 data to float to cover all dtype cases
        tensor = tensor.to(dt)
        batch_tensors = batch_tensors.to(dt)

    pad_tensor = F_t.pad(tensor, pad, **config)
    pad_pil_img = F_pil.pad(pil_img, pad, **config)

    pad_tensor_8b = pad_tensor
    # we need to cast to uint8 to compare with PIL image
    if pad_tensor_8b.dtype != torch.uint8:
        pad_tensor_8b = pad_tensor_8b.to(torch.uint8)

    _assert_equal_tensor_to_pil(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, config))

    if isinstance(pad, int):
        script_pad = [pad, ]
    else:
        script_pad = pad
    pad_tensor_script = script_fn(tensor, script_pad, **config)
    assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, config))

    _test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **config)
Example no. 8
    def test_convert_image_dtype(self):
        tensor, _ = _create_data(26, 34, device=self.device)
        batch_tensors = torch.rand(4, 3, 44, 56, device=self.device)

        for in_dtype in int_dtypes() + float_dtypes():
            in_tensor = tensor.to(in_dtype)
            in_batch_tensors = batch_tensors.to(in_dtype)
            for out_dtype in int_dtypes() + float_dtypes():

                fn = T.ConvertImageDtype(dtype=out_dtype)
                scripted_fn = torch.jit.script(fn)

                if (in_dtype == torch.float32 and out_dtype in (torch.int32, torch.int64)) or \
                        (in_dtype == torch.float64 and out_dtype == torch.int64):
                    with self.assertRaisesRegex(RuntimeError,
                                                r"cannot be performed safely"):
                        _test_transform_vs_scripted(fn, scripted_fn, in_tensor)
                    with self.assertRaisesRegex(RuntimeError,
                                                r"cannot be performed safely"):
                        _test_transform_vs_scripted_on_batch(
                            fn, scripted_fn, in_batch_tensors)
                    continue

                _test_transform_vs_scripted(fn, scripted_fn, in_tensor)
                _test_transform_vs_scripted_on_batch(fn, scripted_fn,
                                                     in_batch_tensors)

        with get_tmp_dir() as tmp_dir:
            scripted_fn.save(os.path.join(tmp_dir, "t_convert_dtype.pt"))
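
The "cannot be performed safely" branch reflects the overflow guard in convert_image_dtype: for example, rescaling float32 values by int32's maximum cannot be represented exactly in float32, so the conversion raises instead of silently losing precision. A minimal sketch:

import torch
from torchvision.transforms import functional as F

img = torch.rand(3, 4, 4)  # float32 in [0, 1]
try:
    F.convert_image_dtype(img, torch.int32)  # float32 -> int32 is refused
except RuntimeError as e:
    print(e)  # "... cannot be performed safely"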
Example no. 9
def test_perspective_pil_vs_tensor(device, dims_and_points, dt, fill, fn):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    data_dims, (spoints, epoints) = dims_and_points

    tensor, pil_img = _create_data(*data_dims, device=device)
    if dt is not None:
        tensor = tensor.to(dtype=dt)

    interpolation = NEAREST
    fill_pil = int(fill[0]) if fill is not None and len(fill) == 1 else fill
    out_pil_img = F.perspective(pil_img, startpoints=spoints, endpoints=epoints, interpolation=interpolation,
                                fill=fill_pil)
    out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
    out_tensor = fn(tensor, startpoints=spoints, endpoints=epoints, interpolation=interpolation, fill=fill).cpu()

    if out_tensor.dtype != torch.uint8:
        out_tensor = out_tensor.to(torch.uint8)

    num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
    ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
    # Tolerance: less than 5% of pixels may differ
    assert ratio_diff_pixels < 0.05
Example no. 10
    def test_compose(self):
        tensor, _ = _create_data(26, 34, device=self.device)
        tensor = tensor.to(dtype=torch.float32) / 255.0

        transforms = T.Compose([
            T.CenterCrop(10),
            T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        s_transforms = torch.nn.Sequential(*transforms.transforms)

        scripted_fn = torch.jit.script(s_transforms)
        torch.manual_seed(12)
        transformed_tensor = transforms(tensor)
        torch.manual_seed(12)
        transformed_tensor_script = scripted_fn(tensor)
        assert_equal(transformed_tensor,
                     transformed_tensor_script,
                     msg="{}".format(transforms))

        t = T.Compose([
            lambda x: x,
        ])
        with self.assertRaisesRegex(
                RuntimeError, r"Could not get name of python class object"):
            torch.jit.script(t)
Example no. 11
def test_compose(device):
    tensor, _ = _create_data(26, 34, device=device)
    tensor = tensor.to(dtype=torch.float32) / 255.0
    transforms = T.Compose(
        [
            T.CenterCrop(10),
            T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ]
    )
    s_transforms = torch.nn.Sequential(*transforms.transforms)

    scripted_fn = torch.jit.script(s_transforms)
    torch.manual_seed(12)
    transformed_tensor = transforms(tensor)
    torch.manual_seed(12)
    transformed_tensor_script = scripted_fn(tensor)
    assert_equal(transformed_tensor, transformed_tensor_script, msg=f"{transforms}")

    t = T.Compose(
        [
            lambda x: x,
        ]
    )
    with pytest.raises(RuntimeError, match="cannot call a value of type 'Tensor'"):
        torch.jit.script(t)
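
Both variants of this test rely on the same workaround: T.Compose (and any lambda inside it) is not scriptable, so the TorchScript-compatible pipeline chains nn.Module transforms with nn.Sequential instead. A minimal sketch:

import torch
import torchvision.transforms as T

pipeline = torch.nn.Sequential(
    T.CenterCrop(10),
    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
)
scripted = torch.jit.script(pipeline)
out = scripted(torch.rand(3, 26, 34))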
Example no. 12
    def test_rotate(self, device, height, width, center, dt, angle, expand, fill, fn):
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and torch.device(device).type == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        f_pil = int(fill[0]) if fill is not None and len(fill) == 1 else fill
        out_pil_img = F.rotate(pil_img, angle=angle, interpolation=NEAREST, expand=expand, center=center, fill=f_pil)
        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

        out_tensor = fn(tensor, angle=angle, interpolation=NEAREST, expand=expand, center=center, fill=fill).cpu()

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        assert out_tensor.shape == out_pil_tensor.shape, (
            f"{(height, width, NEAREST, dt, angle, expand, center)}: "
            f"{out_tensor.shape} vs {out_pil_tensor.shape}")

        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
        # Tolerance: less than 3% of pixels may differ
        assert ratio_diff_pixels < 0.03, (
            f"{(height, width, NEAREST, dt, angle, expand, center, fill)}: "
            f"{ratio_diff_pixels}\n{out_tensor[0, :7, :7]} vs \n"
            f"{out_pil_tensor[0, :7, :7]}")
Example no. 13
def _test_class_op(transform_cls, device, channels=3, meth_kwargs=None, test_exact_match=True, **match_kwargs):
    meth_kwargs = meth_kwargs or {}

    # test for class interface
    f = transform_cls(**meth_kwargs)
    scripted_fn = torch.jit.script(f)

    tensor, pil_img = _create_data(26, 34, channels, device=device)
    # set seed to reproduce the same transformation for tensor and PIL image
    torch.manual_seed(12)
    transformed_tensor = f(tensor)
    torch.manual_seed(12)
    transformed_pil_img = f(pil_img)
    if test_exact_match:
        _assert_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs)
    else:
        _assert_approx_equal_tensor_to_pil(transformed_tensor.float(), transformed_pil_img, **match_kwargs)

    torch.manual_seed(12)
    transformed_tensor_script = scripted_fn(tensor)
    assert_equal(transformed_tensor, transformed_tensor_script)

    batch_tensors = _create_data_batch(height=23, width=34, channels=channels, num_samples=4, device=device)
    _test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors)

    with get_tmp_dir() as tmp_dir:
        scripted_fn.save(os.path.join(tmp_dir, f"t_{transform_cls.__name__}.pt"))
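
The repeated torch.manual_seed(12) calls are what keep the tensor, PIL, and scripted branches aligned: random transforms draw their parameters from the global RNG, so re-seeding before each call reproduces the same random decision. A standalone illustration:

import torch
import torchvision.transforms as T

flip = T.RandomHorizontalFlip(p=0.5)
x = torch.rand(3, 8, 8)
torch.manual_seed(12)
a = flip(x)
torch.manual_seed(12)
b = flip(x)
assert torch.equal(a, b)  # same seed -> same flip decision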
Example no. 14
    def test_all_ops(self, device, height, width, dt, a, t, s, sh, f, fn):
        # 4) Test rotation + translation + scale + shear
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        f_pil = int(f[0]) if f is not None and len(f) == 1 else f
        out_pil_img = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh, interpolation=NEAREST, fill=f_pil)
        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

        out_tensor = fn(tensor, angle=a, translate=t, scale=s, shear=sh, interpolation=NEAREST, fill=f).cpu()

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
        # Tolerance: less than 5% (CPU) / 6% (CUDA) of pixels may differ
        tol = 0.06 if device == "cuda" else 0.05
        assert ratio_diff_pixels < tol, "{}: {}\n{} vs \n{}".format(
            (NEAREST, a, t, s, sh, f), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
        )
Example no. 15
def test_ten_crop(device):
    script_ten_crop = torch.jit.script(F.ten_crop)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    cropped_pil_images = F.ten_crop(pil_img, [10, 11])

    cropped_tensors = F.ten_crop(img_tensor, [10, 11])
    for i in range(10):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    cropped_tensors = script_ten_crop(img_tensor, [10, 11])
    for i in range(10):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    tuple_transformed_batches = F.ten_crop(batch_tensors, [10, 11])
    for i in range(len(batch_tensors)):
        img_tensor = batch_tensors[i, ...]
        tuple_transformed_imgs = F.ten_crop(img_tensor, [10, 11])
        assert len(tuple_transformed_imgs) == len(tuple_transformed_batches)

        for j in range(len(tuple_transformed_imgs)):
            true_transformed_img = tuple_transformed_imgs[j]
            transformed_img = tuple_transformed_batches[j][i, ...]
            assert_equal(true_transformed_img, transformed_img)

    # scriptable function test
    s_tuple_transformed_batches = script_ten_crop(batch_tensors, [10, 11])
    for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
        assert_equal(transformed_batch, s_transformed_batch)
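
For reference, ten_crop returns the four corner crops and the center crop, plus the same five crops of the horizontally flipped image, which is where the fixed loop bound of 10 comes from:

import torch
from torchvision.transforms import functional as F

img = torch.rand(3, 32, 34)
crops = F.ten_crop(img, [10, 11])  # 5 crops + 5 from the h-flipped image
assert len(crops) == 10
assert all(c.shape == (3, 10, 11) for c in crops)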
Example no. 16
def test_random_erasing(device, config):
    tensor, _ = _create_data(24, 32, channels=3, device=device)
    batch_tensors = torch.rand(4, 3, 44, 56, device=device)

    fn = T.RandomErasing(**config)
    scripted_fn = torch.jit.script(fn)
    _test_transform_vs_scripted(fn, scripted_fn, tensor)
    _test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors)
Example no. 17
def test_assert_resize_antialias(interpolation):

    # Check the implementation on very large scales,
    # which triggers the TORCH_CHECK inside interpolate_aa_kernels.cu
    torch.manual_seed(12)
    tensor, pil_img = _create_data(1000, 1000, device="cuda")

    with pytest.raises(RuntimeError, match=r"Max supported scale factor is"):
        F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)
Example no. 18
def _test_functional_op(f, device, channels=3, fn_kwargs=None, test_exact_match=True, **match_kwargs):
    fn_kwargs = fn_kwargs or {}

    tensor, pil_img = _create_data(height=10, width=10, channels=channels, device=device)
    transformed_tensor = f(tensor, **fn_kwargs)
    transformed_pil_img = f(pil_img, **fn_kwargs)
    if test_exact_match:
        _assert_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs)
    else:
        _assert_approx_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs)
Example no. 19
def test_resized_crop(device, mode):
    # test values of F.resized_crop in several cases:
    # 1) resize to the same size, crop to the same size => should be identity
    tensor, _ = _create_data(26, 36, device=device)

    out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=mode)
    assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))

    # 2) resize by half and crop a TL corner
    tensor, _ = _create_data(26, 36, device=device)
    out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=NEAREST)
    expected_out_tensor = tensor[:, :20:2, :30:2]
    assert_equal(
        expected_out_tensor,
        out_tensor,
        msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]),
    )

    batch_tensors = _create_data_batch(26, 36, num_samples=4, device=device)
    _test_fn_on_batch(
        batch_tensors, F.resized_crop, top=1, left=2, height=20, width=30, size=[10, 15], interpolation=NEAREST
    )
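
The expected_out_tensor slice works because nearest-neighbor downscaling by an exact factor of 2 keeps every second pixel. A small standalone check of that assumption:

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

x = torch.arange(16, dtype=torch.uint8).reshape(1, 4, 4)
y = F.resize(x, [2, 2], interpolation=InterpolationMode.NEAREST)
assert torch.equal(y, x[:, ::2, ::2])  # every second row/column survives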
Example no. 20
    def test_resize_scripted(self, dt, size, max_size, interpolation, device):
        tensor, _ = _create_data(height=34, width=36, device=device)
        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)

        if dt is not None:
            # Trivially cast the uint8 data to float to cover all dtype cases
            tensor = tensor.to(dt)
        if max_size is not None and len(size) != 1:
            pytest.skip("Size should be an int or a sequence of length 1 if max_size is specified")

        transform = T.Resize(size=size, interpolation=interpolation, max_size=max_size)
        s_transform = torch.jit.script(transform)
        _test_transform_vs_scripted(transform, s_transform, tensor)
        _test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
Example no. 21
def test_hflip(device):
    script_hflip = torch.jit.script(F.hflip)

    img_tensor, pil_img = _create_data(16, 18, device=device)
    hflipped_img = F.hflip(img_tensor)
    hflipped_pil_img = F.hflip(pil_img)
    _assert_equal_tensor_to_pil(hflipped_img, hflipped_pil_img)

    # scriptable function test
    hflipped_img_script = script_hflip(img_tensor)
    assert_equal(hflipped_img, hflipped_img_script)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.hflip)
Example no. 22
    def test_resize(self):

        # TODO: Minimal check for bug-fix, improve this later
        x = torch.rand(3, 32, 46)
        t = T.Resize(size=38)
        y = t(x)
        # If size is an int, the smaller edge of the image will be matched to this number.
        # i.e., if height > width, the image will be rescaled to (size * height / width, size).
        self.assertTrue(isinstance(y, torch.Tensor))
        self.assertEqual(y.shape[1], 38)
        self.assertEqual(y.shape[2], int(38 * 46 / 32))

        tensor, _ = _create_data(height=34, width=36, device=self.device)
        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)

        for dt in [None, torch.float32, torch.float64]:
            if dt is not None:
                # Trivially cast the uint8 data to float to cover all dtype cases
                tensor = tensor.to(dt)
            for size in [32, 34, [32, ], [32, 32], (32, 32), [34, 35]]:
                for max_size in (None, 35, 1000):
                    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
                        continue  # Not supported
                    for interpolation in [BILINEAR, BICUBIC, NEAREST]:
                        if isinstance(size, int):
                            script_size = [size, ]
                        else:
                            script_size = size

                        transform = T.Resize(size=script_size, interpolation=interpolation, max_size=max_size)
                        s_transform = torch.jit.script(transform)
                        _test_transform_vs_scripted(transform, s_transform, tensor)
                        _test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)

        with get_tmp_dir() as tmp_dir:
            s_transform.save(os.path.join(tmp_dir, "t_resize.pt"))
Example no. 23
def test_resize(device, dt, size, max_size, interpolation):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
        return  # unsupported

    torch.manual_seed(12)
    script_fn = torch.jit.script(F.resize)
    tensor, pil_img = _create_data(26, 36, device=device)
    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)

    if dt is not None:
        # Trivially cast the uint8 data to float to cover all dtype cases
        tensor = tensor.to(dt)
        batch_tensors = batch_tensors.to(dt)

    resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
    resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

    assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]

    if interpolation not in [NEAREST, ]:
        # We cannot check values if mode = NEAREST, as the results differ:
        # E.g. resized_tensor  = [[a, a, b, c, d, d, e, ...]]
        # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
        resized_tensor_f = resized_tensor
        # we need to cast the uint8 tensor to float to compare with the PIL image
        if resized_tensor_f.dtype == torch.uint8:
            resized_tensor_f = resized_tensor_f.to(torch.float)

        # Note the high tolerance for the mean absolute error here
        _assert_approx_equal_tensor_to_pil(resized_tensor_f, resized_pil_img, tol=8.0)

    if isinstance(size, int):
        script_size = [size, ]
    else:
        script_size = size

    resize_result = script_fn(
        tensor, size=script_size, interpolation=interpolation, max_size=max_size
    )
    assert_equal(resized_tensor, resize_result)

    _test_fn_on_batch(
        batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
    )
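
The script_size wrapping exists because the scripted signature types size as List[int]: the eager function also accepts a bare int, but the scripted one does not. A sketch:

import torch
from torchvision.transforms import functional as F

script_fn = torch.jit.script(F.resize)
img = torch.rand(3, 26, 36)
out_eager = F.resize(img, size=32)      # int is fine in eager mode
out_script = script_fn(img, size=[32])  # TorchScript wants List[int]
assert torch.equal(out_eager, out_script)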
Example no. 24
def test_autoaugment__op_apply_shear(interpolation, mode):
    # We check that torchvision's implementation of shear is equivalent
    # to the official CIFAR10 AutoAugment implementation:
    # https://github.com/tensorflow/models/blob/885fda091c46c59d6c7bb5c7e760935eacc229da/research/autoaugment/augmentation_transforms.py#L273-L290
    image_size = 32

    def shear(pil_img, level, mode, resample):
        if mode == "X":
            matrix = (1, level, 0, 0, 1, 0)
        elif mode == "Y":
            matrix = (1, 0, 0, level, 1, 0)
        return pil_img.transform((image_size, image_size), Image.AFFINE, matrix, resample=resample)

    t_img, pil_img = _create_data(image_size, image_size)

    resample_pil = {
        F.InterpolationMode.NEAREST: Image.NEAREST,
        F.InterpolationMode.BILINEAR: Image.BILINEAR,
    }[interpolation]

    level = 0.3
    expected_out = shear(pil_img, level, mode=mode, resample=resample_pil)

    # Check pil output vs expected pil
    out = _apply_op(pil_img, op_name=f"Shear{mode}", magnitude=level, interpolation=interpolation, fill=0)
    assert out == expected_out

    if interpolation == F.InterpolationMode.BILINEAR:
        # We skip bilinear mode for tensors, as affine transformation results
        # are not exactly the same between tensors and PIL images:
        # the MAE is around 1.40 and the max abs error can be 163 or 170.
        return

    # Check tensor output vs expected pil
    out = _apply_op(t_img, op_name=f"Shear{mode}", magnitude=level, interpolation=interpolation, fill=0)
    _assert_approx_equal_tensor_to_pil(out, expected_out)
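
The shear matrices follow PIL's Image.AFFINE convention: the 6-tuple (a, b, c, d, e, f) maps each output pixel (x, y) back to the input pixel (a*x + b*y + c, d*x + e*y + f), so ShearX with magnitude level samples input x + level*y. For illustration:

from PIL import Image

img = Image.new("RGB", (32, 32))
level = 0.3
# ShearX: output (x, y) samples input (x + level*y, y)
sheared_x = img.transform((32, 32), Image.AFFINE, (1, level, 0, 0, 1, 0))
# ShearY: output (x, y) samples input (x, level*x + y)
sheared_y = img.transform((32, 32), Image.AFFINE, (1, 0, 0, level, 1, 0))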
Example no. 25
def test_crop(device, top, left, height, width):
    script_crop = torch.jit.script(F.crop)

    img_tensor, pil_img = _create_data(16, 18, device=device)

    pil_img_cropped = F.crop(pil_img, top, left, height, width)

    img_tensor_cropped = F.crop(img_tensor, top, left, height, width)
    _assert_equal_tensor_to_pil(img_tensor_cropped, pil_img_cropped)

    img_tensor_cropped = script_crop(img_tensor, top, left, height, width)
    _assert_equal_tensor_to_pil(img_tensor_cropped, pil_img_cropped)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.crop, top=top, left=left, height=height, width=width)
Example no. 26
def test_rgb_to_grayscale(device, num_output_channels):
    script_rgb_to_grayscale = torch.jit.script(F.rgb_to_grayscale)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    gray_pil_image = F.rgb_to_grayscale(pil_img, num_output_channels=num_output_channels)
    gray_tensor = F.rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels)

    _assert_approx_equal_tensor_to_pil(gray_tensor.float(), gray_pil_image, tol=1.0 + 1e-10, agg_method="max")

    s_gray_tensor = script_rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels)
    assert_equal(s_gray_tensor, gray_tensor)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.rgb_to_grayscale, num_output_channels=num_output_channels)
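
The tol=1.0 max-abs tolerance covers rounding differences against PIL's "L" mode; both follow the ITU-R 601 luma formula. A sketch of the conversion being compared, with the exact rounding behavior treated as an assumption:

import torch

def rgb_to_gray_sketch(img):
    # ITU-R 601 luma: L = 0.2989*R + 0.587*G + 0.114*B
    r, g, b = img.float().unbind(dim=-3)
    gray = 0.2989 * r + 0.587 * g + 0.114 * b
    return gray.round().to(img.dtype).unsqueeze(dim=-3)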
Example no. 27
def test_center_crop(device):
    script_center_crop = torch.jit.script(F.center_crop)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    cropped_pil_image = F.center_crop(pil_img, [10, 11])

    cropped_tensor = F.center_crop(img_tensor, [10, 11])
    _assert_equal_tensor_to_pil(cropped_tensor, cropped_pil_image)

    cropped_tensor = script_center_crop(img_tensor, [10, 11])
    _assert_equal_tensor_to_pil(cropped_tensor, cropped_pil_image)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.center_crop, output_size=[10, 11])
Example no. 28
def test_resize_antialias(device, dt, size, interpolation):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    torch.manual_seed(12)
    script_fn = torch.jit.script(F.resize)
    tensor, pil_img = _create_data(320, 290, device=device)

    if dt is not None:
        # Trivially cast the uint8 data to float to cover all dtype cases
        tensor = tensor.to(dt)

    resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, antialias=True)
    resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation)

    assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]

    resized_tensor_f = resized_tensor
    # we need to cast the uint8 tensor to float to compare with the PIL image
    if resized_tensor_f.dtype == torch.uint8:
        resized_tensor_f = resized_tensor_f.to(torch.float)

    _assert_approx_equal_tensor_to_pil(
        resized_tensor_f, resized_pil_img, tol=0.5, msg=f"{size}, {interpolation}, {dt}"
    )

    accepted_tol = 1.0 + 1e-5
    if interpolation == BICUBIC:
        # This overall tolerance was chosen to make the tests pass. The high
        # value is mostly required for test cases with downsampling and
        # upsampling, where we cannot exactly match the PIL implementation.
        accepted_tol = 15.0

    _assert_approx_equal_tensor_to_pil(
        resized_tensor_f, resized_pil_img, tol=accepted_tol, agg_method="max",
        msg=f"{size}, {interpolation}, {dt}"
    )

    if isinstance(size, int):
        script_size = [size, ]
    else:
        script_size = size

    resize_result = script_fn(tensor, size=script_size, interpolation=interpolation, antialias=True)
    assert_equal(resized_tensor, resize_result)
Example no. 29
def test_normalize(device, tmpdir):
    fn = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    tensor, _ = _create_data(26, 34, device=device)

    with pytest.raises(TypeError, match="Input tensor should be a float tensor"):
        fn(tensor)

    batch_tensors = torch.rand(4, 3, 44, 56, device=device)
    tensor = tensor.to(dtype=torch.float32) / 255.0
    # test for class interface
    scripted_fn = torch.jit.script(fn)

    _test_transform_vs_scripted(fn, scripted_fn, tensor)
    _test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors)

    scripted_fn.save(os.path.join(tmpdir, "t_norm.pt"))
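
Normalize operates per channel as (x - mean) / std and requires float input, which is why the uint8 tensor above raises a TypeError before the division by 255. A minimal sketch:

import torch
import torchvision.transforms as T

norm = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
img = torch.rand(3, 8, 8)  # float input is required
assert torch.allclose(norm(img), (img - 0.5) / 0.5)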
Example no. 30
def test_resize_asserts(device):

    tensor, pil_img = _create_data(26, 36, device=device)

    # assert changed type warning
    with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
        res1 = F.resize(tensor, size=32, interpolation=2)

    res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
    assert_equal(res1, res2)

    for img in (tensor, pil_img):
        exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
        with pytest.raises(ValueError, match=exp_msg):
            F.resize(img, size=(32, 34), max_size=35)
        with pytest.raises(ValueError, match="max_size = 32 must be strictly greater"):
            F.resize(img, size=32, max_size=32)
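
Concretely, with an int size the smaller edge is matched to it and max_size caps the resulting longer edge, which is why a 2-tuple size combined with max_size, or max_size <= size, is rejected above. A sketch of the valid usage:

import torch
from torchvision.transforms import functional as F

img = torch.rand(3, 26, 36)
out = F.resize(img, size=32, max_size=35)
# The smaller edge would become 32 and the longer edge 44 > 35, so the
# image is rescaled again so that the longer edge equals max_size:
assert max(out.shape[-2:]) == 35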