Example #1
    def test_perspective(self):
        """Check that F.perspective on tensors matches the PIL reference.

        For each test image and each start/end point configuration, the
        tensor result (both eager and TorchScript-compiled) must differ
        from the PIL result on fewer than 5% of pixels.
        """
        from torchvision.transforms import RandomPerspective

        data = [
            self._create_data(26, 34, device=self.device),
            self._create_data(26, 26, device=self.device)
        ]

        # Script once: F.perspective is loop-invariant, so compiling it
        # anew for every image only wasted time.  Also fixes the
        # "scripted_tranform" typo in the variable name.
        scripted_transform = torch.jit.script(F.perspective)

        for tensor, pil_img in data:

            # Hand-picked point pairs (forward warp, its inverse, a skew) ...
            test_configs = [
                [[[0, 0], [33, 0], [33, 25], [0, 25]],
                 [[3, 2], [32, 3], [30, 24], [2, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]],
                 [[0, 0], [33, 0], [33, 25], [0, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]],
                 [[5, 5], [30, 3], [33, 19], [4, 25]]],
            ]
            # ... plus randomized configs of increasing distortion scale.
            n = 10
            test_configs += [
                RandomPerspective.get_params(pil_img.size[0], pil_img.size[1],
                                             i / n) for i in range(n)
            ]

            # Only nearest-neighbour interpolation (0) is exercised here.
            for r in [
                    0,
            ]:
                for spoints, epoints in test_configs:
                    out_pil_img = F.perspective(pil_img,
                                                startpoints=spoints,
                                                endpoints=epoints,
                                                interpolation=r)
                    # PIL HWC array -> CHW tensor for pixelwise comparison.
                    out_pil_tensor = torch.from_numpy(
                        np.array(out_pil_img).transpose((2, 0, 1)))

                    for fn in [F.perspective, scripted_transform]:
                        out_tensor = fn(tensor,
                                        startpoints=spoints,
                                        endpoints=epoints,
                                        interpolation=r).cpu()

                        # Mismatch count per pixel (divide by 3 channels).
                        num_diff_pixels = (out_tensor !=
                                           out_pil_tensor).sum().item() / 3.0
                        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[
                            -1] / out_tensor.shape[-2]
                        # Tolerance : less than 5% of different pixels
                        self.assertLess(ratio_diff_pixels,
                                        0.05,
                                        msg="{}: {}\n{} vs \n{}".format(
                                            (r, spoints, epoints),
                                            ratio_diff_pixels,
                                            out_tensor[0, :7, :7],
                                            out_pil_tensor[0, :7, :7]))
Example #2
    def test_perspective(self):
        """Compare tensor vs PIL perspective output across dtypes,
        including batched inputs.

        Delegates the per-config comparison to ``self._test_perspective``
        and the batch consistency check to ``self._test_fn_on_batch``.
        """
        from torchvision.transforms import RandomPerspective

        data = [
            self._create_data(26, 34, device=self.device),
            self._create_data(26, 26, device=self.device)
        ]
        scripted_transform = torch.jit.script(F.perspective)

        for tensor, pil_img in data:

            # Hand-picked point pairs, then randomized configs of
            # increasing distortion scale.
            test_configs = [
                [[[0, 0], [33, 0], [33, 25], [0, 25]],
                 [[3, 2], [32, 3], [30, 24], [2, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]],
                 [[0, 0], [33, 0], [33, 25], [0, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]],
                 [[5, 5], [30, 3], [33, 19], [4, 25]]],
            ]
            n = 10
            test_configs += [
                RandomPerspective.get_params(pil_img.size[0], pil_img.size[1],
                                             i / n) for i in range(n)
            ]

            for dt in [None, torch.float32, torch.float64, torch.float16]:

                if dt == torch.float16 and torch.device(
                        self.device).type == "cpu":
                    # skip float16 on CPU case
                    continue

                # Convert into a fresh local instead of rebinding `tensor`:
                # the original rebinding chained conversions across dtype
                # iterations (uint8 -> f32 -> f64 -> f16) instead of
                # converting from the pristine input each time.
                cast_tensor = tensor if dt is None else tensor.to(dtype=dt)

                self._test_perspective(cast_tensor, pil_img,
                                       scripted_transform, test_configs)

                batch_tensors = self._create_data_batch(26,
                                                        36,
                                                        num_samples=4,
                                                        device=self.device)
                if dt is not None:
                    batch_tensors = batch_tensors.to(dtype=dt)

                for spoints, epoints in test_configs:
                    self._test_fn_on_batch(batch_tensors,
                                           F.perspective,
                                           startpoints=spoints,
                                           endpoints=epoints,
                                           interpolation=0)
Example #3
    def test_perspective(self):
        """Validate F.perspective tensor output against PIL across dtypes
        and batches, and check the deprecated integer-interpolation warning.
        """
        from torchvision.transforms import RandomPerspective

        scripted_transform = torch.jit.script(F.perspective)
        data = [
            self._create_data(26, 34, device=self.device),
            self._create_data(26, 26, device=self.device),
        ]

        for tensor, pil_img in data:

            # Fixed point pairs first, then randomized configurations of
            # growing distortion scale.
            test_configs = [
                [[[0, 0], [33, 0], [33, 25], [0, 25]], [[3, 2], [32, 3], [30, 24], [2, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]], [[0, 0], [33, 0], [33, 25], [0, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]], [[5, 5], [30, 3], [33, 19], [4, 25]]],
            ]
            num_random = 10
            test_configs.extend(
                RandomPerspective.get_params(pil_img.size[0], pil_img.size[1], k / num_random)
                for k in range(num_random)
            )

            for dt in (None, torch.float32, torch.float64, torch.float16):

                # float16 is not exercised on CPU.
                if dt == torch.float16 and torch.device(self.device).type == "cpu":
                    continue

                if dt is not None:
                    tensor = tensor.to(dtype=dt)

                self._test_perspective(tensor, pil_img, scripted_transform, test_configs)

                batch_tensors = self._create_data_batch(26, 36, num_samples=4, device=self.device)
                if dt is not None:
                    batch_tensors = batch_tensors.to(dtype=dt)

                for start_pts, end_pts in test_configs:
                    self._test_fn_on_batch(
                        batch_tensors,
                        F.perspective,
                        startpoints=start_pts,
                        endpoints=end_pts,
                        interpolation=NEAREST,
                    )

        # assert changed type warning: an integer interpolation value must
        # warn and produce the same result as the matching enum member.
        warn_start = [[0, 0], [33, 0], [33, 25], [0, 25]]
        warn_end = [[3, 2], [32, 3], [30, 24], [2, 25]]
        with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationModes"):
            res1 = F.perspective(tensor, startpoints=warn_start, endpoints=warn_end, interpolation=2)
            res2 = F.perspective(tensor, startpoints=warn_start, endpoints=warn_end, interpolation=BILINEAR)
            self.assertTrue(res1.equal(res2))