Example #1
0
def resize_image_pil(
    img: PIL.Image.Image,
    size: Union[Sequence[int], int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
) -> PIL.Image.Image:
    """Resize a PIL image by delegating to the functional PIL backend.

    The framework-level ``InterpolationMode`` is translated to the PIL
    resample constant before dispatch; ``size`` and ``max_size`` are
    forwarded unchanged.
    """
    # Map the interpolation enum onto PIL's own resampling constant.
    pil_resample = pil_modes_mapping[interpolation]
    return _FP.resize(img, size, interpolation=pil_resample, max_size=max_size)
Example #2
0
    def test_resize(self):
        """Compare F_t.resize against F_pil.resize and its scripted version."""
        script_fn = torch.jit.script(F_t.resize)
        tensor, pil_img = self._create_data(26, 36, device=self.device)

        dtypes = [None, torch.float32, torch.float64, torch.float16]
        sizes = [32, 26, [32], [32, 32], (32, 32), [26, 35]]
        modes = [BILINEAR, BICUBIC, NEAREST]

        for dtype in dtypes:
            # float16 is not exercised on CPU — skip that combination.
            if dtype == torch.float16 and torch.device(self.device).type == "cpu":
                continue

            if dtype is not None:
                # Trivial cast of the uint8 data so every dtype path is covered.
                tensor = tensor.to(dtype)

            for size in sizes:
                for interpolation in modes:
                    resized_tensor = F_t.resize(
                        tensor, size=size, interpolation=interpolation)
                    resized_pil_img = F_pil.resize(
                        pil_img, size=size, interpolation=interpolation)

                    # PIL reports (width, height); the tensor's trailing dims
                    # are (height, width), hence the reversal.
                    self.assertEqual(
                        resized_tensor.size()[1:],
                        resized_pil_img.size[::-1],
                        msg="{}, {}".format(size, interpolation),
                    )

                    # Values cannot be checked for NEAREST: the tensor and PIL
                    # implementations pick different source pixels, e.g.
                    #   resized_tensor  = [[a, a, b, c, d, d, e, ...]]
                    #   resized_pil_img = [[a, b, c, c, d, e, f, ...]]
                    if interpolation != NEAREST:
                        resized_tensor_f = resized_tensor
                        # uint8 output is cast to float so it can be compared
                        # with the PIL image under a numeric tolerance.
                        if resized_tensor_f.dtype == torch.uint8:
                            resized_tensor_f = resized_tensor_f.to(torch.float)

                        # Note the deliberately high tolerance for the MAE.
                        self.approxEqualTensorToPIL(
                            resized_tensor_f,
                            resized_pil_img,
                            tol=8.0,
                            msg="{}, {}".format(size, interpolation),
                        )

                    # The scripted function only accepts list sizes, so wrap
                    # bare ints before calling it.
                    script_size = [size] if isinstance(size, int) else size
                    resize_result = script_fn(
                        tensor, size=script_size, interpolation=interpolation)
                    self.assertTrue(
                        resized_tensor.equal(resize_result),
                        msg="{}, {}".format(size, interpolation),
                    )
Example #3
0
def resize_image_pil(
    img: PIL.Image.Image,
    size: Union[Sequence[int], int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
) -> PIL.Image.Image:
    """Resize a PIL image after resolving the concrete output size.

    The target size (including the ``max_size`` constraint) is resolved via
    ``_compute_output_size`` before delegating to the PIL backend.
    """
    # An integer request is expanded to a square target before resolution.
    # NOTE(review): this differs from wrappers that forward int sizes for
    # aspect-preserving resize — confirm the square expansion is intended.
    # The fresh, annotated local also sidesteps the original mypy complaint
    # about Sequence[int] vs List[int].
    target: List[int] = [size, size] if isinstance(size, int) else list(size)
    # img.size is (width, height); the helper takes it reversed.
    target = _compute_output_size(img.size[::-1], size=target, max_size=max_size)
    resample = pil_modes_mapping[interpolation]
    return _FP.resize(img, target, interpolation=resample)
Example #4
0
 def forward(self, img: Image):
     """Resize *img* to the precomputed ``self.scaled_size`` with bicubic
     resampling, then center-crop the result to ``self.size``."""
     scaled = tF_pil.resize(img=img,
                            size=self.scaled_size,
                            interpolation=PIL.Image.BICUBIC)
     return tF.center_crop(scaled, self.size)
Example #5
0
 def forward(self, img: Image):
     """Enlarge the image's own (width, height) by 32 pixels each, resize
     to that size with bicubic resampling, then center-crop to ``self.size``."""
     # img.size is PIL's (width, height); grow each dimension by 32.
     enlarged = tuple(dim + 32 for dim in img.size)
     resized = tF_pil.resize(img=img,
                             size=enlarged,
                             interpolation=PIL.Image.BICUBIC)
     return tF.center_crop(resized, self.size)