def test_resize(self):
    """Check F_t.resize output shapes for int / (int, int) sizes across
    interpolation modes, that the input is not mutated, and that bicubic
    output stays within [0, 255] (no overshoot)."""
    in_h = random.randint(24, 32) * 2
    in_w = random.randint(24, 32) * 2
    img = torch.ones(3, in_h, in_w)
    img_clone = img.clone()

    for mode in ("bilinear", "nearest", "bicubic"):
        # (Int) for resizing: the smaller edge is matched to output_size
        output_size = random.randint(5, 12) * 2
        out = F_t.resize(img, output_size, interpolation=mode)
        smaller_dim = 1 if in_h < in_w else 2
        self.assertEqual(output_size, out.shape[smaller_dim])

        # (Int, Int) for resizing: exact target height and width
        output_size = (random.randint(5, 12) * 2, random.randint(5, 12) * 2)
        out = F_t.resize(img, output_size, interpolation=mode)
        self.assertEqual((output_size[0], output_size[1]), (out.shape[1], out.shape[2]))

    # checking input tensor is not mutated
    self.assertTrue(torch.equal(img, img_clone))

    # checking overshooting for bicubic
    output_size = (random.randint(5, 12) * 2, random.randint(5, 12) * 2)
    out = F_t.resize(img, output_size, interpolation="bicubic")
    clamped_tensor = out.clamp(min=0, max=255)
    self.assertTrue(torch.equal(out, clamped_tensor))
def test_resize(self):
    """Compare F_t.resize against F_pil.resize (shape always, values for
    non-nearest modes) across dtypes and size specs, and check that the
    TorchScript-compiled function matches the eager result exactly."""
    script_fn = torch.jit.script(F_t.resize)
    tensor, pil_img = self._create_data(26, 36, device=self.device)

    for dt in [None, torch.float32, torch.float64, torch.float16]:
        if dt == torch.float16 and torch.device(self.device).type == "cpu":
            # skip float16 on CPU case
            continue

        if dt is not None:
            # This is a trivial cast to float of uint8 data to test all cases
            tensor = tensor.to(dt)

        for size in [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]:
            for interpolation in [BILINEAR, BICUBIC, NEAREST]:
                resized_tensor = F_t.resize(tensor, size=size, interpolation=interpolation)
                resized_pil_img = F_pil.resize(pil_img, size=size, interpolation=interpolation)

                # PIL reports (width, height); tensor reports (..., height, width)
                self.assertEqual(
                    resized_tensor.size()[1:],
                    resized_pil_img.size[::-1],
                    msg="{}, {}".format(size, interpolation),
                )

                if interpolation != NEAREST:
                    # We can not check values if mode = NEAREST, as results are different
                    # E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
                    # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
                    resized_tensor_f = resized_tensor
                    # we need to cast to uint8 to compare with PIL image
                    if resized_tensor_f.dtype == torch.uint8:
                        resized_tensor_f = resized_tensor_f.to(torch.float)

                    # Pay attention to high tolerance for MAE
                    self.approxEqualTensorToPIL(
                        resized_tensor_f,
                        resized_pil_img,
                        tol=8.0,
                        msg="{}, {}".format(size, interpolation),
                    )

                # Scripted resize requires a List[int] size argument
                script_size = [size, ] if isinstance(size, int) else size
                resize_result = script_fn(tensor, size=script_size, interpolation=interpolation)
                self.assertTrue(
                    resized_tensor.equal(resize_result),
                    msg="{}, {}".format(size, interpolation),
                )
def resize_image_tensor(
    image: torch.Tensor,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
    antialias: Optional[bool] = None,
) -> torch.Tensor:
    """Resize an image tensor of shape (..., C, H, W), preserving any leading
    batch dimensions.

    The image is flattened to (N, C, H, W) for the underlying ``_FT.resize``
    call and reshaped back afterwards.

    Args:
        image: Input tensor with at least 3 trailing dims (C, H, W).
        size: Target size spec, forwarded to ``_FT.resize`` (may be a
            single-element list meaning "match the smaller edge").
        interpolation: Interpolation mode; its ``.value`` string is forwarded.
        max_size: Optional cap on the longer edge, forwarded to ``_FT.resize``.
        antialias: Optional antialiasing flag, forwarded to ``_FT.resize``.

    Returns:
        The resized tensor with the original leading batch dims restored.
    """
    num_channels, old_height, old_width = get_dimensions_image_tensor(image)
    batch_shape = image.shape[:-3]
    resized = _FT.resize(
        image.reshape((-1, num_channels, old_height, old_width)),
        size=size,
        interpolation=interpolation.value,
        max_size=max_size,
        antialias=antialias,
    )
    # BUGFIX: derive the output spatial dims from the resized tensor instead
    # of unpacking `size` directly. `size` may hold a single element
    # (smaller-edge resize) and `max_size` may rescale the result, in which
    # cases the actual output H/W differ from `size` and the old
    # `new_height, new_width = size` unpack would crash or reshape wrongly.
    new_height, new_width = resized.shape[-2:]
    return resized.reshape(batch_shape + (num_channels, new_height, new_width))