def test_pad(device, dt, pad, config):
    """Check F.pad consistency across the PIL, TorchScript, and batched code paths.

    Compares the tensor implementation against the PIL reference for one
    (device, dtype, padding, config) combination, then verifies the scripted
    function and the batched helper agree with the eager result.
    """
    scripted_pad = torch.jit.script(F.pad)
    img_tensor, img_pil = _create_data(7, 8, device=device)
    batch = _create_data_batch(16, 18, num_samples=4, device=device)

    if dt == torch.float16 and device == "cpu":
        # float16 on CPU is unsupported for this op — nothing to test
        return

    if dt is not None:
        # Trivial cast of the uint8 data so every dtype path is exercised.
        img_tensor = img_tensor.to(dt)
        batch = batch.to(dt)

    out_tensor = F_t.pad(img_tensor, pad, **config)
    out_pil = F_pil.pad(img_pil, pad, **config)

    # Comparison against the PIL image requires uint8 values.
    out_uint8 = out_tensor if out_tensor.dtype == torch.uint8 else out_tensor.to(torch.uint8)
    _assert_equal_tensor_to_pil(out_uint8, out_pil, msg="{}, {}".format(pad, config))

    # TorchScript needs a List[int] padding argument, not a bare int.
    script_pad = [pad] if isinstance(pad, int) else pad
    out_scripted = scripted_pad(img_tensor, script_pad, **config)
    assert_equal(out_tensor, out_scripted, msg="{}, {}".format(pad, config))

    _test_fn_on_batch(batch, F.pad, padding=script_pad, **config)
def test_pad(self):
    """Constant-mode padding: the tensor result must match both PIL and TorchScript.

    Exercises several padding shapes (int, 1/2/4-element sequences) and fill
    values, comparing F_t.pad to the F_pil reference and the scripted function.
    """
    scripted_pad = torch.jit.script(F_t.pad)
    tensor, pil_img = self._create_data(7, 8)

    for pad in [1, [1], [0, 1], (2, 2), [1, 0, 1, 2]]:
        mode = "constant"
        for fill in (0, 10, 20):
            out = F_t.pad(tensor, pad, fill=fill, padding_mode=mode)
            ref = F_pil.pad(pil_img, pad, fill=fill, padding_mode=mode)
            self.compareTensorToPIL(out, ref, msg="{}, {}".format(pad, fill))

            # TorchScript needs a List[int] padding argument, not a bare int.
            script_pad = [pad] if isinstance(pad, int) else pad
            out_scripted = scripted_pad(tensor, script_pad, fill=fill, padding_mode=mode)
            self.assertTrue(out.equal(out_scripted), msg="{}, {}".format(pad, fill))
def test_pad(self):
    """Padding tests over dtypes, padding shapes, and padding modes.

    For each dtype/padding/config combination the tensor result is compared
    (after a uint8 cast) against the PIL reference, and the scripted function
    is checked against the eager result.
    """
    scripted_pad = torch.jit.script(F_t.pad)
    tensor, pil_img = self._create_data(7, 8)

    # The config list is loop-invariant, so it is built once up front.
    configs = [
        {"padding_mode": "constant", "fill": 0},
        {"padding_mode": "constant", "fill": 10},
        {"padding_mode": "constant", "fill": 20},
        {"padding_mode": "edge"},
        {"padding_mode": "reflect"},
    ]

    for dt in (None, torch.float32, torch.float64):
        if dt is not None:
            # Trivial cast of the uint8 data so every dtype path is exercised.
            tensor = tensor.to(dt)

        for pad in [2, [3], [0, 3], (3, 3), [4, 2, 4, 3]]:
            for kwargs in configs:
                out = F_t.pad(tensor, pad, **kwargs)
                ref = F_pil.pad(pil_img, pad, **kwargs)

                # Comparison against the PIL image requires uint8 values.
                out_uint8 = out if out.dtype == torch.uint8 else out.to(torch.uint8)
                self.compareTensorToPIL(out_uint8, ref, msg="{}, {}".format(pad, kwargs))

                # TorchScript needs a List[int] padding argument, not a bare int.
                script_pad = [pad] if isinstance(pad, int) else pad
                out_scripted = scripted_pad(tensor, script_pad, **kwargs)
                self.assertTrue(out.equal(out_scripted), msg="{}, {}".format(pad, kwargs))
def test_pad(self):
    """Device-aware padding tests, including float16, symmetric mode, and batches.

    Runs the dtype/padding/config matrix on ``self.device`` (skipping float16
    on CPU), comparing the tensor result against PIL, the scripted function,
    and the batched helper; finally checks that negative symmetric padding
    raises ``ValueError``.
    """
    scripted_pad = torch.jit.script(F.pad)
    tensor, pil_img = self._create_data(7, 8, device=self.device)
    batch = self._create_data_batch(16, 18, num_samples=4, device=self.device)

    # The config list is loop-invariant, so it is built once up front.
    configs = [
        {"padding_mode": "constant", "fill": 0},
        {"padding_mode": "constant", "fill": 10},
        {"padding_mode": "constant", "fill": 20},
        {"padding_mode": "edge"},
        {"padding_mode": "reflect"},
        {"padding_mode": "symmetric"},
    ]

    for dt in (None, torch.float32, torch.float64, torch.float16):
        if dt == torch.float16 and torch.device(self.device).type == "cpu":
            # float16 on CPU is unsupported for this op — skip that combination.
            continue
        if dt is not None:
            # Trivial cast of the uint8 data so every dtype path is exercised.
            tensor = tensor.to(dt)
            batch = batch.to(dt)

        for pad in [2, [3], [0, 3], (3, 3), [4, 2, 4, 3]]:
            for kwargs in configs:
                out = F_t.pad(tensor, pad, **kwargs)
                ref = F_pil.pad(pil_img, pad, **kwargs)

                # Comparison against the PIL image requires uint8 values.
                out_uint8 = out if out.dtype == torch.uint8 else out.to(torch.uint8)
                self.compareTensorToPIL(out_uint8, ref, msg="{}, {}".format(pad, kwargs))

                # TorchScript needs a List[int] padding argument, not a bare int.
                script_pad = [pad] if isinstance(pad, int) else pad
                out_scripted = scripted_pad(tensor, script_pad, **kwargs)
                self.assertTrue(out.equal(out_scripted), msg="{}, {}".format(pad, kwargs))
                self._test_fn_on_batch(batch, F.pad, padding=script_pad, **kwargs)

    with self.assertRaises(ValueError, msg="Padding can not be negative for symmetric padding_mode"):
        F_t.pad(tensor, (-2, -3), padding_mode="symmetric")