def test_affine_scale(self, device):
    """Affine configured with a random scale_factor must match kornia.scale."""
    torch.manual_seed(0)
    factor = torch.rand(1, device=device) * 2.0
    img = torch.rand(1, 2, 3, 4, device=device)
    affine = kornia.Affine(scale_factor=factor).to(device)
    # The module applied to the image should equal the functional call.
    assert_allclose(affine(img), kornia.scale(img, factor))
def test_affine_scale(self, device, dtype):
    """Affine with a (1, 2) scale_factor must match the functional kornia.scale."""
    # TODO: Remove when #666 is implemented
    if device.type == 'cuda':
        pytest.skip("Currently breaks in CUDA."
                    "See https://github.com/kornia/kornia/issues/666")
    torch.manual_seed(0)
    base = torch.rand(1, device=device, dtype=dtype) * 2.0
    # Same factor for both axes, shaped (1, 2) as Affine expects.
    factor = torch.stack([base, base], dim=1)
    img = torch.rand(1, 2, 3, 4, device=device, dtype=dtype)
    affine = kornia.Affine(scale_factor=factor).to(device=device, dtype=dtype)
    assert_close(affine(img), kornia.scale(img, factor), atol=1e-4, rtol=1e-4)
def ZoomY(x, v):
    """Zoom a batch of images along the y-axis only.

    Args:
        x: input image tensor; assumed batched (B, C, H, W) — TODO confirm
           against callers.
        v: per-sample y-axis zoom factors, shape (B,).

    Returns:
        The result of kornia.scale with the x-axis factor fixed at 1 and the
        y-axis factor taken from ``v``.
    """
    batch_size = v.size(0)
    # Fix: create the scale tensor with x's dtype as well as its device, so a
    # half- or double-precision input does not hit a float32/float mismatch
    # inside kornia.scale.
    zoom = torch.ones((batch_size, 2), device=x.device, dtype=x.dtype)
    zoom[:, 1] = v
    return kornia.scale(x, zoom)