Example #1
 def test_divide_by_zero3d(self, height, width, depth, device, dtype):
     output = kornia.normal_transform_pixel3d(depth,
                                              height,
                                              width,
                                              device=device,
                                              dtype=dtype)
     assert torch.isinf(output).sum().item() == 0
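
For context, a minimal sketch of the matrix this test exercises: normal_transform_pixel3d(depth, height, width) builds a 4x4 affine transform that maps pixel coordinates [0, size - 1] to the normalized range [-1, 1] on each axis. The singleton-dimension guard shown below (an eps denominator) is an assumption based on what test_divide_by_zero3d checks; the helper name and the exact eps value are illustrative, not kornia's API.

import torch

def normal_transform_pixel3d_sketch(depth, height, width, eps=1e-14, device=None, dtype=None):
    # 4x4 affine transform: pixel coords [0, size - 1] -> normalized coords [-1, 1]
    tr_mat = torch.tensor(
        [[1.0, 0.0, 0.0, -1.0],
         [0.0, 1.0, 0.0, -1.0],
         [0.0, 0.0, 1.0, -1.0],
         [0.0, 0.0, 0.0, 1.0]], device=device, dtype=dtype)

    # assumed guard: avoid dividing by zero when a dimension has size 1
    w_denom = eps if width == 1 else width - 1.0
    h_denom = eps if height == 1 else height - 1.0
    d_denom = eps if depth == 1 else depth - 1.0

    tr_mat[0, 0] = 2.0 / w_denom  # x scale
    tr_mat[1, 1] = 2.0 / h_denom  # y scale
    tr_mat[2, 2] = 2.0 / d_denom  # z scale
    return tr_mat.unsqueeze(0)    # shape (1, 4, 4)

# even a singleton dimension yields a finite (if huge) scale, so isinf() finds nothing
print(torch.isinf(normal_transform_pixel3d_sketch(1, 4, 4)).sum().item())  # 0
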
Example #2
    def test_normalize_homography_identity(self, batch_size, device, dtype):
        # create input data
        input_shape = (4, 8, 5)
        dst_homo_src = utils.create_eye_batch(batch_size=batch_size, eye_size=4).to(device=device, dtype=dtype)

        res = torch.tensor(
            [[[0.5000, 0.0, 0.0, -1.0], [0.0, 0.2857, 0.0, -1.0], [0.0, 0.0, 0.6667, -1.0], [0.0, 0.0, 0.0, 1.0]]],
            device=device,
            dtype=dtype,
        )
        norm = kornia.normal_transform_pixel3d(input_shape[0], input_shape[1], input_shape[2]).to(
            device=device, dtype=dtype
        )
        assert_close(norm, res, rtol=1e-4, atol=1e-4)

        norm_homo = kornia.normalize_homography3d(dst_homo_src, input_shape, input_shape).to(device=device, dtype=dtype)
        assert_close(norm_homo, dst_homo_src, rtol=1e-4, atol=1e-4)

        # change output scale
        norm_homo = kornia.normalize_homography3d(
            dst_homo_src, input_shape, (input_shape[0] // 2, input_shape[1] * 2, input_shape[2] // 2)
        ).to(device=device, dtype=dtype)
        res = torch.tensor(
            [[[4.0, 0.0, 0.0, 3.0], [0.0, 0.4667, 0.0, -0.5333], [0.0, 0.0, 3.0, 2.0], [0.0, 0.0, 0.0, 1.0]]],
            device=device,
            dtype=dtype,
        ).repeat(batch_size, 1, 1)
        assert_close(norm_homo, res, rtol=1e-4, atol=1e-4)
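
A sketch of what normalize_homography3d is expected to compute here, assuming the shape tuples are (depth, height, width) and the pixel-space homography is sandwiched between the destination normalizer and the inverted source normalizer. The helper names below are illustrative, and the inline normalizer assumes every size is greater than 1.

import torch

def _norm_pixel3d(depth, height, width):
    # pixel [0, size - 1] -> normalized [-1, 1]; assumes every size > 1
    scale = torch.tensor([2.0 / (width - 1), 2.0 / (height - 1), 2.0 / (depth - 1), 1.0])
    tr = torch.diag(scale)
    tr[:3, 3] = -1.0
    return tr.unsqueeze(0)  # (1, 4, 4)

def normalize_homography3d_sketch(dst_pix_trans_src_pix, dsize_src, dsize_dst):
    # H_norm = N_dst @ H_pix @ N_src^{-1}
    src_norm = _norm_pixel3d(*dsize_src).to(dst_pix_trans_src_pix)
    dst_norm = _norm_pixel3d(*dsize_dst).to(dst_pix_trans_src_pix)
    return dst_norm @ dst_pix_trans_src_pix @ torch.inverse(src_norm)

# an identity homography with equal src/dst shapes stays the identity,
# which is what the first assert_close in the test above checks
eye = torch.eye(4).unsqueeze(0)
print(torch.allclose(normalize_homography3d_sketch(eye, (4, 8, 5), (4, 8, 5)), eye, atol=1e-6))  # True

With the halved/doubled output shape from the last block of the test, the same formula should reproduce the 4.0 / 0.4667 / 3.0 diagonal and the 3.0 / -0.5333 / 2.0 translation shown in res.
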
Example #3
 def test_transform3d_apply(self, device, dtype):
     depth, height, width = 3, 2, 5
     input = torch.tensor([[0.0, 0.0, 0.0], [width - 1, height - 1, depth - 1]], device=device, dtype=dtype)
     expected = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]], device=device, dtype=dtype)
     transform = kornia.normal_transform_pixel3d(depth, height, width, device=device, dtype=dtype)
     output = kornia.transform_points(transform, input)
     assert_close(output, expected.to(device=device, dtype=dtype), atol=1e-4, rtol=1e-4)
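
The same corner mapping can be checked by hand: append a homogeneous coordinate, multiply by the 4x4 matrix, and drop the last component, which is roughly what kornia.transform_points does for an affine transform. The matrix is rebuilt inline here (assuming all sizes are greater than 1) so the snippet stands alone.

import torch

depth, height, width = 3, 2, 5
pts = torch.tensor([[0.0, 0.0, 0.0], [width - 1.0, height - 1.0, depth - 1.0]])

# normalization matrix for (depth, height, width) = (3, 2, 5)
tr = torch.diag(torch.tensor([2.0 / (width - 1), 2.0 / (height - 1), 2.0 / (depth - 1), 1.0]))
tr[:3, 3] = -1.0

pts_h = torch.cat([pts, torch.ones(len(pts), 1)], dim=-1)  # (N, 4) homogeneous points
out = (tr @ pts_h.T).T[:, :3]                              # back to (N, 3)
print(out)  # tensor([[-1., -1., -1.], [ 1.,  1.,  1.]])
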
Example #4
 def test_transform3d_apply(self):
     depth, height, width = 3, 2, 5
     input = torch.tensor([[0., 0., 0.], [width - 1, height - 1, depth - 1]])
     expected = torch.tensor([[-1., -1., -1.], [1., 1., 1.]])
     transform = kornia.normal_transform_pixel3d(depth, height, width)
     output = kornia.transform_points(transform, input)
     assert_allclose(output, expected)
Example #5
 def test_transform3d(self, height, width, depth, expected, device, dtype):
     output = kornia.normal_transform_pixel3d(depth,
                                              height,
                                              width,
                                              device=device,
                                              dtype=dtype)
     assert_allclose(output,
                     expected.to(device=device, dtype=dtype),
                     atol=1e-4,
                     rtol=1e-4)
Example #6
 def test_transform3d(self):
     height, width, depth = 2, 6, 4
     output = kornia.normal_transform_pixel3d(depth, height, width)
     expected = torch.tensor([[
         [0.4, 0.0, 0.0, -1.],
         [0.0, 2.0, 0.0, -1.],
         [0.0, 0.0, 0.6667, -1.],
         [0.0, 0.0, 0.0, 1.],
     ]])
     assert_allclose(output, expected)
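
The expected diagonal follows directly from the 2 / (size - 1) scaling, with the x, y, z axes taking the width, height and depth respectively:

height, width, depth = 2, 6, 4
print(2 / (width - 1), 2 / (height - 1), 2 / (depth - 1))  # 0.4 2.0 0.666...
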