Example #1
 def test_b1_ch1_h3w3_ws23(self):
     input = torch.arange(9.).view(1, 1, 3, 3)
     m = kornia.contrib.ExtractTensorPatches((2, 3))
     patches = m(input)
     assert patches.shape == (1, 2, 1, 2, 3)
     assert utils.check_equal_torch(input[0, :, 0:2, 0:3], patches[0, 0])
     assert utils.check_equal_torch(input[0, :, 1:3, 0:3], patches[0, 1])
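The patch count in the asserted shape follows the usual sliding-window arithmetic. A minimal sketch (assuming ExtractTensorPatches defaults to stride=1 and padding=0, which is consistent with every shape asserted in these examples; num_patches_sketch is a hypothetical helper, not part of kornia):

def num_patches_sketch(size, window, stride=1, padding=0):
    # standard sliding-window count along one spatial dimension
    return (size + 2 * padding - window) // stride + 1

# this example: 3x3 input, (2, 3) window -> 2 * 1 = 2 patches, hence (1, 2, 1, 2, 3)
assert num_patches_sketch(3, 2) * num_patches_sketch(3, 3) == 2
# the stride/padding examples further down follow the same rule,
# e.g. 3x3 input, window 2, stride 1, padding 1 -> 4 * 4 = 16 patches
assert num_patches_sketch(3, 2, 1, 1) * num_patches_sketch(3, 2, 1, 1) == 16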
Example #2
    def test_warp_grid_offset_x1y1_depth1(self, batch_size):
        height, width = 3, 5  # output shape
        pinhole_src, pinhole_dst = self._create_pinhole_pair(batch_size)
        pinhole_dst.tx += 1.  # apply offset to tx
        pinhole_dst.ty += 1.  # apply offset to ty

        # initialize depth to one
        depth_src = torch.ones(batch_size, 1, height, width)

        # create warper, initialize projection matrices and warp grid
        warper = kornia.DepthWarper(pinhole_dst, height, width)
        warper.compute_projection_matrix(pinhole_src)

        grid_warped = warper.warp_grid(depth_src)
        assert grid_warped.shape == (batch_size, height, width, 2)

        # normalize base meshgrid
        grid = warper.grid[..., :2]
        grid_norm = normalize_pixel_coordinates(grid, height, width)

        # check offset in x-axis
        assert utils.check_equal_torch(grid_norm[..., -1, 0],
                                       grid_warped[..., -2, 0])
        # check offset in y-axis
        assert utils.check_equal_torch(grid_norm[..., -1, :, 1],
                                       grid_warped[..., -2, :, 1])
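For reference, the normalization step above maps pixel coordinates into the [-1, 1] range used by grid sampling. A minimal sketch of that convention (normalize_pixel_coords_sketch is a hypothetical stand-in for normalize_pixel_coordinates, assuming the last dimension is ordered (x, y) as in the warper grid):

import torch

def normalize_pixel_coords_sketch(grid, height, width):
    # map x from [0, width - 1] and y from [0, height - 1] to [-1, 1]
    factor = torch.tensor([width - 1., height - 1.])
    return 2. * grid / factor - 1.

corners = torch.tensor([[0., 0.], [4., 2.]])  # (x, y) corners of a 3x5 grid
print(normalize_pixel_coords_sketch(corners, height=3, width=5))
# tensor([[-1., -1.],
#         [ 1.,  1.]])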
Example #3
def test_inverse_pinhole_matrix(batch_size, device_type):
    # generate input data
    image_height, image_width = 32., 32.
    cx, cy = image_width / 2, image_height / 2
    fx, fy = 1., 1.
    rx, ry, rz = 0., 0., 0.
    tx, ty, tz = 0., 0., 0.

    pinhole = utils.create_pinhole(
        fx, fy, cx, cy, image_height, image_width, rx, ry, rz, tx, ty, tz)
    pinhole = pinhole.repeat(batch_size, 1).to(torch.device(device_type))

    pinhole_matrix = tgm.inverse_pinhole_matrix(pinhole)

    ones = torch.ones(batch_size)
    assert utils.check_equal_torch(pinhole_matrix[:, 0, 0], (1. / fx) * ones)
    assert utils.check_equal_torch(pinhole_matrix[:, 1, 1], (1. / fy) * ones)
    assert utils.check_equal_torch(
        pinhole_matrix[:, 0, 2], (-1. * cx / fx) * ones)
    assert utils.check_equal_torch(
        pinhole_matrix[:, 1, 2], (-1. * cy / fy) * ones)

    # functional
    assert tgm.InversePinholeMatrix()(pinhole).shape == (batch_size, 4, 4)

    # evaluate function gradient
    pinhole = utils.tensor_to_gradcheck_var(pinhole)  # to var
    assert gradcheck(tgm.inverse_pinhole_matrix, (pinhole,),
                     raise_exception=True)
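The values asserted above are just the closed-form inverse of a pinhole intrinsic matrix K (which is also why the (1, 2) entry is divided by fy, not fx). A quick standalone check with arbitrarily chosen intrinsics:

import torch

fx, fy, cx, cy = 2., 3., 16., 12.
K = torch.tensor([[fx, 0., cx],
                  [0., fy, cy],
                  [0., 0., 1.]])
K_inv = torch.tensor([[1. / fx, 0., -cx / fx],
                      [0., 1. / fy, -cy / fy],
                      [0., 0., 1.]])
assert torch.allclose(K @ K_inv, torch.eye(3))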
Example #4
    def test_warp_perspective_crop(self):
        # generate input data
        batch_size = 1
        src_h, src_w = 3, 4
        dst_h, dst_w = 3, 2

        # [x, y] origin
        # top-left, top-right, bottom-right, bottom-left
        points_src = torch.FloatTensor([[
            [1, 0], [2, 0], [2, 2], [1, 2],
        ]])

        # [x, y] destination
        # top-left, top-right, bottom-right, bottom-left
        points_dst = torch.FloatTensor([[
            [0, 0], [dst_w - 1, 0], [dst_w - 1, dst_h - 1], [0, dst_h - 1],
        ]])

        # compute transformation between points
        dst_pix_trans_src_pix = tgm.get_perspective_transform(
            points_src, points_dst)

        # create points grid in normalized coordinates
        grid_src_norm = tgm.create_meshgrid(src_h, src_w,
                                            normalized_coordinates=True)
        grid_src_norm = torch.unsqueeze(grid_src_norm, dim=0)

        # create points grid in pixel coordinates
        grid_src_pix = tgm.create_meshgrid(src_h, src_w,
                                           normalized_coordinates=False)
        grid_src_pix = torch.unsqueeze(grid_src_pix, dim=0)

        src_norm_trans_src_pix = tgm.normal_transform_pixel(src_h, src_w)
        src_pix_trans_src_norm = tgm.inverse(src_norm_trans_src_pix)

        dst_norm_trans_dst_pix = tgm.normal_transform_pixel(dst_h, dst_w)

        # transform pixel grid
        grid_dst_pix = tgm.transform_points(
            dst_pix_trans_src_pix, grid_src_pix)
        grid_dst_norm = tgm.transform_points(
            dst_norm_trans_dst_pix, grid_dst_pix)

        # transform norm grid
        dst_norm_trans_src_norm = torch.matmul(
            dst_norm_trans_dst_pix, torch.matmul(
                dst_pix_trans_src_pix, src_pix_trans_src_norm))
        grid_dst_norm2 = tgm.transform_points(
            dst_norm_trans_src_norm, grid_src_norm)

        # grids should be equal
        self.assertTrue(utils.check_equal_torch(
            grid_dst_norm, grid_dst_norm2))

        # warp tensor
        patch = torch.rand(batch_size, 1, src_h, src_w)
        patch_warped = tgm.warp_perspective(
            patch, dst_pix_trans_src_pix, (dst_h, dst_w))
        self.assertTrue(utils.check_equal_torch(
            patch[:, :, :3, 1:3], patch_warped))
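The core identity in this example is that the transform between normalized grids is the pixel-space transform conjugated by the pixel-to-normalized changes of frame. A minimal sketch of that chain (normal_transform_pixel_sketch is a hypothetical helper mirroring what normal_transform_pixel appears to return; torch.inverse stands in for tgm.inverse):

import torch

def normal_transform_pixel_sketch(height, width):
    # maps pixel coordinates [0, w-1] x [0, h-1] to normalized [-1, 1] x [-1, 1]
    return torch.tensor([[2. / (width - 1), 0., -1.],
                         [0., 2. / (height - 1), -1.],
                         [0., 0., 1.]])

src_norm_trans_src_pix = normal_transform_pixel_sketch(3, 4)
dst_norm_trans_dst_pix = normal_transform_pixel_sketch(3, 2)
dst_pix_trans_src_pix = torch.eye(3)  # stand-in for the estimated transform

# same conjugation as in the test: dst_norm <- dst_pix <- src_pix <- src_norm
dst_norm_trans_src_norm = (dst_norm_trans_dst_pix @ dst_pix_trans_src_pix
                           @ torch.inverse(src_norm_trans_src_pix))
# dst_norm_trans_src_norm maps normalized source coords to normalized dst coords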
Example #5
 def _test_b1_ch1_h3w4_ws23(self):
     input = torch.arange(12.).view(1, 1, 3, 4)
     m = tgm.contrib.ExtractTensorPatches((2, 3))
     patches = m(input)
     assert patches.shape == (1, 4, 1, 2, 3)
     assert utils.check_equal_torch(input[0, :, 0:2, 0:3], patches[0, 0])
     assert utils.check_equal_torch(input[0, :, 0:2, 1:4], patches[0, 1])
     assert utils.check_equal_torch(input[0, :, 1:3, 0:3], patches[0, 2])
     assert utils.check_equal_torch(input[0, :, 1:3, 1:4], patches[0, 3])
Example #6
 def test_b1_ch1_h4w4_ws3(self):
     input = torch.arange(16.).view(1, 1, 4, 4)
     m = kornia.contrib.ExtractTensorPatches(3)
     patches = m(input)
     assert patches.shape == (1, 4, 1, 3, 3)
     assert utils.check_equal_torch(input[0, :, :3, :3], patches[0, 0])
     assert utils.check_equal_torch(input[0, :, :3, 1:], patches[0, 1])
     assert utils.check_equal_torch(input[0, :, 1:, :3], patches[0, 2])
     assert utils.check_equal_torch(input[0, :, 1:, 1:], patches[0, 3])
Example #7
 def test_b1_ch1_h4w4_ws2(self):
     input = torch.arange(16.).view(1, 1, 4, 4)
     m = kornia.contrib.ExtractTensorPatches(2)
     patches = m(input)
     assert patches.shape == (1, 9, 1, 2, 2)
     assert utils.check_equal_torch(input[0, :, 0:2, 1:3], patches[0, 1])
     assert utils.check_equal_torch(input[0, :, 0:2, 2:4], patches[0, 2])
     assert utils.check_equal_torch(input[0, :, 1:3, 1:3], patches[0, 4])
     assert utils.check_equal_torch(input[0, :, 2:4, 1:3], patches[0, 7])
Example #8
 def test_b1_ch1_h4w4_ws2_stride21(self):
     input = torch.arange(16.).view(1, 1, 4, 4)
     m = kornia.contrib.ExtractTensorPatches(2, stride=(2, 1))
     patches = m(input)
     assert patches.shape == (1, 6, 1, 2, 2)
     assert utils.check_equal_torch(input[0, :, 0:2, 1:3], patches[0, 1])
     assert utils.check_equal_torch(input[0, :, 0:2, 2:4], patches[0, 2])
     assert utils.check_equal_torch(input[0, :, 2:4, 0:2], patches[0, 3])
     assert utils.check_equal_torch(input[0, :, 2:4, 2:4], patches[0, 5])
Example #9
 def test_b1_ch1_h3w3_ws2_stride1_padding1(self):
     input = torch.arange(9.).view(1, 1, 3, 3)
     m = kornia.contrib.ExtractTensorPatches(2, stride=1, padding=1)
     patches = m(input)
     assert patches.shape == (1, 16, 1, 2, 2)
     assert utils.check_equal_torch(input[0, :, 0:2, 0:2], patches[0, 5])
     assert utils.check_equal_torch(input[0, :, 0:2, 1:3], patches[0, 6])
     assert utils.check_equal_torch(input[0, :, 1:3, 0:2], patches[0, 9])
     assert utils.check_equal_torch(input[0, :, 1:3, 1:3], patches[0, 10])
Example #10
 def test_b1_ch2_h4w4_ws3(self):
     input = torch.arange(16.).view(1, 1, 4, 4)
     input = input.expand(-1, 2, -1, -1)  # copy all channels
     m = kornia.contrib.ExtractTensorPatches(3)
     patches = m(input)
     assert patches.shape == (1, 4, 2, 3, 3)
     assert utils.check_equal_torch(input[0, :, :3, :3], patches[0, 0])
     assert utils.check_equal_torch(input[0, :, :3, 1:], patches[0, 1])
     assert utils.check_equal_torch(input[0, :, 1:, :3], patches[0, 2])
     assert utils.check_equal_torch(input[0, :, 1:, 1:], patches[0, 3])
Example #11
 def test_b2_ch1_h3w3_ws2_stride1_padding1(self):
     batch_size = 2
     input = torch.arange(9.).view(1, 1, 3, 3)
     input = input.expand(batch_size, -1, -1, -1)
     m = kornia.contrib.ExtractTensorPatches(2, stride=1, padding=1)
     patches = m(input)
     assert patches.shape == (batch_size, 16, 1, 2, 2)
     for i in range(batch_size):
          assert utils.check_equal_torch(
              input[i, :, 0:2, 0:2], patches[i, 5])
          assert utils.check_equal_torch(
              input[i, :, 0:2, 1:3], patches[i, 6])
          assert utils.check_equal_torch(
              input[i, :, 1:3, 0:2], patches[i, 9])
          assert utils.check_equal_torch(
              input[i, :, 1:3, 1:3], patches[i, 10])
Example #12
def test_get_perspective_transform(batch_size, device_type):
    # generate input data
    device = torch.device(device_type)

    h_max, w_max = 64, 32  # height, width
    h = torch.ceil(h_max * torch.rand(batch_size)).to(device)
    w = torch.ceil(w_max * torch.rand(batch_size)).to(device)

    norm = torch.rand(batch_size, 4, 2).to(device)
    points_src = torch.zeros_like(norm)
    points_src[:, 1, 0] = h
    points_src[:, 2, 1] = w
    points_src[:, 3, 0] = h
    points_src[:, 3, 1] = w
    points_dst = points_src + norm

    # compute transform from source to target
    dst_homo_src = tgm.get_perspective_transform(points_src, points_dst)

    assert utils.check_equal_torch(
        tgm.transform_points(dst_homo_src, points_src), points_dst)

    # compute gradient check
    points_src = utils.tensor_to_gradcheck_var(points_src)  # to var
    points_dst = utils.tensor_to_gradcheck_var(points_dst)  # to var
    assert gradcheck(tgm.get_perspective_transform,
                     (points_src, points_dst),
                     raise_exception=True)
Example #13
def test_homography_i_H_ref(batch_size, device_type):
    # generate input data
    device = torch.device(device_type)
    image_height, image_width = 32., 32.
    cx, cy = image_width / 2, image_height / 2
    fx, fy = 1., 1.
    rx, ry, rz = 0., 0., 0.
    tx, ty, tz = 0., 0., 0.
    offset_x = 10.  # we will apply a 10-unit offset to the `i` camera
    eps = 1e-6

    pinhole_ref = utils.create_pinhole(fx, fy, cx, cy, image_height,
                                       image_width, rx, ry, rz, tx, ty, tz)
    pinhole_ref = pinhole_ref.repeat(batch_size, 1).to(device)

    pinhole_i = utils.create_pinhole(fx, fy, cx, cy, image_height, image_width,
                                     rx, ry, rz, tx + offset_x, ty, tz)
    pinhole_i = pinhole_i.repeat(batch_size, 1).to(device)

    # compute homography from ref to i
    i_H_ref = tgm.homography_i_H_ref(pinhole_i, pinhole_ref) + eps
    i_H_ref_inv = torch.inverse(i_H_ref)

    # compute homography from i to ref
    ref_H_i = tgm.homography_i_H_ref(pinhole_ref, pinhole_i) + eps
    assert utils.check_equal_torch(i_H_ref_inv, ref_H_i)

    # evaluate function gradient
    assert gradcheck(tgm.homography_i_H_ref,
                     (utils.tensor_to_gradcheck_var(pinhole_ref) + eps,
                      utils.tensor_to_gradcheck_var(pinhole_i) + eps),
                     raise_exception=True)
Example #14
    def test_homography_i_H_ref(self):
        # generate input data
        image_height, image_width = 32., 32.
        cx, cy = image_width / 2, image_height / 2
        fx, fy = 1., 1.
        rx, ry, rz = 0., 0., 0.
        tx, ty, tz = 0., 0., 0.
        offset_x = 10.  # we will apply a 10-unit offset to the `i` camera

        pinhole_ref = utils.create_pinhole(fx, fy, cx, cy, image_height,
                                           image_width, rx, ry, rz, tx, ty, tz)

        pinhole_i = utils.create_pinhole(fx, fy, cx, cy, image_height,
                                         image_width, rx, ry, rz,
                                         tx + offset_x, ty, tz)

        # compute homography from ref to i
        i_H_ref = tgm.homography_i_H_ref(pinhole_i, pinhole_ref)
        i_H_ref_inv = tgm.inverse(i_H_ref)

        # compute homography from i to ref
        ref_H_i = tgm.homography_i_H_ref(pinhole_ref, pinhole_i)

        res = utils.check_equal_torch(i_H_ref_inv, ref_H_i)
        self.assertTrue(res)
Example #15
def test_ssim(batch_shape, device_type, window_size, reduction_type):
    # input data
    device = torch.device(device_type)
    img1 = torch.rand(batch_shape).to(device)
    img2 = torch.rand(batch_shape).to(device)

    ssim = tgm.losses.SSIM(window_size, reduction_type)
    ssim_loss_val = ssim(img1, img2)

    if reduction_type == 'none':
        assert ssim_loss_val.shape == batch_shape
    else:
        assert ssim_loss_val.dim() == 0

    assert ssim(img1, img1).sum().item() == pytest.approx(0.0, abs=1e-4)
    assert ssim(img2, img2).sum().item() == pytest.approx(0.0, abs=1e-4)

    # functional
    assert utils.check_equal_torch(
        ssim_loss_val, tgm.losses.ssim(
            img1, img2, window_size, reduction_type))

    # evaluate function gradient
    img1 = utils.tensor_to_gradcheck_var(img1)  # to var
    img2 = utils.tensor_to_gradcheck_var(img2, requires_grad=False)  # to var
    assert gradcheck(ssim, (img1, img2,), raise_exception=True)
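The two pytest.approx checks rely on the SSIM index of an image with itself being exactly 1, so a loss of the common (1 - SSIM) / 2 form (which tgm.losses.SSIM appears to compute) vanishes. A simplified box-filter sketch of the raw index (ssim_index_sketch is a hypothetical helper, not tgm's windowed implementation):

import torch
import torch.nn.functional as F

def ssim_index_sketch(x, y, window=5, C1=0.01 ** 2, C2=0.03 ** 2):
    # means, variances and covariance via a simple box filter
    mu_x = F.avg_pool2d(x, window, stride=1)
    mu_y = F.avg_pool2d(y, window, stride=1)
    var_x = F.avg_pool2d(x * x, window, stride=1) - mu_x ** 2
    var_y = F.avg_pool2d(y * y, window, stride=1) - mu_y ** 2
    cov_xy = F.avg_pool2d(x * y, window, stride=1) - mu_x * mu_y
    num = (2. * mu_x * mu_y + C1) * (2. * cov_xy + C2)
    den = (mu_x ** 2 + mu_y ** 2 + C1) * (var_x + var_y + C2)
    return num / den

img = torch.rand(1, 1, 16, 16)
index = ssim_index_sketch(img, img)
assert torch.allclose(index, torch.ones_like(index))  # identical images -> 1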
Example #16
    def test_warp_perspective(self):
        # generate input data
        batch_size = 1
        height, width = 16, 32
        alpha = tgm.pi / 2  # 90 deg rotation

        # create data patch
        patch = torch.rand(batch_size, 1, height, width)

        # create transformation (rotation)
        M = torch.tensor([[
            [torch.cos(alpha), -torch.sin(alpha), 0.],
            [torch.sin(alpha), torch.cos(alpha), 0.],
            [0., 0., 1.],
        ]])  # Bx3x3

        # apply transformation and inverse
        _, _, h, w = patch.shape
        patch_warped = tgm.warp_perspective(patch, M, dsize=(height, width))
        patch_warped_inv = tgm.warp_perspective(patch_warped, tgm.inverse(M),
                                                dsize=(height, width))

        # generate mask to compute error
        mask = torch.ones_like(patch)
        mask_warped_inv = tgm.warp_perspective(
            tgm.warp_perspective(mask, M, dsize=(height, width)),
            tgm.inverse(M), dsize=(height, width))

        res = utils.check_equal_torch(mask_warped_inv * patch,
                                      mask_warped_inv * patch_warped_inv)
        self.assertTrue(res)
Example #17
    def test_translation_Bx4x4(self, batch_size):
        offset = 10
        trans_01 = identity_matrix(batch_size)
        trans_01[..., :3, -1] += offset  # add offset to translation vector

        trans_10 = tgm.inverse_transformation(trans_01)
        trans_01_hat = tgm.inverse_transformation(trans_10)
        assert utils.check_equal_torch(trans_01, trans_01_hat)
Example #18
    def test_translation_Bx4x4(self, batch_size):
        offset = 10
        trans_01 = identity_matrix(batch_size)
        trans_12 = identity_matrix(batch_size)
        trans_12[..., :3, -1] += offset  # add offset to translation vector

        trans_02 = tgm.compose_transformations(trans_01, trans_12)
        assert utils.check_equal_torch(trans_02, trans_12)
Example #19
    def test_translation_4x4(self):
        offset = 10
        trans_01 = identity_matrix(batch_size=1)[0]
        trans_12 = identity_matrix(batch_size=1)[0]
        trans_12[..., :3, -1] += offset  # add offset to translation vector

        trans_02 = tgm.boxplus_transformation(trans_01, trans_12)
        assert utils.check_equal_torch(trans_02, trans_12)
Example #20
    def test_two_classes(self):
        num_classes = 2
        actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]])
        predicted = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 1]])

        conf_mat = tgm.metrics.confusion_matrix(predicted, actual, num_classes)
        conf_mat_real = torch.tensor([[[3, 1], [0, 4]]], dtype=torch.float32)
        assert utils.check_equal_torch(conf_mat, conf_mat_real)
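The expected [[3, 1], [0, 4]] follows the convention that rows index the ground-truth class and columns the predicted class. A hypothetical single-sample re-implementation using bincount reproduces it:

import torch

def confusion_matrix_sketch(predicted, actual, num_classes):
    # rows: actual class, columns: predicted class (assumed convention)
    idx = actual * num_classes + predicted
    counts = torch.bincount(idx.view(-1), minlength=num_classes ** 2)
    return counts.view(num_classes, num_classes).float()

actual = torch.tensor([1, 1, 1, 1, 0, 0, 0, 0])
predicted = torch.tensor([1, 1, 1, 1, 0, 0, 0, 1])
print(confusion_matrix_sketch(predicted, actual, num_classes=2))
# tensor([[3., 1.],
#         [0., 4.]])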
Example #21
    def test_depth_warper(self):
        # generate input data
        batch_size = 1
        height, width = 8, 8
        cx, cy = width / 2, height / 2
        fx, fy = 1., 1.
        rx, ry, rz = 0., 0., 0.
        tx, ty, tz = 0., 0., 0.
        offset = 1.  # we will apply a 1-unit offset to the `i` camera

        pinhole_ref = utils.create_pinhole(fx, fy, cx, cy, height, width, rx,
                                           ry, rz, tx, ty, tz)
        pinhole_ref = pinhole_ref.expand(batch_size, -1)

        pinhole_i = utils.create_pinhole(fx, fy, cx, cy, height, width, rx, ry,
                                         rz, tx + offset, ty + offset, tz)
        pinhole_i = pinhole_i.expand(batch_size, -1)

        # create checkerboard
        board = utils.create_checkerboard(height, width, 4)
        patch_i = torch.from_numpy(board).view(1, 1, height, width).expand(
            batch_size, 1, height, width)

        # instantiate warper and compute relative homographies
        warper = tgm.DepthWarper(pinhole_i)
        warper.compute_homographies(pinhole_ref,
                                    scale=torch.ones(batch_size, 1))

        # generate synthetic inverse depth
        inv_depth_ref = torch.ones(batch_size, 1, height, width)

        # warp source patch by depth
        patch_ref = warper(inv_depth_ref, patch_i)

        # compute error
        res = utils.check_equal_torch(
            patch_ref[..., :int(height - offset), :int(width - offset)],
            patch_i[..., int(offset):, int(offset):])
        self.assertTrue(res)

        # test functional
        patch_ref_functional = tgm.depth_warp(pinhole_i, pinhole_ref,
                                              inv_depth_ref, patch_i)
        res = utils.check_equal_torch(patch_ref, patch_ref_functional)
        self.assertTrue(res)
Example #22
    def test_warp_grid_translation(self, shape, offset):
        # create input data
        height, width = shape
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)
        dst_homo_src[..., 0, 2] = offset  # apply offset in x

        # instantiate warper
        warper = tgm.HomographyWarper(height,
                                      width,
                                      normalized_coordinates=False)
        flow = warper.warp_grid(dst_homo_src)

        # the source grid plus the offset should equal the flow on the
        # x-axis; the y-axis remains the same
        assert utils.check_equal_torch(
            warper.grid[..., 0] + offset, flow[..., 0])
        assert utils.check_equal_torch(warper.grid[..., 1], flow[..., 1])
Example #23
 def test_translation(self, batch_size):
     offset = 1.
     channels, height, width = 1, 3, 4
     aff_ab = torch.eye(2, 3).repeat(batch_size, 1, 1)  # Bx2x3
     aff_ab[..., -1] += offset
     img_b = torch.arange(float(height * width)).view(
         1, channels, height, width).repeat(batch_size, 1, 1, 1)
     img_a = kornia.warp_affine(img_b, aff_ab, (height, width))
     assert utils.check_equal_torch(img_b[..., :2, :3], img_a[..., 1:, 1:])
Example #24
    def test_translation_4x4(self):
        offset = 10.
        trans_01 = identity_matrix(batch_size=1)[0]
        trans_02 = identity_matrix(batch_size=1)[0]
        trans_02[..., :3, -1] += offset  # add offset to translation vector

        trans_12 = tgm.relative_transformation(trans_01, trans_02)
        trans_02_hat = tgm.compose_transformations(trans_01, trans_12)
        assert utils.check_equal_torch(trans_02_hat, trans_02)
Example #25
    def test_identity_resize(self, batch_shape):
        # create input data
        batch_size, channels, height, width = batch_shape
        patch_src = torch.rand(batch_size, channels, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size=3)

        # instantiate warper and warp from source to destination
        warper = tgm.HomographyWarper(height // 2, width // 2)
        patch_dst = warper(patch_src, dst_homo_src)

        # check the corners
        assert utils.check_equal_torch(
            patch_src[..., 0, 0], patch_dst[..., 0, 0])
        assert utils.check_equal_torch(
            patch_src[..., 0, -1], patch_dst[..., 0, -1])
        assert utils.check_equal_torch(
            patch_src[..., -1, 0], patch_dst[..., -1, 0])
        assert utils.check_equal_torch(
            patch_src[..., -1, -1], patch_dst[..., -1, -1])
Example #26
    def test_two_classes_perfect(self):
        batch_size = 1
        num_classes = 2
        actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]])
        predicted = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]])

        mean_iou = tgm.metrics.mean_iou(predicted, actual, num_classes)
        mean_iou_real = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
        assert mean_iou.shape == (batch_size, num_classes)
        assert utils.check_equal_torch(mean_iou, mean_iou_real)
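Per-class IoU can be read off a confusion matrix as TP / (TP + FP + FN), which is why a perfect prediction yields [1.0, 1.0]. A hypothetical helper under the same row/column convention as the confusion-matrix examples:

import torch

def mean_iou_sketch(conf_mat):
    # per-class IoU = TP / (TP + FP + FN) from a (num_classes, num_classes) matrix
    tp = conf_mat.diag()
    fn = conf_mat.sum(dim=1) - tp  # actual class c, predicted something else
    fp = conf_mat.sum(dim=0) - tp  # predicted class c, actually something else
    return tp / (tp + fp + fn)

conf_mat = torch.tensor([[4., 0.], [0., 4.]])  # the perfect two-class case above
print(mean_iou_sketch(conf_mat))  # tensor([1., 1.])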
Example #27
    def test_three_classes(self):
        num_classes = 3
        actual = torch.tensor(
            [[2, 2, 0, 0, 1, 0, 0, 2, 1, 1, 0, 0, 1, 2, 1, 0]])
        predicted = torch.tensor(
            [[2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 2, 1, 0, 0, 2, 2]])

        conf_mat = tgm.metrics.confusion_matrix(predicted, actual, num_classes)
        conf_mat_real = torch.tensor([[[4, 1, 2], [3, 0, 2], [1, 2, 1]]],
                                     dtype=torch.float32)
        assert utils.check_equal_torch(conf_mat, conf_mat_real)
Example #28
    def test_two_classes_batch2(self):
        batch_size = 2
        num_classes = 2
        actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]]).repeat(batch_size, 1)
        predicted = torch.tensor(
            [[1, 1, 1, 1, 0, 0, 0, 1]]).repeat(batch_size, 1)

        conf_mat = kornia.utils.metrics.confusion_matrix(
            predicted, actual, num_classes)
        conf_mat_real = torch.tensor([[[3, 1], [0, 4]]], dtype=torch.float32)
        assert utils.check_equal_torch(conf_mat, conf_mat_real)
Example #29
    def test_identity(self):
        # create input data
        height, width = 2, 5
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)

        # instantiate warper
        warper = tgm.HomographyWarper(height, width)

        # warp from source to destination
        patch_dst = warper(patch_src, dst_homo_src)
        assert utils.check_equal_torch(patch_src, patch_dst)
Example #30
    def test_translation(self, shape):
        # create input data
        offset = 2.  # in pixel
        height, width = shape
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)
        dst_homo_src[..., 0, 2] = offset / (width - 1)  # apply offset in x

        # instantiate warper and warp from source to destination
        warper = tgm.HomographyWarper(height, width)
        patch_dst = warper(patch_src, dst_homo_src)
        assert utils.check_equal_torch(patch_src[..., 1:], patch_dst[..., :-1])