# Common imports assumed by the snippets below: `tgm` is torchgeometry and
# `utils` is the repository's local test helper module.
import pytest
import torch
from torch.autograd import gradcheck

import torchgeometry as tgm
import utils

def test_transform_points(batch_size, num_points, num_dims, device_type):
    # generate input data
    eye_size = num_dims + 1
    points_src = torch.rand(batch_size, num_points, num_dims)
    points_src = points_src.to(torch.device(device_type))

    dst_homo_src = utils.create_random_homography(batch_size, eye_size)
    dst_homo_src = dst_homo_src.to(torch.device(device_type))

    # transform the points from src to dst
    points_dst = tgm.transform_points(dst_homo_src, points_src)

    # transform the points back from dst to src
    src_homo_dst = torch.inverse(dst_homo_src)
    points_dst_to_src = tgm.transform_points(src_homo_dst, points_dst)

    # projected points should match the initial ones
    error = utils.compute_mse(points_src, points_dst_to_src)
    assert error.item() == pytest.approx(0.0, abs=1e-4)

    # functional
    assert torch.allclose(points_dst,
                          tgm.TransformPoints(dst_homo_src)(points_src))

    # evaluate function gradient
    points_src = utils.tensor_to_gradcheck_var(points_src)  # to var
    dst_homo_src = utils.tensor_to_gradcheck_var(dst_homo_src)  # to var
    assert gradcheck(tgm.transform_points, (
        dst_homo_src,
        points_src,
    ),
                     raise_exception=True)
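
For intuition, the homography is applied in homogeneous coordinates: append a 1 to each point, multiply by the (D+1)x(D+1) matrix, and divide by the last component. The helper below is an illustrative sketch of that convention (row vectors times the transposed matrix), not the library's implementation; it also shows why applying a transform and then its inverse recovers the original points.

import torch

def transform_points_manual(trans, points):
    # trans: (B, D+1, D+1) homography, points: (B, N, D)
    ones = torch.ones_like(points[..., :1])
    points_h = torch.cat([points, ones], dim=-1)     # to homogeneous coords
    points_h = points_h @ trans.transpose(1, 2)      # row vectors x H^T
    return points_h[..., :-1] / points_h[..., -1:]   # divide by last coord

# sanity check against an identity homography
pts = torch.rand(2, 5, 2)
eye = torch.eye(3).expand(2, 3, 3)
assert torch.allclose(transform_points_manual(eye, pts), pts)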
Example #2
    def test_warp_perspective_crop(self):
        # generate input data
        batch_size = 1
        src_h, src_w = 3, 4
        dst_h, dst_w = 3, 2

        # [x, y] origin
        # top-left, top-right, bottom-right, bottom-left
        points_src = torch.FloatTensor([[
            [1, 0], [2, 0], [2, 2], [1, 2],
        ]])

        # [x, y] destination
        # top-left, top-right, bottom-right, bottom-left
        points_dst = torch.FloatTensor([[
            [0, 0], [dst_w - 1, 0], [dst_w - 1, dst_h - 1], [0, dst_h - 1],
        ]])

        # compute transformation between points
        dst_pix_trans_src_pix = tgm.get_perspective_transform(
            points_src, points_dst)

        # create points grid in normalized coordinates
        grid_src_norm = tgm.create_meshgrid(src_h, src_w,
                                            normalized_coordinates=True)
        grid_src_norm = torch.unsqueeze(grid_src_norm, dim=0)

        # create points grid in pixel coordinates
        grid_src_pix = tgm.create_meshgrid(src_h, src_w,
                                           normalized_coordinates=False)
        grid_src_pix = torch.unsqueeze(grid_src_pix, dim=0)

        src_norm_trans_src_pix = tgm.normal_transform_pixel(src_h, src_w)
        src_pix_trans_src_norm = tgm.inverse(src_norm_trans_src_pix)

        dst_norm_trans_dst_pix = tgm.normal_transform_pixel(dst_h, dst_w)

        # transform pixel grid
        grid_dst_pix = tgm.transform_points(
            dst_pix_trans_src_pix, grid_src_pix)
        grid_dst_norm = tgm.transform_points(
            dst_norm_trans_dst_pix, grid_dst_pix)

        # transform norm grid
        dst_norm_trans_src_norm = torch.matmul(
            dst_norm_trans_dst_pix, torch.matmul(
                dst_pix_trans_src_pix, src_pix_trans_src_norm))
        grid_dst_norm2 = tgm.transform_points(
            dst_norm_trans_src_norm, grid_src_norm)

        # grids should be equal
        self.assertTrue(utils.check_equal_torch(
            grid_dst_norm, grid_dst_norm2))

        # warp tensor
        patch = torch.rand(batch_size, 1, src_h, src_w)
        patch_warped = tgm.warp_perspective(
            patch, dst_pix_trans_src_pix, (dst_h, dst_w))
        self.assertTrue(utils.check_equal_torch(
            patch[:, :, :3, 1:3], patch_warped))
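
A note on naming used throughout these snippets: `a_trans_b` (and `a_homo_b`) denotes the transform that maps points expressed in frame `b` into frame `a`, so compositions chain with matching inner frame names, which is exactly what the matmul near the end of this example does. Schematically, with identity placeholders:

import torch

# dst_norm <- dst_pix <- src_pix <- src_norm
dst_norm_trans_dst_pix = torch.eye(3)[None]
dst_pix_trans_src_pix = torch.eye(3)[None]
src_pix_trans_src_norm = torch.eye(3)[None]
dst_norm_trans_src_norm = (dst_norm_trans_dst_pix
                           @ dst_pix_trans_src_pix
                           @ src_pix_trans_src_norm)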
Example #3
def test_get_perspective_transform(batch_size, device_type):
    # generate input data
    device = torch.device(device_type)

    h_max, w_max = 64, 32  # height, width
    h = torch.ceil(h_max * torch.rand(batch_size)).to(device)
    w = torch.ceil(w_max * torch.rand(batch_size)).to(device)

    norm = torch.rand(batch_size, 4, 2).to(device)
    points_src = torch.zeros_like(norm)
    points_src[:, 1, 0] = h
    points_src[:, 2, 1] = w
    points_src[:, 3, 0] = h
    points_src[:, 3, 1] = w
    points_dst = points_src + norm

    # compute transform from source to target
    dst_homo_src = tgm.get_perspective_transform(points_src, points_dst)

    assert utils.check_equal_torch(
        tgm.transform_points(dst_homo_src, points_src), points_dst)

    # compute gradient check
    points_src = utils.tensor_to_gradcheck_var(points_src)  # to var
    points_dst = utils.tensor_to_gradcheck_var(points_dst)  # to var
    assert gradcheck(tgm.get_perspective_transform, (
        points_src,
        points_dst,
    ),
                     raise_exception=True)
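
For reference, the transform returned by `get_perspective_transform` can be reproduced for a single (unbatched) set of four correspondences by solving the classical 8x8 direct-linear-transform system. The helper below is purely illustrative, not the library's implementation:

import torch

def perspective_from_4pts(src, dst):
    # src, dst: (4, 2) corresponding [x, y] points; returns a 3x3 homography H
    # such that H @ [x, y, 1]^T ~ [u, v, 1]^T
    A, b = [], []
    for (x, y), (u, v) in zip(src.tolist(), dst.tolist()):
        A.append([x, y, 1, 0, 0, 0, -x * u, -y * u])
        A.append([0, 0, 0, x, y, 1, -x * v, -y * v])
        b.extend([u, v])
    h = torch.linalg.solve(torch.tensor(A), torch.tensor(b))
    return torch.cat([h, torch.ones(1)]).reshape(3, 3)

# example: perturb four corner points and recover the homography
src = torch.FloatTensor([[0, 0], [63, 0], [0, 31], [63, 31]])
dst = src + torch.randn(4, 2)
H = perspective_from_4pts(src, dst)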
Example #4
def draw_rectangle(image, dst_homo_src):
    import cv2
    height, width = image.shape[:2]
    pts_src = torch.FloatTensor([[
        [-1, -1],  # top-left
        [1, -1],  # top-right
        [1, 1],  # bottom-right
        [-1, 1],  # bottom-left
    ]]).to(dst_homo_src.device)
    # transform points
    pts_dst = tgm.transform_points(tgm.inverse(dst_homo_src), pts_src)

    def compute_factor(size):
        return 1.0 * size / 2

    def convert_coordinates_to_pixel(coordinates, factor):
        return factor * (coordinates + 1.0)

    # compute conversion factor
    x_factor = compute_factor(width - 1)
    y_factor = compute_factor(height - 1)
    pts_dst = pts_dst.cpu().squeeze().detach().numpy()
    pts_dst[..., 0] = convert_coordinates_to_pixel(pts_dst[..., 0], x_factor)
    pts_dst[..., 1] = convert_coordinates_to_pixel(pts_dst[..., 1], y_factor)
    # do the actual drawing (cv2.line expects integer pixel coordinates)
    for i in range(4):
        pt_i = tuple(int(v) for v in pts_dst[i % 4])
        pt_ii = tuple(int(v) for v in pts_dst[(i + 1) % 4])
        image = cv2.line(image, pt_i, pt_ii, (255, 0, 0), 3)
    return image
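
A hypothetical call to `draw_rectangle` could look like the following; the canvas size and the identity homography are made up for illustration (the function expects an OpenCV-style NumPy image and a batched 1x3x3 homography):

import numpy as np
import torch

image = np.zeros((240, 320, 3), dtype=np.uint8)  # dummy BGR canvas
dst_homo_src = torch.eye(3).unsqueeze(0)         # identity homography, 1x3x3
image = draw_rectangle(image, dst_homo_src)      # draws the warped unit square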
Example #5
    def test_transform_points(self, batch_size, num_points, num_dims,
                              device_type):
        # generate input data
        eye_size = num_dims + 1
        points_src = torch.rand(batch_size, num_points, num_dims)
        points_src = points_src.to(torch.device(device_type))

        dst_homo_src = utils.create_random_homography(batch_size, eye_size)
        dst_homo_src = dst_homo_src.to(torch.device(device_type))

        # transform the points from src to dst
        points_dst = tgm.transform_points(dst_homo_src, points_src)

        # transform the points back from dst to src
        src_homo_dst = torch.inverse(dst_homo_src)
        points_dst_to_src = tgm.transform_points(src_homo_dst, points_dst)

        # projected points should match the initial ones
        error = utils.compute_mse(points_src, points_dst_to_src)
        assert error.item() == pytest.approx(0.0, abs=1e-4)
Example #6
def test_normalize_pixel_grid():
    # generate input data
    batch_size = 1
    height, width = 2, 4

    # create points grid
    grid_norm = tgm.utils.create_meshgrid(
        height, width, normalized_coordinates=True)
    grid_norm = torch.unsqueeze(grid_norm, dim=0)
    grid_pix = tgm.utils.create_meshgrid(
        height, width, normalized_coordinates=False)
    grid_pix = torch.unsqueeze(grid_pix, dim=0)

    # grid from pixel space to normalized
    norm_trans_pix = tgm.normal_transform_pixel(height, width)  # 1x3x3
    pix_trans_norm = torch.inverse(norm_trans_pix)  # 1x3x3
    # transform grids
    grid_pix_to_norm = tgm.transform_points(norm_trans_pix, grid_pix)
    grid_norm_to_pix = tgm.transform_points(pix_trans_norm, grid_norm)
    assert utils.check_equal_torch(grid_pix, grid_norm_to_pix)
    assert utils.check_equal_torch(grid_norm, grid_pix_to_norm)
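
The 3x3 matrix returned by `normal_transform_pixel` maps pixel coordinates in [0, size - 1] to normalized coordinates in [-1, 1]. Assuming the corner-aligned convention used above (pixel 0 maps to -1, pixel size-1 maps to +1), it can be written down directly; this is an illustrative sketch, not the library code:

import torch

def normal_transform_pixel_sketch(height, width):
    # pixel coords in [0, size - 1] -> normalized coords in [-1, 1]
    tr = torch.eye(3)
    tr[0, 0] = 2.0 / (width - 1)
    tr[1, 1] = 2.0 / (height - 1)
    tr[0, 2] = -1.0
    tr[1, 2] = -1.0
    return tr.unsqueeze(0)  # 1x3x3

# for height=2, width=4: x: 0 -> -1, 3 -> +1 and y: 0 -> -1, 1 -> +1
print(normal_transform_pixel_sketch(2, 4))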
Example #7
    def test_transform_points(self):
        # generate input data
        batch_size = 2
        num_points = 2
        num_dims = 2
        eye_size = 3  # identity 3x3
        points_src = torch.rand(batch_size, 2, num_dims)
        dst_homo_src = utils.create_random_homography(batch_size, eye_size)

        # transform the points from src to dst
        points_dst = tgm.transform_points(dst_homo_src, points_src)

        # transform the points back from dst to src
        src_homo_dst = tgm.inverse(dst_homo_src)
        points_dst_to_src = tgm.transform_points(src_homo_dst, points_dst)

        # projected points should match the initial ones
        error = utils.compute_mse(points_src, points_dst_to_src)
        self.assertAlmostEqual(error.item(), 0.0, places=4)

        # functional
        self.assertTrue(
            torch.allclose(points_dst,
                           tgm.TransformPoints()(dst_homo_src, points_src)))
Example #8
    def test_get_perspective_transform(self):
        # generate input data
        h, w = 64, 32  # height, width
        norm = torch.randn(1, 4, 2)
        points_src = torch.FloatTensor([[
            [0, 0], [h, 0], [0, w], [h, w],
        ]])
        points_dst = points_src + norm

        # compute transform from source to target
        dst_homo_src = tgm.get_perspective_transform(points_src, points_dst)

        res = utils.check_equal_torch(
            tgm.transform_points(dst_homo_src, points_src), points_dst)
        self.assertTrue(res)
Example #9
def test_warp_perspective_crop(batch_size, device_type, channels):
    # generate input data
    src_h, src_w = 3, 4
    dst_h, dst_w = 3, 2
    device = torch.device(device_type)

    # [x, y] origin
    # top-left, top-right, bottom-right, bottom-left
    points_src = torch.rand(batch_size, 4, 2).to(device)
    points_src[:, :, 0] *= dst_h
    points_src[:, :, 1] *= dst_w

    # [x, y] destination
    # top-left, top-right, bottom-right, bottom-left
    points_dst = torch.zeros_like(points_src)
    points_dst[:, 1, 0] = dst_w - 1
    points_dst[:, 2, 0] = dst_w - 1
    points_dst[:, 2, 1] = dst_h - 1
    points_dst[:, 3, 1] = dst_h - 1

    # compute transformation between points
    dst_pix_trans_src_pix = tgm.get_perspective_transform(
        points_src, points_dst)

    # create points grid in normalized coordinates
    grid_src_norm = tgm.create_meshgrid(src_h,
                                        src_w,
                                        normalized_coordinates=True)
    grid_src_norm = grid_src_norm.repeat(batch_size, 1, 1, 1).to(device)

    # create points grid in pixel coordinates
    grid_src_pix = tgm.create_meshgrid(src_h,
                                       src_w,
                                       normalized_coordinates=False)
    grid_src_pix = grid_src_pix.repeat(batch_size, 1, 1, 1).to(device)

    src_norm_trans_src_pix = tgm.normal_transform_pixel(src_h, src_w).repeat(
        batch_size, 1, 1).to(device)
    src_pix_trans_src_norm = torch.inverse(src_norm_trans_src_pix)

    dst_norm_trans_dst_pix = tgm.normal_transform_pixel(dst_h, dst_w).repeat(
        batch_size, 1, 1).to(device)

    # transform pixel grid
    grid_dst_pix = tgm.transform_points(dst_pix_trans_src_pix.unsqueeze(1),
                                        grid_src_pix)
    grid_dst_norm = tgm.transform_points(dst_norm_trans_dst_pix.unsqueeze(1),
                                         grid_dst_pix)

    # transform norm grid
    dst_norm_trans_src_norm = torch.matmul(
        dst_norm_trans_dst_pix,
        torch.matmul(dst_pix_trans_src_pix, src_pix_trans_src_norm))
    grid_dst_norm2 = tgm.transform_points(dst_norm_trans_src_norm.unsqueeze(1),
                                          grid_src_norm)

    # grids should be equal
    # TODO: investigate why the precision is that low
    assert utils.check_equal_torch(grid_dst_norm, grid_dst_norm2, 1e-2)

    # warp tensor
    patch = torch.rand(batch_size, channels, src_h, src_w)
    patch_warped = tgm.warp_perspective(patch, dst_pix_trans_src_pix,
                                        (dst_h, dst_w))
    assert patch_warped.shape == (batch_size, channels, dst_h, dst_w)
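
Conceptually, `warp_perspective` composes the same pixel/normalized transforms that this test builds by hand and then samples the source image with `grid_sample`. The sketch below reproduces that pipeline in plain PyTorch (recent versions, for `torch.meshgrid(..., indexing='ij')`) under the corner-aligned [-1, 1] convention; it is an illustration of the idea, not the library's actual implementation, and it ignores border and interpolation options.

import torch
import torch.nn.functional as F

def warp_perspective_sketch(src, dst_pix_trans_src_pix, dsize):
    # For every destination pixel, compute the normalized source coordinate it
    # should sample from, then let grid_sample do the interpolation.
    # src: (B, C, H, W), dst_pix_trans_src_pix: (B, 3, 3), dsize: (dst_h, dst_w)
    b, _, src_h, src_w = src.shape
    dst_h, dst_w = dsize

    def normalize_pixel(h, w):
        # pixel [0, size - 1] -> normalized [-1, 1] (corner-aligned)
        tr = torch.eye(3)
        tr[0, 0] = 2.0 / (w - 1)
        tr[1, 1] = 2.0 / (h - 1)
        tr[0, 2] = -1.0
        tr[1, 2] = -1.0
        return tr

    dst_norm_trans_src_norm = (normalize_pixel(dst_h, dst_w)
                               @ dst_pix_trans_src_pix
                               @ torch.inverse(normalize_pixel(src_h, src_w)))
    src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm)  # (B, 3, 3)

    # normalized [x, y, 1] grid over the destination image
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, dst_h),
                            torch.linspace(-1, 1, dst_w), indexing='ij')
    pts_dst = torch.stack([xs, ys, torch.ones_like(xs)], dim=-1).reshape(-1, 3)

    # map destination pixels back into the (normalized) source frame
    pts_src = pts_dst @ src_norm_trans_dst_norm.transpose(1, 2)  # (B, N, 3)
    pts_src = pts_src[..., :2] / pts_src[..., 2:3]
    grid = pts_src.reshape(b, dst_h, dst_w, 2)
    return F.grid_sample(src, grid, mode='bilinear', align_corners=True)

# rough usage, mirroring the test above: identity crop from 3x4 down to 3x2
patch = torch.rand(2, 3, 3, 4)
homo = torch.eye(3).expand(2, 3, 3)
out = warp_perspective_sketch(patch, homo, (3, 2))  # -> (2, 3, 3, 2)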