Example No. 1
    def __init__(self):
        super(Align, self).__init__()

        # ResNet-18 features
        # Predict 8 values (4 corner points) per homography matrix

        points_src = torch.FloatTensor([[
            [190, 210],
            [455, 210],
            [633, 475],
            [0, 475],
        ]]).cuda()
        points_dst = torch.FloatTensor([[
            [0, 0],
            [399, 0],
            [399, 399],
            [0, 399],
        ]]).cuda()
        cropH = tgm.get_perspective_transform(points_src, points_dst)  # maps the source quad onto a 400x400 image

        points_src = torch.FloatTensor([[[0, 0], [400, 0], [400, 400],
                                         [0, 400]]]).cuda()
        points_dst = torch.FloatTensor([[[400, 400], [0, 400], [0, 0],
                                         [400, 0]]]).cuda()
        flipH = tgm.get_perspective_transform(points_src, points_dst)  # 180-degree rotation (flip both axes) of the 400x400 frame

        self.H1 = cropH
        self.H2 = flipH @ cropH  # crop first, then flip (composed right to left)
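
As a usage sketch (not part of the original class): once the homographies are stored, they can be applied to an image batch with tgm.warp_perspective, which takes the output size as (height, width). The batch shape and variable names below are illustrative assumptions, kept on CPU for portability.

import torch
import torchgeometry as tgm

# illustrative points matching the quad above
points_src = torch.FloatTensor([[[190, 210], [455, 210], [633, 475], [0, 475]]])
points_dst = torch.FloatTensor([[[0, 0], [399, 0], [399, 399], [0, 399]]])
H1 = tgm.get_perspective_transform(points_src, points_dst)  # 1 x 3 x 3

images = torch.rand(4, 3, 480, 640)  # hypothetical B x C x H x W frames
warped = tgm.warp_perspective(images, H1.expand(4, -1, -1), dsize=(400, 400))
print(warped.shape)  # torch.Size([4, 3, 400, 400])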
Example No. 2
    def __init__(self, rootDir, imgData, preprocessing):
        self.rootDir = rootDir
        self.imgData = imgData
        self.preprocessing = preprocessing
        self.dataset = []

        # earlier source quad (same as Example No. 1):
        # points_src = torch.FloatTensor([[
        #     [190, 210], [455, 210], [633, 475], [0, 475],
        # ]]).cuda()
        points_src = torch.FloatTensor([[
            [149, 157], [447, 166], [311, 151], [322, 265],
        ]]).cuda()
        points_dst = torch.FloatTensor([[
            [0, 0], [399, 0], [399, 399], [0, 399],
        ]]).cuda()
        cropH = tgm.get_perspective_transform(points_src, points_dst)

        points_src = torch.FloatTensor([[
            [0, 0], [400, 0], [400, 400], [0, 400],
        ]]).cuda()
        points_dst = torch.FloatTensor([[
            [400, 400], [0, 400], [0, 0], [400, 0],
        ]]).cuda()
        flipH = tgm.get_perspective_transform(points_src, points_dst)

        self.H1 = cropH
        self.H2 = flipH @ cropH  # compose flip with crop; flipH was otherwise unused
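
A hedged sketch of how such a dataset might apply its stored homography when serving a sample; the warp_sample helper and its arguments are hypothetical, not taken from the original class.

import torch
import torchgeometry as tgm

def warp_sample(image, H, out_size=(400, 400)):
    # image: C x H x W tensor on the same device as H (e.g. CUDA, as above)
    # H: 1 x 3 x 3 homography such as self.H1 or self.H2
    return tgm.warp_perspective(image.unsqueeze(0), H, dsize=out_size)[0]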
Example No. 3
def test_get_perspective_transform(batch_size, device_type):
    # generate input data
    device = torch.device(device_type)

    h_max, w_max = 64, 32  # height, width
    h = torch.ceil(h_max * torch.rand(batch_size)).to(device)
    w = torch.ceil(w_max * torch.rand(batch_size)).to(device)

    norm = torch.rand(batch_size, 4, 2).to(device)
    points_src = torch.zeros_like(norm)
    points_src[:, 1, 0] = h  # corner (h, 0)
    points_src[:, 2, 1] = w  # corner (0, w)
    points_src[:, 3, 0] = h  # corner (h, w)
    points_src[:, 3, 1] = w
    points_dst = points_src + norm  # jitter every corner by a random offset

    # compute transform from source to target
    dst_homo_src = tgm.get_perspective_transform(points_src, points_dst)

    assert utils.check_equal_torch(
        tgm.transform_points(dst_homo_src, points_src), points_dst)

    # compute gradient check
    points_src = utils.tensor_to_gradcheck_var(points_src)  # to var
    points_dst = utils.tensor_to_gradcheck_var(points_dst)  # to var
    assert gradcheck(tgm.get_perspective_transform,
                     (points_src, points_dst),
                     raise_exception=True)
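
For context, torch.autograd.gradcheck compares analytical gradients against finite differences and is only reliable in double precision, which is presumably what utils.tensor_to_gradcheck_var arranges. A standalone sketch of the same check with hand-rolled float64 inputs:

import torch
from torch.autograd import gradcheck
import torchgeometry as tgm

# float64 inputs with requires_grad=True, as gradcheck expects; the random
# correspondences are assumed to be non-degenerate
points_src = torch.rand(1, 4, 2, dtype=torch.float64, requires_grad=True)
points_dst = torch.rand(1, 4, 2, dtype=torch.float64, requires_grad=True)
assert gradcheck(tgm.get_perspective_transform,
                 (points_src, points_dst), raise_exception=True)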
Example No. 4
    def test_warp_perspective_crop(self):
        # generate input data
        batch_size = 1
        src_h, src_w = 3, 4
        dst_h, dst_w = 3, 2

        # [x, y] origin
        # top-left, top-right, bottom-right, bottom-left
        points_src = torch.FloatTensor([[
            [1, 0], [2, 0], [2, 2], [1, 2],
        ]])

        # [x, y] destination
        # top-left, top-right, bottom-right, bottom-left
        points_dst = torch.FloatTensor([[
            [0, 0], [dst_w - 1, 0], [dst_w - 1, dst_h - 1], [0, dst_h - 1],
        ]])

        # compute transformation between points
        dst_pix_trans_src_pix = tgm.get_perspective_transform(
            points_src, points_dst)

        # create points grid in normalized coordinates
        grid_src_norm = tgm.create_meshgrid(src_h, src_w,
                                            normalized_coordinates=True)
        grid_src_norm = torch.unsqueeze(grid_src_norm, dim=0)

        # create points grid in pixel coordinates
        grid_src_pix = tgm.create_meshgrid(src_h, src_w,
                                           normalized_coordinates=False)
        grid_src_pix = torch.unsqueeze(grid_src_pix, dim=0)

        src_norm_trans_src_pix = tgm.normal_transform_pixel(src_h, src_w)
        src_pix_trans_src_norm = tgm.inverse(src_norm_trans_src_pix)

        dst_norm_trans_dst_pix = tgm.normal_transform_pixel(dst_h, dst_w)

        # transform pixel grid
        grid_dst_pix = tgm.transform_points(
            dst_pix_trans_src_pix, grid_src_pix)
        grid_dst_norm = tgm.transform_points(
            dst_norm_trans_dst_pix, grid_dst_pix)

        # transform norm grid
        dst_norm_trans_src_norm = torch.matmul(
            dst_norm_trans_dst_pix, torch.matmul(
                dst_pix_trans_src_pix, src_pix_trans_src_norm))
        grid_dst_norm2 = tgm.transform_points(
            dst_norm_trans_src_norm, grid_src_norm)

        # grids should be equal
        self.assertTrue(utils.check_equal_torch(
            grid_dst_norm, grid_dst_norm2))

        # warp tensor
        patch = torch.rand(batch_size, 1, src_h, src_w)
        patch_warped = tgm.warp_perspective(
            patch, dst_pix_trans_src_pix, (dst_h, dst_w))
        self.assertTrue(utils.check_equal_torch(
            patch[:, :, :3, 1:3], patch_warped))
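
A note on the naming convention these tests follow: a matrix named a_trans_b maps points expressed in frame b into frame a, so chained products read right to left. The composition above, annotated under that reading (same variables, comments added):

# src_norm -> src_pix -> dst_pix -> dst_norm, read right to left:
dst_norm_trans_src_norm = torch.matmul(
    dst_norm_trans_dst_pix,                # dst pixel -> dst normalized
    torch.matmul(dst_pix_trans_src_pix,    # src pixel -> dst pixel
                 src_pix_trans_src_norm))  # src normalized -> src pixel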
Example No. 5
    def test_get_perspective_transform(self):
        # generate input data
        h, w = 64, 32  # height, width
        norm = torch.randn(1, 4, 2)
        points_src = torch.FloatTensor([[
            [0, 0], [h, 0], [0, w], [h, w],
        ]])
        points_dst = points_src + norm

        # compute transform from source to target
        dst_homo_src = tgm.get_perspective_transform(points_src, points_dst)

        res = utils.check_equal_torch(
            tgm.transform_points(dst_homo_src, points_src), points_dst)
        self.assertTrue(res)
Example No. 6
    def test_crop(self, device_type, batch_size, channels):
        # generate input data
        src_h, src_w = 3, 3
        dst_h, dst_w = 3, 3
        device = torch.device(device_type)

        # [x, y] origin
        # top-left, bottom-left, bottom-right, top-right
        points_src = torch.FloatTensor([[
            [0, 0],
            [0, src_w - 1],
            [src_h - 1, src_w - 1],
            [src_h - 1, 0],
        ]])

        # [x, y] destination
        # top-left, bottom-left, bottom-right, top-right
        points_dst = torch.FloatTensor([[
            [0, 0],
            [0, dst_w - 1],
            [dst_h - 1, dst_w - 1],
            [dst_h - 1, 0],
        ]])

        # compute transformation between points
        dst_trans_src = tgm.get_perspective_transform(points_src,
                                                      points_dst).expand(
                                                          batch_size, -1, -1)

        # warp tensor
        patch = torch.FloatTensor([[[
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
        ]]]).expand(batch_size, channels, -1, -1)

        expected = torch.FloatTensor([[[
            [1, 2, 3],
            [5, 6, 7],
            [9, 10, 11],
        ]]])

        # warp and assert
        patch_warped = tgm.warp_perspective(patch, dst_trans_src,
                                            (dst_h, dst_w))
        assert_allclose(patch_warped, expected)
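
Because points_src and points_dst coincide here, the fitted homography should be numerically the identity, and the warp reduces to reading off the top-left dst_h x dst_w window of patch. A quick standalone check of that assumption:

import torch
import torchgeometry as tgm

pts = torch.FloatTensor([[[0, 0], [0, 2], [2, 2], [2, 0]]])
H = tgm.get_perspective_transform(pts, pts)
assert torch.allclose(H, torch.eye(3).unsqueeze(0), atol=1e-4)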
Example No. 7
    def test_crop_center_resize(self, device_type):
        # generate input data
        dst_h, dst_w = 4, 4
        device = torch.device(device_type)

        # [x, y] origin
        # top-left, bottom-left, bottom-right, top-right
        points_src = torch.FloatTensor([[
            [1, 1],
            [1, 2],
            [2, 2],
            [2, 1],
        ]])

        # [x, y] destination
        # top-left, bottom-left, bottom-right, top-right
        points_dst = torch.FloatTensor([[
            [0, 0],
            [0, dst_w - 1],
            [dst_h - 1, dst_w - 1],
            [dst_h - 1, 0],
        ]])

        # compute transformation between points
        dst_trans_src = tgm.get_perspective_transform(points_src, points_dst)

        # warp tensor
        patch = torch.FloatTensor([[[
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
        ]]])

        expected = torch.FloatTensor([[[
            [6.000, 6.333, 6.666, 7.000],
            [7.333, 7.666, 8.000, 8.333],
            [8.666, 9.000, 9.333, 9.666],
            [10.000, 10.333, 10.666, 11.000],
        ]]])

        # warp and assert
        patch_warped = tgm.warp_perspective(patch, dst_trans_src,
                                            (dst_h, dst_w))
        assert_allclose(patch_warped, expected)
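
The expected grid is plain bilinear interpolation: the source square's corners land on patch values 6, 7, 10, and 11, and stretching that 2 x 2 block to 4 x 4 with aligned corners yields evenly spaced values (steps of 1/3 across, 4/3 down). A sketch of that arithmetic, assuming align_corners-style sampling:

import torch
import torch.nn.functional as F

corners = torch.FloatTensor([[[[6., 7.], [10., 11.]]]])  # values at the crop corners
up = F.interpolate(corners, size=(4, 4), mode='bilinear', align_corners=True)
print(up[0, 0, 0])  # tensor([6.0000, 6.3333, 6.6667, 7.0000])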
Example No. 8
def test_warp_perspective_crop(batch_size, device_type, channels):
    # generate input data
    src_h, src_w = 3, 4
    dst_h, dst_w = 3, 2
    device = torch.device(device_type)

    # [x, y] origin: four random source points
    points_src = torch.rand(batch_size, 4, 2).to(device)
    points_src[:, :, 0] *= dst_h
    points_src[:, :, 1] *= dst_w

    # [x, y] destination
    # top-left, top-right, bottom-right, bottom-left
    points_dst = torch.zeros_like(points_src)
    points_dst[:, 1, 0] = dst_w - 1
    points_dst[:, 2, 0] = dst_w - 1
    points_dst[:, 2, 1] = dst_h - 1
    points_dst[:, 3, 1] = dst_h - 1

    # compute transformation between points
    dst_pix_trans_src_pix = tgm.get_perspective_transform(
        points_src, points_dst)

    # create points grid in normalized coordinates
    grid_src_norm = tgm.create_meshgrid(src_h,
                                        src_w,
                                        normalized_coordinates=True)
    grid_src_norm = grid_src_norm.repeat(batch_size, 1, 1, 1).to(device)

    # create points grid in pixel coordinates
    grid_src_pix = tgm.create_meshgrid(src_h,
                                       src_w,
                                       normalized_coordinates=False)
    grid_src_pix = grid_src_pix.repeat(batch_size, 1, 1, 1).to(device)

    src_norm_trans_src_pix = tgm.normal_transform_pixel(src_h, src_w).repeat(
        batch_size, 1, 1).to(device)
    src_pix_trans_src_norm = torch.inverse(src_norm_trans_src_pix)

    dst_norm_trans_dst_pix = tgm.normal_transform_pixel(dst_h, dst_w).repeat(
        batch_size, 1, 1).to(device)

    # transform pixel grid
    grid_dst_pix = tgm.transform_points(dst_pix_trans_src_pix.unsqueeze(1),
                                        grid_src_pix)
    grid_dst_norm = tgm.transform_points(dst_norm_trans_dst_pix.unsqueeze(1),
                                         grid_dst_pix)

    # transform norm grid
    dst_norm_trans_src_norm = torch.matmul(
        dst_norm_trans_dst_pix,
        torch.matmul(dst_pix_trans_src_pix, src_pix_trans_src_norm))
    grid_dst_norm2 = tgm.transform_points(dst_norm_trans_src_norm.unsqueeze(1),
                                          grid_src_norm)

    # grids should be equal
    # TODO: investigate why the precision is that low
    assert utils.check_equal_torch(grid_dst_norm, grid_dst_norm2, 1e-2)

    # warp tensor
    patch = torch.rand(batch_size, channels, src_h, src_w)
    patch_warped = tgm.warp_perspective(patch, dst_pix_trans_src_pix,
                                        (dst_h, dst_w))
    assert patch_warped.shape == (batch_size, channels, dst_h, dst_w)