    def test_homography_warper_gradcheck(self):
        # generate input data
        batch_size = 1
        height, width = 16, 32  # small patch, otherwise the test takes forever
        eye_size = 3  # identity 3x3

        # create checkerboard
        board = utils.create_checkerboard(height, width, 4)
        patch_src = torch.from_numpy(board).view(
            1, 1, height, width).expand(batch_size, 1, height, width)
        patch_src = utils.tensor_to_gradcheck_var(patch_src)  # to var
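        # tensor_to_gradcheck_var presumably casts the tensor to float64 and
        # sets requires_grad, since torch.autograd.gradcheck expects
        # double-precision inputs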

        # create base homography
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size)
        dst_homo_src = utils.tensor_to_gradcheck_var(
            dst_homo_src, requires_grad=False)  # to var

        # instantiate warper
        warper = tgm.HomographyWarper(height, width)

        # evaluate function gradient
        res = gradcheck(warper, (patch_src, dst_homo_src,),
                        raise_exception=True)
        self.assertTrue(res)

        # evaluate the gradient of the functional api
        res = gradcheck(
            tgm.homography_warp,
            (patch_src,
             dst_homo_src,
             (height,
              width)),
            raise_exception=True)
        self.assertTrue(res)
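
    # NOTE: the `shape`, `offset`, `batch_shape` and `device_type` arguments of
    # the tests below are presumably supplied by pytest parametrize decorators
    # that are not shown in this listing.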
    def test_translation(self, shape):
        # create input data
        offset = 2.  # numerator of the normalized offset; 2 / (width - 1) is one pixel
        height, width = shape
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)
        dst_homo_src[..., 0, 2] = offset / (width - 1)  # apply offset in x

        # instantiate warper and warp from source to destination
        warper = tgm.HomographyWarper(height, width)
        patch_dst = warper(patch_src, dst_homo_src)
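        # a one-pixel shift: column x of the warped patch should equal
        # column x + 1 of the source patch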
        assert utils.check_equal_torch(patch_src[..., 1:], patch_dst[..., :-1])

    def test_identity(self):
        # create input data
        height, width = 2, 5
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)

        # instantiate warper
        warper = tgm.HomographyWarper(height, width)

        # warp from source to destination
        patch_dst = warper(patch_src, dst_homo_src)
        assert utils.check_equal_torch(patch_src, patch_dst)

    def test_warp_grid_translation(self, shape, offset):
        # create input data
        height, width = shape
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)
        dst_homo_src[..., 0, 2] = offset  # apply offset in x

        # instantiate warper
        warper = tgm.HomographyWarper(height,
                                      width,
                                      normalized_coordinates=False)
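        # with normalized_coordinates=False the base grid is in pixel units, so
        # the x-translation set above shifts the grid by `offset` pixels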
        flow = warper.warp_grid(dst_homo_src)

        # the source grid shifted by the offset should equal the flow on the
        # x-axis; the y-axis remains unchanged
        assert utils.check_equal_torch(
            warper.grid[..., 0] + offset, flow[..., 0])
        assert utils.check_equal_torch(warper.grid[..., 1], flow[..., 1])

    def test_identity_resize(self, batch_shape):
        # create input data
        batch_size, channels, height, width = batch_shape
        patch_src = torch.rand(batch_size, channels, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size=3)

        # instantiate warper warp from source to destination
        warper = tgm.HomographyWarper(height // 2, width // 2)
        patch_dst = warper(patch_src, dst_homo_src)
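        # the warper resamples the patch to (height // 2, width // 2); the
        # normalized grid corners map exactly onto the source corners, so the
        # corner values should be preserved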

        # check the corners
        assert utils.check_equal_torch(
            patch_src[..., 0, 0], patch_dst[..., 0, 0])
        assert utils.check_equal_torch(
            patch_src[..., 0, -1], patch_dst[..., 0, -1])
        assert utils.check_equal_torch(
            patch_src[..., -1, 0], patch_dst[..., -1, 0])
        assert utils.check_equal_torch(
            patch_src[..., -1, -1], patch_dst[..., -1, -1])

    def test_homography_warper(self, batch_size, device_type):
        # generate input data
        height, width = 128, 64
        eye_size = 3  # identity 3x3
        device = torch.device(device_type)

        # create checkerboard
        board = utils.create_checkerboard(height, width, 4)
        patch_src = torch.from_numpy(board).view(1, 1, height, width).expand(
            batch_size, 1, height, width)
        patch_src = patch_src.to(device)

        # create base homography
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size).to(device)

        # instantiate warper
        warper = tgm.HomographyWarper(height, width)
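        # self.num_tests and self.threshold are presumably attributes of the
        # test class; they are not shown in this listing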

        for i in range(self.num_tests):
            # homography perturbation (all zeros here, so the warp uses the base
            # homography); the bottom-right entry is explicitly kept at zero
            homo_delta = torch.zeros_like(dst_homo_src)
            homo_delta[:, -1, -1] = 0.0

            dst_homo_src_i = dst_homo_src + homo_delta

            # transform the points from dst to ref
            patch_dst = warper(patch_src, dst_homo_src_i)
            patch_dst_to_src = warper(patch_dst, torch.inverse(dst_homo_src_i))

            # the warp followed by its inverse should reproduce the patch
            error = utils.compute_patch_error(patch_dst, patch_dst_to_src,
                                              height, width)

            assert error.item() < self.threshold

            # check functional api
            patch_dst_to_src_functional = tgm.homography_warp(
                patch_dst, torch.inverse(dst_homo_src_i), (height, width))

            assert utils.check_equal_torch(patch_dst_to_src,
                                           patch_dst_to_src_functional)

Example #7

    def test_gradcheck(self, batch_shape, device_type):
        # generate input data
        device = torch.device(device_type)
        eye_size = 3  # identity 3x3

        # create checkerboard
        patch_src = torch.rand(batch_shape).to(device)
        patch_src = utils.tensor_to_gradcheck_var(patch_src)  # to var

        # create base homography
        batch_size, _, height, width = patch_src.shape
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size)
        dst_homo_src = utils.tensor_to_gradcheck_var(
            dst_homo_src, requires_grad=False)  # to var

        # instantiate warper
        warper = tgm.HomographyWarper(height, width)

        # evaluate function gradient
        assert gradcheck(warper, (patch_src, dst_homo_src,),
                         raise_exception=True)

    def test_rotation(self, batch_shape):
        # create input data
        batch_size, channels, height, width = batch_shape
        patch_src = torch.rand(batch_size, channels, height, width)
        # rotation of 90deg
        dst_homo_src = utils.create_eye_batch(batch_size, 3)
        dst_homo_src[..., 0, 0] = 0.0
        dst_homo_src[..., 0, 1] = 1.0
        dst_homo_src[..., 1, 0] = -1.0
        dst_homo_src[..., 1, 1] = 0.0
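        # this homography maps (x, y) -> (y, -x) in normalized coordinates,
        # i.e. a 90-degree rotation about the patch center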

        # instantiate warper and warp from source to destination
        warper = tgm.HomographyWarper(height, width)
        patch_dst = warper(patch_src, dst_homo_src)

        # check the corners
        assert utils.check_equal_torch(
            patch_src[..., 0, 0], patch_dst[..., 0, -1])
        assert utils.check_equal_torch(
            patch_src[..., 0, -1], patch_dst[..., -1, -1])
        assert utils.check_equal_torch(
            patch_src[..., -1, 0], patch_dst[..., 0, 0])
        assert utils.check_equal_torch(
            patch_src[..., -1, -1], patch_dst[..., -1, 0])

Example #9

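Note: `MyHomography`, `load_image` and `load_homography` are helpers defined
elsewhere in the original example script and are not part of this snippet. A
minimal sketch of `MyHomography`, consistent with how it is used below
(instantiated with no arguments, its parameters handed to the optimizer, called
to produce a 1x3x3 homography, and exposing a `homo` attribute), could look
like this:

import torch
import torch.nn as nn


class MyHomography(nn.Module):
    """Sketch: a learnable 3x3 homography, initialized to the identity."""

    def __init__(self):
        super(MyHomography, self).__init__()
        self.homo = nn.Parameter(torch.eye(3))

    def forward(self):
        # add a batch dimension so the warper receives a Bx3x3 tensor
        return torch.unsqueeze(self.homo, dim=0)
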
def HomographyRegressionApp():
    # Training settings
    parser = argparse.ArgumentParser(
        description='Homography Regression with photometric loss.')
    parser.add_argument('--input-dir',
                        type=str,
                        required=True,
                        help='the path to the directory with the input data.')
    parser.add_argument('--output-dir',
                        type=str,
                        required=True,
                        help='the path to output the results.')
    parser.add_argument('--num-iterations',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='number of training iterations (default: 1000)')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-3,
                        metavar='LR',
                        help='learning rate (default: 1e-3)')
    parser.add_argument('--cuda',
                        action='store_true',
                        default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=666,
                        metavar='S',
                        help='random seed (default: 666)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--log-interval-vis',
        type=int,
        default=100,
        metavar='N',
        help='how many batches to wait before visual logging training status')
    args = parser.parse_args()

    # define the device to use for inference
    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

    torch.manual_seed(args.seed)

    # load the data
    img_src, _ = load_image(os.path.join(args.input_dir, 'img1.ppm'))
    img_dst, _ = load_image(os.path.join(args.input_dir, 'img2.ppm'))
    dst_homo_src_gt = load_homography(os.path.join(args.input_dir, 'H1to2p'))

    # instantiate the homography warper from `torchgeometry`
    height, width = img_src.shape[-2:]
    warper = dgm.HomographyWarper(height, width)

    # create the homography as the parameter to be optimized
    dst_homo_src = MyHomography().to(device)

    # create optimizer
    optimizer = optim.Adam(dst_homo_src.parameters(), lr=args.lr)
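    # note: only the homography parameters are optimized; the input images stay fixed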

    # main training loop

    for iter_idx in range(args.num_iterations):
        # send data to device
        img_src, img_dst = img_src.to(device), img_dst.to(device)

        # warp the reference image to the destination with the current homography
        img_src_to_dst = warper(img_src, dst_homo_src())

        # compute the photometric loss
        loss = F.l1_loss(img_src_to_dst, img_dst, reduction='none')

        # propagate the error just for a fixed window
        w_size = 100  # window size
        h_2, w_2 = height // 2, width // 2
        loss = loss[..., h_2 - w_size:h_2 + w_size, w_2 - w_size:w_2 + w_size]
        loss = torch.mean(loss)

        # compute gradient and update optimizer parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if iter_idx % args.log_interval == 0:
            print('Train iteration: {}/{}\tLoss: {:.6}'.format(
                iter_idx, args.num_iterations, loss.item()))
            print(dst_homo_src.homo)

        def draw_rectangle(image, dst_homo_src):
            height, width = image.shape[:2]
            pts_src = torch.FloatTensor([[
                [-1, -1],  # top-left
                [1, -1],  # top-right
                [1, 1],  # bottom-right
                [-1, 1],  # bottom-left
            ]]).to(dst_homo_src.device)
            # transform points
            pts_dst = dgm.transform_points(dgm.inverse(dst_homo_src), pts_src)

            def compute_factor(size):
                return 1.0 * size / 2

            def convert_coordinates_to_pixel(coordinates, factor):
                return factor * (coordinates + 1.0)
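
            # together these map normalized coordinates in [-1, 1] to pixel
            # coordinates in [0, size - 1]: pixel = (size - 1) / 2 * (coord + 1)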

            # compute conversion factors
            x_factor = compute_factor(width - 1)
            y_factor = compute_factor(height - 1)
            pts_dst = pts_dst.cpu().squeeze().detach().numpy()
            pts_dst[..., 0] = convert_coordinates_to_pixel(
                pts_dst[..., 0], x_factor)
            pts_dst[..., 1] = convert_coordinates_to_pixel(
                pts_dst[..., 1], y_factor)

            # do the actual drawing
            for i in range(4):
                # cv2.line expects integer pixel coordinates
                pt_i = tuple(int(v) for v in pts_dst[i % 4])
                pt_ii = tuple(int(v) for v in pts_dst[(i + 1) % 4])
                image = cv2.line(image, pt_i, pt_ii, (255, 0, 0), 3)
            return image

        if iter_idx % args.log_interval_vis == 0:
            # merge warped and target image for visualization
            img_src_to_dst = warper(img_src, dst_homo_src())
            img_vis = 255. * 0.5 * (img_src_to_dst + img_dst)
            img_vis_np = dgm.utils.tensor_to_image(img_vis)
            image_draw = draw_rectangle(img_vis_np, dst_homo_src())
            # save warped image to disk
            file_name = os.path.join(args.output_dir,
                                     'warped_{}.png'.format(iter_idx))
            cv2.imwrite(file_name, image_draw)
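
# The original script presumably invokes this entry point via a standard main
# guard, e.g.:
if __name__ == '__main__':
    HomographyRegressionApp()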