Example no. 1
    def test_real_sift_preextract(self, device, dtype, data):
        torch.random.manual_seed(0)
        # This is not a unit test, but it is a reasonably good integration test
        feat = SIFTFeature(2000)
        matcher = LocalFeatureMatcher(feat, DescriptorMatcher('snn',
                                                              0.8)).to(device)
        ransac = RANSAC('homography', 1.0, 2048, 10).to(device, dtype)
        data_dev = utils.dict_to(data, device, dtype)
        pts_src = data_dev['pts0']
        pts_dst = data_dev['pts1']

        lafs, _, descs = feat(data_dev["image0"])
        data_dev["lafs0"] = lafs
        data_dev["descriptors0"] = descs

        lafs2, _, descs2 = feat(data_dev["image1"])
        data_dev["lafs1"] = lafs2
        data_dev["descriptors1"] = descs2

        with torch.no_grad():
            out = matcher(data_dev)
        homography, inliers = ransac(out['keypoints0'], out['keypoints1'])
        assert inliers.sum().item() > 50  # we have enough inliers
        # Reprojection error of 5px is OK
        assert_close(transform_points(homography[None], pts_src[None]),
                     pts_dst[None],
                     rtol=5e-2,
                     atol=5)
Example no. 2
 def inverse_keypoints(
         self,
         input: torch.Tensor,
         module: nn.Module,
         param: Optional[Dict[str, torch.Tensor]] = None) -> torch.Tensor:
     if isinstance(module, GeometricAugmentationBase2D):
         transform = module.compute_inverse_transformation(
             module.get_transformation_matrix(input, param))
         input = transform_points(
             torch.as_tensor(transform,
                             device=input.device,
                             dtype=input.dtype), input)
     return input
Example no. 3
    def test_dirty_points(self, device, dtype):
        # generate input data
        torch.random.manual_seed(0)

        H = torch.eye(3, dtype=dtype, device=device)
        H[:2] = H[:2] + 0.1 * torch.rand_like(H[:2])
        H[2:, :2] = H[2:, :2] + 0.001 * torch.rand_like(H[2:, :2])

        points_src = 100.0 * torch.rand(1, 20, 2, device=device, dtype=dtype)
        points_dst = transform_points(H[None], points_src)

        # making last point an outlier
        points_dst[:, -1, :] += 800
        ransac = RANSAC('homography', inl_th=0.5,
                        max_iter=20).to(device=device, dtype=dtype)
        # compute transform from source to target
        dst_homo_src, _ = ransac(points_src[0], points_dst[0])

        assert_close(transform_points(dst_homo_src[None], points_src[:, :-1]),
                     points_dst[:, :-1],
                     rtol=1e-3,
                     atol=1e-3)
Example no. 4
    def test_registration_real(self, device, dtype, data):
        data_dev = utils.dict_to(data, device, dtype)
        IR = ImageRegistrator('homography',
                              num_iterations=1200,
                              lr=2e-2,
                              pyramid_levels=5).to(device, dtype)
        model = IR.register(data_dev['image0'], data_dev['image1'])
        homography_gt = torch.inverse(data_dev['H_gt'])
        homography_gt = homography_gt / homography_gt[2, 2]
        h0, w0 = data['image0'].shape[2], data['image0'].shape[3]
        h1, w1 = data['image1'].shape[2], data['image1'].shape[3]

        model_denormalized = denormalize_homography(model, (h0, w0), (h1, w1))
        model_denormalized = model_denormalized / model_denormalized[0, 2, 2]

        bbox = torch.tensor([[[0, 0], [w0, 0], [w0, h0], [0, h0]]],
                            device=device,
                            dtype=dtype)
        bbox_in_2_gt = transform_points(homography_gt[None], bbox)
        bbox_in_2_gt_est = transform_points(model_denormalized, bbox)
        # The tolerance is huge, because the error is in pixels
        # and transformation is quite significant, so
        # 15 px  reprojection error is not super huge
        assert_close(bbox_in_2_gt, bbox_in_2_gt_est, atol=15, rtol=0.1)
Example no. 5
 def apply_to_keypoints(
         self,
         input: torch.Tensor,
         module: nn.Module,
         param: Optional[Dict[str, torch.Tensor]] = None) -> torch.Tensor:
     if isinstance(module, GeometricAugmentationBase2D) and param is None:
         raise ValueError(
             f"Transformation matrix for {module} has not been computed.")
     if isinstance(module,
                   GeometricAugmentationBase2D) and param is not None:
         input = transform_points(
             module.get_transformation_matrix(input, param), input)
     else:
         pass  # No need to update anything
     return input
Example no. 6
    def test_real_clean(self, device, dtype, data):
        # generate input data
        torch.random.manual_seed(0)
        data_dev = utils.dict_to(data, device, dtype)
        homography_gt = torch.inverse(data_dev['H_gt'])
        homography_gt = homography_gt / homography_gt[2, 2]
        pts_src = data_dev['pts0']
        pts_dst = data_dev['pts1']
        ransac = RANSAC('homography', inl_th=0.5,
                        max_iter=20).to(device=device, dtype=dtype)
        # compute transform from source to target
        dst_homo_src, _ = ransac(pts_src, pts_dst)

        assert_close(transform_points(dst_homo_src[None], pts_src[None]),
                     pts_dst[None],
                     rtol=1e-3,
                     atol=1e-3)
Example no. 7
 def test_real_keynet(self, device, dtype, data):
     torch.random.manual_seed(0)
     # This is not a unit test, but it is a reasonably good integration test
     matcher = LocalFeatureMatcher(KeyNetHardNet(500),
                                   DescriptorMatcher('snn', 0.9)).to(
                                       device, dtype)
     ransac = RANSAC('homography', 1.0, 2048, 10).to(device, dtype)
     data_dev = utils.dict_to(data, device, dtype)
     pts_src = data_dev['pts0']
     pts_dst = data_dev['pts1']
     with torch.no_grad():
         out = matcher(data_dev)
     homography, inliers = ransac(out['keypoints0'], out['keypoints1'])
     assert inliers.sum().item() > 50  # we have enough inliers
     # Reprojection error of 5px is OK
     assert_close(transform_points(homography[None], pts_src[None]),
                  pts_dst[None],
                  rtol=5e-2,
                  atol=5)
Example no. 8
    def test_real_dirty(self, device, dtype, data):
        # generate input data
        torch.random.manual_seed(0)
        data_dev = utils.dict_to(data, device, dtype)
        homography_gt = torch.inverse(data_dev['H_gt'])
        homography_gt = homography_gt / homography_gt[2, 2]
        pts_src = data_dev['pts0']
        pts_dst = data_dev['pts1']

        kp1 = data_dev['loftr_outdoor_tentatives0']
        kp2 = data_dev['loftr_outdoor_tentatives1']

        ransac = RANSAC('homography', inl_th=3.0, max_iter=30,
                        max_lo_iters=10).to(device=device, dtype=dtype)
        # compute transform from source to target
        dst_homo_src, _ = ransac(kp1, kp2)

        # Reprojection error of 5px is OK
        assert_close(transform_points(dst_homo_src[None], pts_src[None]),
                     pts_dst[None],
                     rtol=0.15,
                     atol=5)
Example no. 9
def warp_frame_depth(image_src: torch.Tensor, depth_dst: torch.Tensor,
                     src_trans_dst: torch.Tensor,
                     camera_matrix: torch.Tensor) -> torch.Tensor:
    """Warp a tensor from a source to destination frame by the depth in the destination.

    Compute 3d points from the depth map, transform them using the given transformation, then project the point
    cloud back onto the image plane.

    Args:
        image_src (torch.Tensor): image tensor in the source frame with shape (BxDxHxW).
        depth_dst (torch.Tensor): depth tensor in the destination frame with shape (Bx1xHxW).
        src_trans_dst (torch.Tensor): transformation matrix from destination to source with shape (Bx4x4).
        camera_matrix (torch.Tensor): tensor containing the camera intrinsics with shape (Bx3x3).

    Return:
        torch.Tensor: the source image warped into the destination frame, with shape (BxDxHxW).
    """
    if not isinstance(image_src, torch.Tensor):
        raise TypeError(
            f"Input image_src type is not a torch.Tensor. Got {type(image_src)}."
        )

    if not len(image_src.shape) == 4:
        raise ValueError(
            f"Input image_src musth have a shape (B, D, H, W). Got: {image_src.shape}"
        )

    if not isinstance(depth_dst, torch.Tensor):
        raise TypeError(
            f"Input depht_dst type is not a torch.Tensor. Got {type(depth_dst)}."
        )

    if not (len(depth_dst.shape) == 4 and depth_dst.shape[-3] == 1):
        raise ValueError(
            f"Input depth_dst must have a shape (B, 1, H, W). Got: {depth_dst.shape}"
        )

    if not isinstance(src_trans_dst, torch.Tensor):
        raise TypeError(f"Input src_trans_dst type is not a torch.Tensor. "
                        f"Got {type(src_trans_dst)}.")

    if not (len(src_trans_dst.shape) == 3
            and src_trans_dst.shape[-2:] == (4, 4)):
        raise ValueError(f"Input src_trans_dst must have a shape (B, 4, 4). "
                         f"Got: {src_trans_dst.shape}.")

    if not isinstance(camera_matrix, torch.Tensor):
        raise TypeError(f"Input camera_matrix type is not a torch.Tensor. "
                        f"Got {type(camera_matrix)}.")

    if not (len(camera_matrix.shape) == 3
            and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")
    # unproject the destination depth map to 3d points in the camera frame
    points_3d_dst: torch.Tensor = depth_to_3d(depth_dst,
                                              camera_matrix)  # Bx3xHxW

    # rearrange the point cloud to BxHxWx3 so the last dimension holds the 3d coordinates
    points_3d_dst = points_3d_dst.permute(0, 2, 3, 1)  # BxHxWx3

    # apply transformation to the 3d points
    points_3d_src = transform_points(src_trans_dst[:, None],
                                     points_3d_dst)  # BxHxWx3

    # project back to pixels
    camera_matrix_tmp: torch.Tensor = camera_matrix[:, None, None]  # Bx1x1x3x3
    points_2d_src: torch.Tensor = project_points(points_3d_src,
                                                 camera_matrix_tmp)  # BxHxWx2

    # normalize pixel coordinates to the range [-1, 1] expected by grid_sample
    height, width = depth_dst.shape[-2:]
    points_2d_src_norm: torch.Tensor = normalize_pixel_coordinates(
        points_2d_src, height, width)  # BxHxWx2

    return F.grid_sample(image_src, points_2d_src_norm,
                         align_corners=True)  # type: ignore
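A minimal usage sketch for warp_frame_depth above, assuming the helpers it calls (depth_to_3d, project_points, normalize_pixel_coordinates, F.grid_sample) are importable as in the surrounding code; the tensor sizes and intrinsics are made up purely for illustration, and with an identity transform between the frames the output should roughly reproduce image_src.

import torch

# Hypothetical inputs: one 3-channel source image, a constant destination depth map,
# an identity 4x4 transform (no relative motion) and a simple pinhole intrinsics matrix.
B, D, H, W = 1, 3, 48, 64
image_src = torch.rand(B, D, H, W)
depth_dst = torch.ones(B, 1, H, W)
src_trans_dst = torch.eye(4)[None]  # Bx4x4
camera_matrix = torch.tensor([[[50.0, 0.0, W / 2.0],
                               [0.0, 50.0, H / 2.0],
                               [0.0, 0.0, 1.0]]])  # Bx3x3

warped = warp_frame_depth(image_src, depth_dst, src_trans_dst, camera_matrix)
assert warped.shape == (B, D, H, W)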