예제 #1
0
파일: test_loftr.py 프로젝트: kornia/kornia
 def test_pretrained_outdoor(self, device, dtype, data):
     """Pretrained outdoor LoFTR must reproduce the stored tentative matches."""
     model = LoFTR('outdoor').to(device, dtype)
     batch = utils.dict_to(data, device, dtype)
     with torch.no_grad():
         result = model(batch)
     # Both sets of keypoints must match the precomputed outdoor tentatives.
     assert_close(result['keypoints0'], batch["loftr_outdoor_tentatives0"])
     assert_close(result['keypoints1'], batch["loftr_outdoor_tentatives1"])
예제 #2
0
    def test_real_sift_preextract(self, device, dtype, data):
        """SIFT features are extracted up front, handed to the matcher, and the
        resulting matches are validated through a RANSAC homography.
        """
        torch.random.manual_seed(0)
        # Not a strict unit test, but a useful end-to-end integration check.
        feature = SIFTFeature(2000)
        matcher = LocalFeatureMatcher(
            feature, DescriptorMatcher('snn', 0.8)).to(device)
        estimator = RANSAC('homography', 1.0, 2048, 10).to(device, dtype)
        batch = utils.dict_to(data, device, dtype)
        src = batch['pts0']
        dst = batch['pts1']

        # Pre-extract local features for both images so the matcher can skip
        # its own detection stage.
        for idx in (0, 1):
            lafs, _, descs = feature(batch[f"image{idx}"])
            batch[f"lafs{idx}"] = lafs
            batch[f"descriptors{idx}"] = descs

        with torch.no_grad():
            matches = matcher(batch)
        homography, inliers = estimator(matches['keypoints0'],
                                        matches['keypoints1'])
        # Enough inliers proves the matching worked at all.
        assert inliers.sum().item() > 50
        # A reprojection error of up to 5px is acceptable.
        assert_close(transform_points(homography[None], src[None]),
                     dst[None],
                     rtol=5e-2,
                     atol=5)
예제 #3
0
 def test_nomatch(self, device, dtype, data):
     """Matching an image against an all-zero image must yield no keypoints."""
     matcher = LocalFeatureMatcher(
         GFTTAffNetHardNet(100),
         DescriptorMatcher('snn', 0.8)).to(device, dtype)
     batch = utils.dict_to(data, device, dtype)
     image = batch["image0"]
     with torch.no_grad():
         # The second image is zeroed out, so nothing can match.
         result = matcher({"image0": image, "image1": 0 * image})
     assert len(result['keypoints0']) == 0
예제 #4
0
    def test_real_clean(self, device, dtype, data):
        """RANSAC homography on clean correspondences reaches near-exact accuracy.

        Fix: the original computed a normalized ``homography_gt`` from
        ``H_gt`` and never used it — that dead code is removed.
        """
        # generate input data
        torch.random.manual_seed(0)
        data_dev = utils.dict_to(data, device, dtype)
        pts_src = data_dev['pts0']
        pts_dst = data_dev['pts1']
        ransac = RANSAC('homography', inl_th=0.5,
                        max_iter=20).to(device=device, dtype=dtype)
        # compute transform from source to target
        dst_homo_src, _ = ransac(pts_src, pts_dst)

        # Clean data: the estimated homography must reproject almost exactly.
        assert_close(transform_points(dst_homo_src[None], pts_src[None]),
                     pts_dst[None],
                     rtol=1e-3,
                     atol=1e-3)
예제 #5
0
 def test_real_clean(self, device, dtype, data):
     """Fundamental-matrix RANSAC on clean data must leave no gross outliers."""
     torch.random.manual_seed(0)
     # generate input data
     batch = utils.dict_to(data, device, dtype)
     src = batch['pts0']
     dst = batch['pts1']
     # Estimate the fundamental matrix from source to target points.
     estimator = RANSAC('fundamental',
                        inl_th=0.5,
                        max_iter=20,
                        max_lo_iters=10).to(device=device, dtype=dtype)
     f_mat, _ = estimator(src, dst)
     # A Sampson distance above 1px counts as a gross error; clean
     # correspondences should produce none at all.
     distances = sampson_epipolar_distance(src[None],
                                           dst[None],
                                           f_mat[None],
                                           squared=False)
     assert (distances > 1.0).sum().item() == 0
예제 #6
0
 def test_real_keynet(self, device, dtype, data):
     """KeyNet+HardNet matching validated end-to-end via a RANSAC homography."""
     torch.random.manual_seed(0)
     # An integration check rather than a strict unit test.
     matcher = LocalFeatureMatcher(
         KeyNetHardNet(500),
         DescriptorMatcher('snn', 0.9)).to(device, dtype)
     estimator = RANSAC('homography', 1.0, 2048, 10).to(device, dtype)
     batch = utils.dict_to(data, device, dtype)
     src = batch['pts0']
     dst = batch['pts1']
     with torch.no_grad():
         matches = matcher(batch)
     model, inliers = estimator(matches['keypoints0'], matches['keypoints1'])
     # Enough inliers proves the matching worked at all.
     assert inliers.sum().item() > 50
     # A reprojection error of up to 5px is acceptable.
     assert_close(transform_points(model[None], src[None]),
                  dst[None],
                  rtol=5e-2,
                  atol=5)
예제 #7
0
    def test_real_dirty(self, device, dtype, data):
        """Fundamental-matrix RANSAC on noisy LoFTR tentatives tolerates at
        most one gross error on the ground-truth point pairs.
        """
        torch.random.manual_seed(0)
        # generate input data
        batch = utils.dict_to(data, device, dtype)
        src = batch['pts0']
        dst = batch['pts1']

        tentatives0 = batch['loftr_indoor_tentatives0']
        tentatives1 = batch['loftr_indoor_tentatives1']

        estimator = RANSAC('fundamental',
                           inl_th=1.0,
                           max_iter=20,
                           max_lo_iters=10).to(device=device, dtype=dtype)
        # Fit the fundamental matrix on the (noisy) tentative correspondences.
        f_mat, _ = estimator(tentatives0, tentatives1)
        # Check against the ground-truth pairs: at most one pair may exceed
        # a 10px Sampson epipolar distance.
        distances = sampson_epipolar_distance(src[None],
                                              dst[None],
                                              f_mat[None],
                                              squared=False)
        assert (distances > 10.0).sum().item() < 2
예제 #8
0
    def test_real_dirty(self, device, dtype, data):
        """RANSAC homography on noisy LoFTR outdoor tentatives.

        Fixes: (1) removed the unused normalized ``homography_gt`` (dead
        code); (2) the tolerance arguments of ``assert_close`` were
        transposed — ``rtol=5, atol=0.15`` allows 500% relative error,
        making the check nearly vacuous and contradicting the "5px is OK"
        comment and the sibling tests which use ``atol=5``.
        """
        # generate input data
        torch.random.manual_seed(0)
        data_dev = utils.dict_to(data, device, dtype)
        pts_src = data_dev['pts0']
        pts_dst = data_dev['pts1']

        kp1 = data_dev['loftr_outdoor_tentatives0']
        kp2 = data_dev['loftr_outdoor_tentatives1']

        ransac = RANSAC('homography', inl_th=3.0, max_iter=30,
                        max_lo_iters=10).to(device=device, dtype=dtype)
        # compute transform from source to target
        dst_homo_src, _ = ransac(kp1, kp2)

        # Reprojection error of 5px is OK
        assert_close(transform_points(dst_homo_src[None], pts_src[None]),
                     pts_dst[None],
                     rtol=0.15,
                     atol=5)
예제 #9
0
    def test_registration_real(self, device, dtype, data):
        """Image registration on real data: the recovered homography must map
        the image-0 bounding box close to where the ground truth maps it.

        Fix: image shapes were read from the raw ``data`` dict while every
        other access goes through ``data_dev`` — now consistent.
        """
        data_dev = utils.dict_to(data, device, dtype)
        IR = ImageRegistrator('homography',
                              num_iterations=1200,
                              lr=2e-2,
                              pyramid_levels=5).to(device, dtype)
        model = IR.register(data_dev['image0'], data_dev['image1'])
        # Normalized ground-truth homography (inverse of H_gt, scaled so the
        # bottom-right entry is 1).
        homography_gt = torch.inverse(data_dev['H_gt'])
        homography_gt = homography_gt / homography_gt[2, 2]
        h0, w0 = data_dev['image0'].shape[2], data_dev['image0'].shape[3]
        h1, w1 = data_dev['image1'].shape[2], data_dev['image1'].shape[3]

        # The registrator works in normalized coordinates; convert its output
        # back to pixels and normalize the same way as the ground truth.
        model_denormalized = denormalize_homography(model, (h0, w0), (h1, w1))
        model_denormalized = model_denormalized / model_denormalized[0, 2, 2]

        # Corners of image0, in pixel coordinates.
        bbox = torch.tensor([[[0, 0], [w0, 0], [w0, h0], [0, h0]]],
                            device=device,
                            dtype=dtype)
        bbox_in_2_gt = transform_points(homography_gt[None], bbox)
        bbox_in_2_gt_est = transform_points(model_denormalized, bbox)
        # The tolerance is huge, because the error is in pixels
        # and transformation is quite significant, so
        # 15 px  reprojection error is not super huge
        assert_close(bbox_in_2_gt, bbox_in_2_gt_est, atol=15, rtol=0.1)