def test_dirty_points_and_gradcheck(self, batch_size, device, dtype):
    """Iterated DLT should be robust to a single outlier correspondence.

    Builds a random homography, maps 20 source points through it, perturbs
    the last destination point into an outlier, and checks that the
    estimated transform still maps the remaining inliers accurately.

    NOTE(review): despite the name, no gradcheck is performed here — confirm
    whether one was intended (see test_clean_points_and_gradcheck).
    """
    # Generate the source points once. (The original built a throwaway
    # 10-point tensor that was immediately overwritten by this 20-point one;
    # eye_like only needs batch size / device / dtype, so one tensor suffices.)
    points_src = 100. * torch.rand(batch_size, 20, 2, device=device, dtype=dtype)

    # Random homography normalized so that H[:, 2, 2] == 1.
    # NOTE(review): `eye * rand` keeps only the diagonal, i.e. a scale-only
    # transform; presumably `eye + 0.3 * rand` was intended — TODO confirm.
    H = kornia.eye_like(3, points_src)
    H = H * 0.3 * torch.rand_like(H)
    H = H / H[:, 2:3, 2:3]

    points_dst = kornia.transform_points(H, points_src)

    # Make the last correspondence an outlier.
    points_dst[:, -1, :] += 20

    weights = torch.ones(batch_size, 20, device=device, dtype=dtype)

    # Compute transform from source to target (threshold 0.5, 10 iterations).
    dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 0.5, 10)

    # Every point except the outlier must be mapped accurately.
    assert_allclose(
        kornia.transform_points(dst_homo_src, points_src[:, :-1]),
        points_dst[:, :-1], rtol=1e-3, atol=1e-3)
def test_shape(self, batch_size, num_points, device, dtype):
    """The estimated homography is shaped ``(B, 3, 3)`` for any B and N."""
    num_batches, n_pts = batch_size, num_points
    src = torch.rand(num_batches, n_pts, 2, device=device, dtype=dtype)
    dst = torch.rand(num_batches, n_pts, 2, device=device, dtype=dtype)
    confidences = torch.ones(num_batches, n_pts, device=device, dtype=dtype)
    estimate = find_homography_dlt_iterated(src, dst, confidences, 5)
    assert estimate.shape == (num_batches, 3, 3)
def test_clean_points_and_gradcheck(self, batch_size, device):
    """Fit clean correspondences accurately and pass a gradient check.

    ``torch.autograd.gradcheck`` compares analytic gradients against
    numerical Jacobians and needs float64 inputs to be reliable. The
    original assigned ``dtype = torch.float64`` but never used it, so every
    tensor was created in default float32; the dtype is now threaded through
    all tensor constructors.
    """
    dtype = torch.float64

    # Random homography close to identity, normalized so H[:, 2, 2] == 1.
    H = (torch.eye(3, device=device, dtype=dtype)[None].repeat(batch_size, 1, 1) +
         0.3 * torch.rand(batch_size, 3, 3, device=device, dtype=dtype))
    H = H / H[:, 2:3, 2:3]

    points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
    points_dst = kornia.transform_points(H, points_src)
    weights = torch.ones(batch_size, 10, device=device, dtype=dtype)

    # Compute transform from source to target.
    dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 10)

    assert_allclose(
        kornia.transform_points(dst_homo_src, points_src),
        points_dst, rtol=1e-3, atol=1e-4)

    # Gradient check: promote inputs to leaf variables requiring grad.
    points_src = utils.tensor_to_gradcheck_var(points_src)
    points_dst = utils.tensor_to_gradcheck_var(points_dst)
    weights = utils.tensor_to_gradcheck_var(weights)
    assert gradcheck(
        kornia.find_homography_dlt_iterated,
        (points_src, points_dst, weights,),
        rtol=1e-3, atol=1e-4, raise_exception=True)
def test_clean_points(self, batch_size, device, dtype):
    """Outlier-free correspondences are fit accurately by the iterated DLT."""
    src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)

    # Random homography normalized so that H[:, 2, 2] == 1.
    # NOTE(review): `eye * rand` zeroes the off-diagonals, leaving a
    # scale-only transform; `eye + 0.3 * rand` may have been intended — confirm.
    homography = kornia.eye_like(3, src)
    homography = homography * 0.3 * torch.rand_like(homography)
    homography = homography / homography[:, 2:3, 2:3]

    dst = kornia.transform_points(homography, src)
    confidences = torch.ones(batch_size, 10, device=device, dtype=dtype)

    # Estimate the source-to-target transform and verify the reprojection.
    estimate = find_homography_dlt_iterated(src, dst, confidences, 10)
    assert_allclose(
        kornia.transform_points(estimate, src), dst, rtol=1e-3, atol=1e-4)
def _estimate_homography(self, keypoints1: torch.Tensor, keypoints2: torch.Tensor) -> torch.Tensor:
    """Estimate the homography mapping ``keypoints2`` onto ``keypoints1``.

    Args:
        keypoints1: matched keypoint set from an image, shaped as :math:`(N, 2)`.
        keypoints2: matched keypoint set from the other image, shaped as :math:`(N, 2)`.

    Returns:
        the estimated homography, shaped as :math:`(1, 3, 3)`.

    Raises:
        NotImplementedError: if ``self.estimator`` is neither ``"vanilla"``
            nor ``"ransac"``.
    """
    # Fix: the variable name was corrupted to `h**o`, which is not a valid
    # identifier (`**` parses as the power operator); restored as `homography`.
    # The mojibake curly quotes in the error message are repaired as well.
    homography: torch.Tensor
    if self.estimator == "vanilla":
        # Batch the point sets and give every correspondence unit weight.
        homography = find_homography_dlt_iterated(
            keypoints2[None], keypoints1[None],
            torch.ones_like(keypoints1[None, :, 0]))
    elif self.estimator == "ransac":
        homography, _ = self.ransac(keypoints2, keypoints1)
        homography = homography[None]
    else:
        raise NotImplementedError(
            f"Unsupported estimator {self.estimator}. Use 'ransac' or 'vanilla' instead."
        )
    return homography
def test_smoke(self, device, dtype):
    """Minimal run: four correspondences yield a single 3x3 homography."""
    src = torch.rand(1, 4, 2, device=device, dtype=dtype)
    dst = torch.rand(1, 4, 2, device=device, dtype=dtype)
    confidences = torch.ones(1, 4, device=device, dtype=dtype)
    result = find_homography_dlt_iterated(src, dst, confidences, 5)
    assert result.shape == (1, 3, 3)