Example #1
    def test_recover_relative_pose_from_essential_matrix(self):
        """Test for function to extract relative pose from essential matrix."""

        # simulate correspondences and the essential matrix
        corr_i1, corr_i2, i2Ei1 = simulate_two_planes_scene(10, 10)

        i2Ri1, i2Ui1 = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1.matrix(), corr_i1.coordinates, corr_i2.coordinates,
            Cal3Bundler(), Cal3Bundler())

        # compare the recovered R and U with the ground truth
        self.assertTrue(i2Ri1.equals(i2Ei1.rotation(), 1e-3))
        self.assertTrue(i2Ui1.equals(i2Ei1.direction(), 1e-3))
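
For context, the recovery step decomposes the essential matrix: E factors as [t]x R, and its SVD yields two candidate rotations plus a translation direction known only up to sign; a cheirality (positive point depth) check over the verified correspondences then selects the single physically valid pose. A minimal NumPy sketch of that decomposition (illustrative names, not gtsfm's internals):

import numpy as np

def decompose_essential_matrix(E: np.ndarray):
    """Return the two candidate rotations and the translation direction of E = [t]x R."""
    U, _, Vt = np.linalg.svd(E)
    # Flip signs so that both orthogonal factors are proper rotations (det = +1).
    if np.linalg.det(U) < 0:
        U = -U
    if np.linalg.det(Vt) < 0:
        Vt = -Vt
    W = np.array([[0.0, -1.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 0.0, 1.0]])
    R1 = U @ W @ Vt
    R2 = U @ W.T @ Vt
    t = U[:, 2]  # translation direction, recovered only up to sign
    return R1, R2, t

The four pose hypotheses are (R1, +-t) and (R2, +-t); the test above only asserts that the selected winner matches the simulated ground truth to 1e-3.
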
Example #2
File: degensac.py Project: asa/gtsfm
    def verify_with_approximate_intrinsics(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray]:
        """Estimates the essential matrix and verifies the feature matches.

        Note: this function is preferred when camera intrinsics are approximate (i.e. from image size/EXIF). The feature
        coordinates are used to compute the fundamental matrix, which is then converted to the essential matrix.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.

        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
        """
        verified_indices = np.array([], dtype=np.uint32)

        # early exit if we do not have the minimum number of points required
        if match_indices.shape[0] < self.min_pts:
            return None, None, verified_indices

        i2Fi1, mask = pydegensac.findFundamentalMatrix(
            keypoints_i1.coordinates[match_indices[:, 0]],
            keypoints_i2.coordinates[match_indices[:, 1]],
        )

        inlier_idxes = np.where(mask.ravel() == 1)[0]

        i2Ei1_matrix = verification_utils.fundamental_to_essential_matrix(
            i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2)

        i2Ri1, i2Ui1 = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1_matrix,
            keypoints_i1.coordinates[match_indices[inlier_idxes, 0]],
            keypoints_i2.coordinates[match_indices[inlier_idxes, 1]],
            camera_intrinsics_i1,
            camera_intrinsics_i2,
        )

        return i2Ri1, i2Ui1, match_indices[inlier_idxes]
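
The conversion step above relies on the standard relation between the fundamental and essential matrices, E = K2^T F K1 for calibration matrices K1 and K2. A hedged sketch of such a helper (the body is an assumption about what gtsfm's fundamental_to_essential_matrix computes; gtsam's Cal3Bundler exposes its 3x3 calibration matrix via K()):

import numpy as np
from gtsam import Cal3Bundler

def fundamental_to_essential(i2Fi1: np.ndarray,
                             intrinsics_i1: Cal3Bundler,
                             intrinsics_i2: Cal3Bundler) -> np.ndarray:
    """Map a pixel-space F to a normalized-space E via E = K2^T @ F @ K1."""
    K1 = intrinsics_i1.K()  # 3x3 calibration matrix of image i1
    K2 = intrinsics_i2.K()  # 3x3 calibration matrix of image i2
    return K2.T @ i2Fi1 @ K1
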
Example #3
File: degensac.py Project: borglab/gtsfm
    def verify(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray, float]:
        """Performs verification of correspondences between two images to recover the relative pose and indices of
        verified correspondences.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.

        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
            Inlier ratio w.r.t. the estimated model, i.e. #final RANSAC inliers / #putative matches.
        """
        if match_indices.shape[0] < self._min_matches:
            return self._failure_result

        i2Fi1, inlier_mask = pydegensac.findFundamentalMatrix(
            keypoints_i1.coordinates[match_indices[:, 0]],
            keypoints_i2.coordinates[match_indices[:, 1]],
            px_th=self._estimation_threshold_px,
        )
        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        v_corr_idxs = match_indices[inlier_idxs]
        inlier_ratio_est_model = np.mean(inlier_mask)

        i2Ei1_matrix = verification_utils.fundamental_to_essential_matrix(
            i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2)

        i2Ri1, i2Ui1 = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1_matrix,
            keypoints_i1.coordinates[match_indices[inlier_idxs, 0]],
            keypoints_i2.coordinates[match_indices[inlier_idxs, 1]],
            camera_intrinsics_i1,
            camera_intrinsics_i2,
        )

        return i2Ri1, i2Ui1, v_corr_idxs, inlier_ratio_est_model
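
An illustrative check (not repo code) of how the 0/1 inlier mask above becomes the verified-index array and the inlier ratio:

import numpy as np

match_indices = np.array([[0, 3], [1, 5], [2, 7], [4, 9]])  # putative matches, N3 = 4
inlier_mask = np.array([1, 0, 1, 1])                        # as returned by the estimator

inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]  # array([0, 2, 3])
v_corr_idxs = match_indices[inlier_idxs]             # verified (N, 2) subset, N = 3
inlier_ratio_est_model = np.mean(inlier_mask)        # 3 inliers / 4 putatives = 0.75
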
Example #4
    def test_recover_relative_pose_from_essential_matrix_none(self):
        """Test for function to extract relative pose from essential matrix."""

        # simulate correspondences and the essential matrix
        corr_i1, corr_i2, _ = simulate_two_planes_scene(10, 10)

        i2Ri1, i2Ui1 = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1=None,
            verified_coordinates_i1=corr_i1.coordinates,
            verified_coordinates_i2=corr_i2.coordinates,
            camera_intrinsics_i1=Cal3Bundler(),
            camera_intrinsics_i2=Cal3Bundler(),
        )

        # compare the recovered R and U with the ground truth
        self.assertIsNone(i2Ri1)
        self.assertIsNone(i2Ui1)
Example #5
File: loransac.py Project: borglab/gtsfm
    def verify(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray, float]:
        """Performs verification of correspondences between two images to recover the relative pose and indices of
        verified correspondences.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.

        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
            Inlier ratio w.r.t. the estimated model, i.e. #final RANSAC inliers / #putative matches.
        """
        if match_indices.shape[0] < self._min_matches:
            logger.info(
                "[LORANSAC] Not enough correspondences for verification.")
            return self._failure_result

        uv_i1 = keypoints_i1.coordinates[match_indices[:, 0]]
        uv_i2 = keypoints_i2.coordinates[match_indices[:, 1]]

        if self._use_intrinsics_in_verification:
            result_dict = self.__estimate_essential_matrix(
                uv_i1, uv_i2, camera_intrinsics_i1, camera_intrinsics_i2)
        else:
            result_dict = pycolmap.fundamental_matrix_estimation(
                uv_i1,
                uv_i2,
                max_error_px=self._estimation_threshold_px,
                min_inlier_ratio=MIN_INLIER_RATIO,
                min_num_trials=MIN_NUM_TRIALS,
                max_num_trials=MAX_NUM_TRIALS,
                confidence=CONFIDENCE,
            )

        success = result_dict["success"]
        if not success:
            matrix_type = "Essential" if self._use_intrinsics_in_verification else "Fundamental"
            logger.info(
                f"[LORANSAC] {matrix_type} matrix estimation unsuccessful.")
            return self._failure_result

        num_inliers = result_dict["num_inliers"]
        inlier_ratio_est_model = num_inliers / match_indices.shape[0]

        inlier_mask = np.array(result_dict["inliers"])
        v_corr_idxs = match_indices[inlier_mask]
        if self._use_intrinsics_in_verification:
            # case where E-matrix was estimated
            # See https://github.com/colmap/colmap/blob/dev/src/base/pose.h#L72 for quaternion coefficient ordering
            qw, qx, qy, qz = result_dict["qvec"]
            i2Ui1 = result_dict["tvec"]
            i2Ri1 = Rot3(qw, qx, qy, qz)
            i2Ui1 = Unit3(i2Ui1)
        else:
            # case where F-matrix was estimated
            i2Fi1 = result_dict["F"]
            i2Ei1 = verification_utils.fundamental_to_essential_matrix(
                i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2)
            (
                i2Ri1, i2Ui1
            ) = verification_utils.recover_relative_pose_from_essential_matrix(
                i2Ei1=i2Ei1,
                verified_coordinates_i1=uv_i1[inlier_mask],
                verified_coordinates_i2=uv_i2[inlier_mask],
                camera_intrinsics_i1=camera_intrinsics_i1,
                camera_intrinsics_i2=camera_intrinsics_i2,
            )
        return i2Ri1, i2Ui1, v_corr_idxs, inlier_ratio_est_model
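
The quaternion unpacking above depends on COLMAP's w-first qvec ordering; gtsam's Rot3 quaternion constructor is also w-first, so the coefficients pass through unchanged. A small sanity check (illustrative, not repo code):

import numpy as np
from gtsam import Rot3

qvec = np.array([1.0, 0.0, 0.0, 0.0])  # COLMAP ordering: (qw, qx, qy, qz)
qw, qx, qy, qz = qvec
# The identity quaternion must map to the identity rotation.
assert np.allclose(Rot3(qw, qx, qy, qz).matrix(), np.eye(3))
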
Example #6
    def verify(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray, float]:
        """Performs verification of correspondences between two images to recover the relative pose and indices of
        verified correspondences.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.
        
        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
            Inlier ratio w.r.t. the estimated model, i.e. #RANSAC inliers / #putative matches.
        """
        if match_indices.shape[0] < self._min_matches:
            return self._failure_result

        if self._use_intrinsics_in_verification:
            uv_norm_i1 = feature_utils.normalize_coordinates(
                keypoints_i1.coordinates, camera_intrinsics_i1)
            uv_norm_i2 = feature_utils.normalize_coordinates(
                keypoints_i2.coordinates, camera_intrinsics_i2)

            # cv2.findEssentialMat can fail when given too few correspondences
            if match_indices.shape[0] < 6:
                return self._failure_result

            if np.amax(match_indices[:, 1]) >= uv_norm_i2.shape[0]:
                print("Out of bounds access w/ keypoints",
                      keypoints_i2.coordinates[:10])
            if np.amax(match_indices[:, 0]) >= uv_norm_i1.shape[0]:
                print("Out of bounds access w/ keypoints",
                      keypoints_i1.coordinates[:10])

            # Use the larger of the two focal lengths to yield the stricter threshold (threshold_px / fx).
            fx = max(camera_intrinsics_i1.K()[0, 0],
                     camera_intrinsics_i2.K()[0, 0])
            i2Ei1, inlier_mask = self.estimate_E(uv_norm_i1=uv_norm_i1,
                                                 uv_norm_i2=uv_norm_i2,
                                                 match_indices=match_indices,
                                                 fx=fx)
        else:
            i2Fi1, inlier_mask = self.estimate_F(keypoints_i1=keypoints_i1,
                                                 keypoints_i2=keypoints_i2,
                                                 match_indices=match_indices)
            i2Ei1 = verification_utils.fundamental_to_essential_matrix(
                i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2)

        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        v_corr_idxs = match_indices[inlier_idxs]
        inlier_ratio_est_model = np.mean(inlier_mask)
        (i2Ri1, i2Ui1
         ) = verification_utils.recover_relative_pose_from_essential_matrix(
             i2Ei1,
             keypoints_i1.coordinates[v_corr_idxs[:, 0]],
             keypoints_i2.coordinates[v_corr_idxs[:, 1]],
             camera_intrinsics_i1,
             camera_intrinsics_i2,
         )
        return i2Ri1, i2Ui1, v_corr_idxs, inlier_ratio_est_model
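
The focal-length comment above is a unit conversion: a RANSAC threshold expressed in pixels must be divided by a focal length to apply in normalized coordinates, and dividing by the larger of the two focal lengths gives the smaller, i.e. stricter, normalized threshold. With illustrative numbers:

threshold_px = 4.0
fx_i1, fx_i2 = 600.0, 800.0
fx = max(fx_i1, fx_i2)
normalized_threshold = threshold_px / fx  # 0.005, versus 4.0 / 600.0 ~= 0.0067
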
Example #7
    def verify_with_exact_intrinsics(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray]:
        """Estimates the essential matrix and verifies the feature matches.

        Note: this function is preferred when camera intrinsics are known. The
        feature coordinates are normalized and the essential matrix is directly
        estimated.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of
                           shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.

        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3.
                These indices are a subset of match_indices.
        """
        verified_indices = np.array([], dtype=np.uint32)

        # early exit if we do not have the minimum number of points required
        if match_indices.shape[0] < self.min_pts:
            logger.info(
                "Too few match indices were provided to the verifier, returning early with None output"
            )
            return None, None, verified_indices

        uv_norm_i1 = feature_utils.normalize_coordinates(
            keypoints_i1.coordinates, camera_intrinsics_i1)
        uv_norm_i2 = feature_utils.normalize_coordinates(
            keypoints_i2.coordinates, camera_intrinsics_i2)
        K = np.eye(3)

        i2Ei1, inlier_mask = cv2.findEssentialMat(
            uv_norm_i1[match_indices[:, 0]],
            uv_norm_i2[match_indices[:, 1]],
            K,
            method=cv2.RANSAC,
            threshold=NORMALIZED_COORD_RANSAC_THRESH,
            prob=DEFAULT_RANSAC_SUCCESS_PROB,
        )
        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        (
            i2Ri1,
            i2Ui1,
        ) = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1,
            keypoints_i1.coordinates[match_indices[inlier_idxs, 0]],
            keypoints_i2.coordinates[match_indices[inlier_idxs, 1]],
            camera_intrinsics_i1,
            camera_intrinsics_i2,
        )

        return i2Ri1, i2Ui1, match_indices[inlier_idxs]
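
For the pinhole part of the normalization used above, mapping pixel coordinates to the normalized image plane is multiplication by K^{-1} in homogeneous coordinates. A sketch under that assumption (the helper name is hypothetical, and gtsfm's feature_utils.normalize_coordinates additionally handles Cal3Bundler's radial distortion, which this omits):

import numpy as np

def normalize_pixel_coordinates(uv: np.ndarray, K: np.ndarray) -> np.ndarray:
    """uv: (N, 2) pixel coordinates; K: 3x3 calibration matrix."""
    uv_h = np.hstack([uv, np.ones((uv.shape[0], 1))])  # lift to homogeneous (N, 3)
    uv_norm = (np.linalg.inv(K) @ uv_h.T).T
    return uv_norm[:, :2]
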
Example #8
    def verify(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray]:
        """Performs verification of correspondences between two images to recover the relative pose and indices of
        verified correspondences.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.

        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
        """
        if match_indices.shape[0] < self._min_matches:
            return self._failure_result

        if self._use_intrinsics_in_verification:
            uv_norm_i1 = feature_utils.normalize_coordinates(keypoints_i1.coordinates, camera_intrinsics_i1)
            uv_norm_i2 = feature_utils.normalize_coordinates(keypoints_i2.coordinates, camera_intrinsics_i2)
            K = np.eye(3)

            i2Ei1, inlier_mask = cv2.findEssentialMat(
                uv_norm_i1[match_indices[:, 0]],
                uv_norm_i2[match_indices[:, 1]],
                K,
                method=cv2.RANSAC,
                threshold=NORMALIZED_COORD_RANSAC_THRESH,
                prob=DEFAULT_RANSAC_SUCCESS_PROB,
            )
        else:
            i2Fi1, inlier_mask = cv2.findFundamentalMat(
                keypoints_i1.extract_indices(match_indices[:, 0]).coordinates,
                keypoints_i2.extract_indices(match_indices[:, 1]).coordinates,
                method=cv2.FM_RANSAC,
                ransacReprojThreshold=PIXEL_COORD_RANSAC_THRESH,
                confidence=DEFAULT_RANSAC_SUCCESS_PROB,
                maxIters=10000,
            )

            i2Ei1 = verification_utils.fundamental_to_essential_matrix(
                i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2
            )

        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        (i2Ri1, i2Ui1) = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1,
            keypoints_i1.coordinates[match_indices[inlier_idxs, 0]],
            keypoints_i2.coordinates[match_indices[inlier_idxs, 1]],
            camera_intrinsics_i1,
            camera_intrinsics_i2,
        )

        return i2Ri1, i2Ui1, match_indices[inlier_idxs]
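
Finally, for readers without the gtsfm scaffolding, the essential-matrix branch above can be reproduced with OpenCV alone on synthetic, already-normalized correspondences (a self-contained sketch, not repo code):

import cv2
import numpy as np

rng = np.random.default_rng(0)
pts_cam1 = rng.uniform(-1.0, 1.0, (50, 3)) + np.array([0.0, 0.0, 5.0])  # 3D points in frame i1
R_true = cv2.Rodrigues(np.array([0.0, 0.1, 0.0]))[0]                    # small yaw between views
t_true = np.array([1.0, 0.0, 0.0])
pts_cam2 = pts_cam1 @ R_true.T + t_true                                 # i2 convention: x2 = R @ x1 + t

uv1 = pts_cam1[:, :2] / pts_cam1[:, 2:]  # normalized coordinates in view i1
uv2 = pts_cam2[:, :2] / pts_cam2[:, 2:]  # normalized coordinates in view i2

K = np.eye(3)  # identity calibration, since the coordinates are already normalized
E, mask = cv2.findEssentialMat(uv1, uv2, K, method=cv2.RANSAC, threshold=1e-3)
retval, R_est, t_est, _ = cv2.recoverPose(E, uv1, uv2, K, mask=mask)
# R_est plays the role of i2Ri1 and t_est (unit norm) the role of i2Ui1 above.
assert np.allclose(R_est, R_true, atol=1e-3)
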