Example #1
def recover_relative_pose_from_essential_matrix(
    i2Ei1: np.ndarray,
    verified_coordinates_i1: np.ndarray,
    verified_coordinates_i2: np.ndarray,
    camera_intrinsics_i1: Cal3Bundler,
    camera_intrinsics_i2: Cal3Bundler,
) -> Tuple[Rot3, Unit3]:
    """Recovers the relative rotation and translation direction from essential matrix and verified correspondences
    using opencv's API.

    Args:
        i2Ei1: essential matrix as a numpy array, of shape 3x3.
        verified_coordinates_i1: coordinates of verified correspondences in image i1, of shape Nx2.
        verified_coordinates_i2: coordinates of verified correspondences in image i2, of shape Nx2.
        camera_intrinsics_i1: intrinsics for image i1.
        camera_intrinsics_i2: intrinsics for image i2.

    Returns:
        relative rotation i2Ri1.
        relative translation direction i2Ui1.
    """
    # obtain points in normalized coordinates using intrinsics.
    normalized_coordinates_i1 = feature_utils.normalize_coordinates(
        verified_coordinates_i1, camera_intrinsics_i1)
    normalized_coordinates_i2 = feature_utils.normalize_coordinates(
        verified_coordinates_i2, camera_intrinsics_i2)

    # use opencv to recover pose
    _, i2Ri1, i2ti1, _ = cv.recoverPose(i2Ei1, normalized_coordinates_i1,
                                        normalized_coordinates_i2)

    return Rot3(i2Ri1), Unit3(i2ti1.squeeze())
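
For reference, the essential matrix consumed here encodes the relative motion directly: with normalized coordinates x1, x2 of the same 3D point and X2 = R @ X1 + t, the epipolar constraint is x2^T @ E @ x1 == 0 with E = [t]x @ R. A minimal construction sketch in plain NumPy (the helper names are hypothetical, not part of gtsfm):

import numpy as np

def skew(t: np.ndarray) -> np.ndarray:
    """Skew-symmetric matrix such that skew(t) @ v == np.cross(t, v)."""
    return np.array([[0.0, -t[2], t[1]],
                     [t[2], 0.0, -t[0]],
                     [-t[1], t[0], 0.0]])

def essential_from_pose(i2Ri1: np.ndarray, i2ti1: np.ndarray) -> np.ndarray:
    """Build i2Ei1 = [i2ti1]x @ i2Ri1 from a known relative rotation and translation."""
    return skew(i2ti1) @ i2Ri1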
Example #2
def recover_relative_pose_from_essential_matrix(
    i2Ei1: Optional[np.ndarray],
    verified_coordinates_i1: np.ndarray,
    verified_coordinates_i2: np.ndarray,
    camera_intrinsics_i1: Cal3Bundler,
    camera_intrinsics_i2: Cal3Bundler,
) -> Tuple[Optional[Rot3], Optional[Unit3]]:
    """Recovers the relative rotation and translation direction from essential matrix and verified correspondences
    using opencv's API.

    Args:
        i2Ei1: essential matrix as a numpy array, of shape 3x3.
        verified_coordinates_i1: coordinates of verified correspondences in image i1, of shape Nx2.
        verified_coordinates_i2: coordinates of verified correspondences in image i2, of shape Nx2.
        camera_intrinsics_i1: intrinsics for image i1.
        camera_intrinsics_i2: intrinsics for image i2.

    Returns:
        relative rotation i2Ri1, or None if the input essential matrix is None.
        relative translation direction i2Ui1, or None if the input essential matrix is None.
    """
    if i2Ei1 is None:
        return None, None

    # obtain points in normalized coordinates using intrinsics.
    normalized_coordinates_i1 = feature_utils.normalize_coordinates(
        verified_coordinates_i1, camera_intrinsics_i1)
    normalized_coordinates_i2 = feature_utils.normalize_coordinates(
        verified_coordinates_i2, camera_intrinsics_i2)

    # use opencv to recover pose
    _, i2Ri1, i2ti1, _ = cv.recoverPose(i2Ei1, normalized_coordinates_i1,
                                        normalized_coordinates_i2)
    i2Ri1 = Rot3(i2Ri1)
    i2Ui1 = Unit3(i2ti1.squeeze())
    i2Ei1_reconstructed = EssentialMatrix(i2Ri1, i2Ui1).matrix()

    # normalizing the two essential matrices
    i2Ei1_normalized = i2Ei1 / np.linalg.norm(i2Ei1, axis=None)
    i2Ei1_reconstructed_normalized = i2Ei1_reconstructed / np.linalg.norm(
        i2Ei1_reconstructed, axis=None)
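    # Note: an essential matrix is defined only up to scale, and recoverPose
    # fixes a particular sign of t, so a pure sign flip between the two
    # normalized matrices would also trigger the warning below.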
    if not np.allclose(i2Ei1_normalized, i2Ei1_reconstructed_normalized):
        logger.warning(
            "Recovered R, t cannot create the input Essential Matrix")

    return i2Ri1, i2Ui1
Example #3
    def test_normalize_coordinates(self):
        coordinates = np.array([[10.0, 20.0], [25.0, 12.0], [30.0, 33.0]])

        intrinsics = Cal3Bundler(fx=100, k1=0.0, k2=0.0, u0=20.0, v0=30.0)

        normalized_coordinates = feature_utils.normalize_coordinates(coordinates, intrinsics)

        expected_coordinates = np.array([[-0.1, -0.1], [0.05, -0.18], [0.1, 0.03]])

        np.testing.assert_allclose(normalized_coordinates, expected_coordinates)
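
A minimal sketch of what normalize_coordinates is expected to compute for a distortion-free Cal3Bundler (k1 = k2 = 0), consistent with the expected values in the test above: subtract the principal point (u0, v0) and divide by the focal length fx. The actual gtsfm implementation may instead invert the full calibration (including radial distortion) per point; the function below is an assumption for illustration.

import numpy as np

def normalize_coordinates_sketch(coordinates: np.ndarray, fx: float,
                                 u0: float, v0: float) -> np.ndarray:
    """Map Nx2 pixel coordinates to normalized image coordinates."""
    return (coordinates - np.array([u0, v0])) / fx

coords = np.array([[10.0, 20.0], [25.0, 12.0], [30.0, 33.0]])
print(normalize_coordinates_sketch(coords, fx=100.0, u0=20.0, v0=30.0))
# [[-0.1  -0.1 ]
#  [ 0.05 -0.18]
#  [ 0.1   0.03]]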
Example #4
File: metrics.py Project: asa/gtsfm
def count_correct_correspondences(
    keypoints_i1: Keypoints,
    keypoints_i2: Keypoints,
    intrinsics_i1: Cal3Bundler,
    intrinsics_i2: Cal3Bundler,
    i2Ti1: Pose3,
    epipolar_dist_threshold: float,
) -> int:
    """Checks the correspondences for epipolar distances and counts ones which are below the threshold.

    Args:
        keypoints_i1: keypoints in image i1.
        keypoints_i2: corresponding keypoints in image i2.
        intrinsics_i1: intrinsics for i1.
        intrinsics_i2: intrinsics for i2.
        i2Ti1: relative pose of camera i1 in the frame of camera i2.
        epipolar_dist_threshold: max acceptable distance for a correct correspondence.

    Raises:
        ValueError: when the numbers of keypoints in the two images do not match.

    Returns:
        Number of correspondences which are correct.
    """
    # TODO: add unit test, with mocking.
    if len(keypoints_i1) != len(keypoints_i2):
        raise ValueError("Keypoints must have same counts")

    if len(keypoints_i1) == 0:
        return 0

    normalized_coords_i1 = feature_utils.normalize_coordinates(
        keypoints_i1.coordinates, intrinsics_i1)
    normalized_coords_i2 = feature_utils.normalize_coordinates(
        keypoints_i2.coordinates, intrinsics_i2)
    i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))

    epipolar_distances = verification_utils.compute_epipolar_distances(
        normalized_coords_i1, normalized_coords_i2, i2Ei1)
    return np.count_nonzero(epipolar_distances < epipolar_dist_threshold)
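
One common definition of the epipolar distance computed by compute_epipolar_distances is the distance from each point in image i2 to the epipolar line induced by its correspondence in image i1. A hedged sketch follows; the actual gtsfm utility may use a symmetric or Sampson variant, and the function name here is hypothetical.

import numpy as np

def epipolar_line_distances(x1: np.ndarray, x2: np.ndarray,
                            i2Ei1: np.ndarray) -> np.ndarray:
    """x1, x2: Nx2 normalized correspondences; i2Ei1: 3x3 essential matrix.

    Returns the point-to-epipolar-line distance in image i2 for each pair.
    """
    x1_h = np.hstack([x1, np.ones((x1.shape[0], 1))])  # Nx3 homogeneous points
    x2_h = np.hstack([x2, np.ones((x2.shape[0], 1))])
    lines_i2 = x1_h @ i2Ei1.T                          # epipolar lines in image i2, row-wise
    numer = np.abs(np.sum(x2_h * lines_i2, axis=1))    # |x2^T E x1| per pair
    denom = np.linalg.norm(lines_i2[:, :2], axis=1)    # line-normal magnitude
    return numer / denom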
Example #5
    def verify(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray, float]:
        """Performs verification of correspondences between two images to recover the relative pose and indices of
        verified correspondences.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.
        
        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
            Inlier ratio w.r.t. the estimated model, i.e. #RANSAC inliers / #putative matches.
        """
        if match_indices.shape[0] < self._min_matches:
            return self._failure_result

        if self._use_intrinsics_in_verification:
            uv_norm_i1 = feature_utils.normalize_coordinates(
                keypoints_i1.coordinates, camera_intrinsics_i1)
            uv_norm_i2 = feature_utils.normalize_coordinates(
                keypoints_i2.coordinates, camera_intrinsics_i2)

            # OpenCV's essential-matrix estimation can fail with too few
            # correspondences, so require a margin above the 5-point minimum.
            if match_indices.shape[0] < 6:
                return self._failure_result

            if np.amax(match_indices[:, 1]) >= uv_norm_i2.shape[0]:
                logger.error("Out-of-bounds access with keypoints %s",
                             keypoints_i2.coordinates[:10])
            if np.amax(match_indices[:, 0]) >= uv_norm_i1.shape[0]:
                logger.error("Out-of-bounds access with keypoints %s",
                             keypoints_i1.coordinates[:10])

            # Use larger focal length, among the two choices, to yield a stricter threshold as (threshold_px / fx).
            fx = max(camera_intrinsics_i1.K()[0, 0],
                     camera_intrinsics_i2.K()[0, 0])
            i2Ei1, inlier_mask = self.estimate_E(uv_norm_i1=uv_norm_i1,
                                                 uv_norm_i2=uv_norm_i2,
                                                 match_indices=match_indices,
                                                 fx=fx)
        else:
            i2Fi1, inlier_mask = self.estimate_F(keypoints_i1=keypoints_i1,
                                                 keypoints_i2=keypoints_i2,
                                                 match_indices=match_indices)
            i2Ei1 = verification_utils.fundamental_to_essential_matrix(
                i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2)

        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        v_corr_idxs = match_indices[inlier_idxs]
        inlier_ratio_est_model = np.mean(inlier_mask)
        (i2Ri1, i2Ui1
         ) = verification_utils.recover_relative_pose_from_essential_matrix(
             i2Ei1,
             keypoints_i1.coordinates[v_corr_idxs[:, 0]],
             keypoints_i2.coordinates[v_corr_idxs[:, 1]],
             camera_intrinsics_i1,
             camera_intrinsics_i2,
         )
        return i2Ri1, i2Ui1, v_corr_idxs, inlier_ratio_est_model
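
The focal-length trick in the comment above converts a RANSAC threshold from pixels into normalized coordinates: dividing by the larger of the two focal lengths yields the smaller, i.e. stricter, normalized threshold. With hypothetical values:

threshold_px = 0.5                                  # pixel-space RANSAC threshold
fx_i1, fx_i2 = 520.0, 480.0                         # focal lengths of the two cameras
threshold_norm = threshold_px / max(fx_i1, fx_i2)   # ~9.6e-4 in normalized coordinates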
Example #6
    def verify_with_exact_intrinsics(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray]:
        """Estimates the essential matrix and verifies the feature matches.

        Note: this function is preferred when camera intrinsics are known. The
        feature coordinates are normalized and the essential matrix is directly
        estimated.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of
                           shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.

        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3.
                These indices are a subset of match_indices.
        """
        verified_indices = np.zeros((0, 2), dtype=np.uint32)

        # return early if we have fewer than the minimum number of matches
        if match_indices.shape[0] < self.min_pts:
            logger.info(
                "Too few match indices were provided to the verifier, returning early with None output"
            )
            return None, None, verified_indices

        uv_norm_i1 = feature_utils.normalize_coordinates(
            keypoints_i1.coordinates, camera_intrinsics_i1)
        uv_norm_i2 = feature_utils.normalize_coordinates(
            keypoints_i2.coordinates, camera_intrinsics_i2)
        K = np.eye(3)

        i2Ei1, inlier_mask = cv2.findEssentialMat(
            uv_norm_i1[match_indices[:, 0]],
            uv_norm_i2[match_indices[:, 1]],
            K,
            method=cv2.RANSAC,
            threshold=NORMALIZED_COORD_RANSAC_THRESH,
            prob=DEFAULT_RANSAC_SUCCESS_PROB,
        )
        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        (
            i2Ri1,
            i2Ui1,
        ) = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1,
            keypoints_i1.coordinates[match_indices[inlier_idxs, 0]],
            keypoints_i2.coordinates[match_indices[inlier_idxs, 1]],
            camera_intrinsics_i1,
            camera_intrinsics_i2,
        )

        return i2Ri1, i2Ui1, match_indices[inlier_idxs]
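
The findEssentialMat -> recoverPose pipeline used above can be sanity-checked end to end on synthetic data. The sketch below (all values hypothetical; needs only opencv-python and NumPy) projects random 3D points through two cameras with a known relative motion, then checks that the recovered rotation matches.

import cv2
import numpy as np

rng = np.random.default_rng(0)

# Ground-truth relative motion: 10 degree yaw plus a sideways translation,
# mapping camera-1 coordinates into camera-2 coordinates (X2 = R @ X1 + t).
theta = np.deg2rad(10.0)
R = np.array([[np.cos(theta), 0.0, np.sin(theta)],
              [0.0, 1.0, 0.0],
              [-np.sin(theta), 0.0, np.cos(theta)]])
t = np.array([1.0, 0.0, 0.0])

# Random 3D points in front of both cameras (camera 1 at the origin).
pts_3d = rng.uniform([-1.0, -1.0, 4.0], [1.0, 1.0, 8.0], size=(50, 3))
x1 = pts_3d[:, :2] / pts_3d[:, 2:]          # normalized coordinates in image 1
pts_in_2 = pts_3d @ R.T + t
x2 = pts_in_2[:, :2] / pts_in_2[:, 2:]      # normalized coordinates in image 2

E, inlier_mask = cv2.findEssentialMat(x1, x2, np.eye(3), method=cv2.RANSAC,
                                      threshold=1e-3, prob=0.999)
_, R_est, t_est, _ = cv2.recoverPose(E, x1, x2)

cos_err = np.clip((np.trace(R_est.T @ R) - 1.0) / 2.0, -1.0, 1.0)
print(f"rotation error: {np.rad2deg(np.arccos(cos_err)):.4f} deg")
print("recovered translation direction:", t_est.ravel())  # ~ +/- [1, 0, 0]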
Example #7
    def verify(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray]:
        """Performs verification of correspondences between two images to recover the relative pose and indices of
        verified correspondences.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.

        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
        """
        if match_indices.shape[0] < self._min_matches:
            return self._failure_result

        if self._use_intrinsics_in_verification:
            uv_norm_i1 = feature_utils.normalize_coordinates(keypoints_i1.coordinates, camera_intrinsics_i1)
            uv_norm_i2 = feature_utils.normalize_coordinates(keypoints_i2.coordinates, camera_intrinsics_i2)
            K = np.eye(3)

            i2Ei1, inlier_mask = cv2.findEssentialMat(
                uv_norm_i1[match_indices[:, 0]],
                uv_norm_i2[match_indices[:, 1]],
                K,
                method=cv2.RANSAC,
                threshold=NORMALIZED_COORD_RANSAC_THRESH,
                prob=DEFAULT_RANSAC_SUCCESS_PROB,
            )
        else:
            i2Fi1, inlier_mask = cv2.findFundamentalMat(
                keypoints_i1.extract_indices(match_indices[:, 0]).coordinates,
                keypoints_i2.extract_indices(match_indices[:, 1]).coordinates,
                method=cv2.FM_RANSAC,
                ransacReprojThreshold=PIXEL_COORD_RANSAC_THRESH,
                confidence=DEFAULT_RANSAC_SUCCESS_PROB,
                maxIters=10000,
            )

            i2Ei1 = verification_utils.fundamental_to_essential_matrix(
                i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2
            )

        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        (i2Ri1, i2Ui1) = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1,
            keypoints_i1.coordinates[match_indices[inlier_idxs, 0]],
            keypoints_i2.coordinates[match_indices[inlier_idxs, 1]],
            camera_intrinsics_i1,
            camera_intrinsics_i2,
        )

        return i2Ri1, i2Ui1, match_indices[inlier_idxs]
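
When intrinsics are not trusted, the code above first estimates a fundamental matrix and converts it to an essential matrix. A minimal sketch of that conversion, assuming the standard relation E = K2^T @ F @ K1 (the actual verification_utils.fundamental_to_essential_matrix takes Cal3Bundler objects and may differ in detail; the function name below is hypothetical):

import numpy as np

def fundamental_to_essential_sketch(i2Fi1: np.ndarray, K1: np.ndarray,
                                    K2: np.ndarray) -> np.ndarray:
    """i2Fi1: 3x3 fundamental matrix; K1, K2: 3x3 intrinsic matrices.

    Since pixel coordinates satisfy x2_px^T @ F @ x1_px == 0 and
    x_px = K @ x_norm, the normalized constraint yields E = K2^T @ F @ K1.
    """
    return K2.T @ i2Fi1 @ K1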