Example #1
import numpy as np
from gtsam import Cal3Bundler

def fundamental_to_essential_matrix(
        i2Fi1: np.ndarray, camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler) -> np.ndarray:
    """Converts the fundamental matrix to essential matrix using camera intrinsics.

    Args:
        i2Fi1: fundamental matrix which maps points in image #i1 to lines in image #i2.
        camera_intrinsics_i1: intrinsics for image #i1.
        camera_intrinsics_i2: intrinsics for image #i2.

    Returns:
        Estimated essential matrix i2Ei1 as a NumPy array of shape (3, 3).
    """
    return camera_intrinsics_i2.K().T @ i2Fi1 @ camera_intrinsics_i1.K()
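A minimal usage sketch, assuming gtsam's Cal3Bundler constructor (focal length, two radial distortion coefficients, principal point); the intrinsics values and the fundamental matrix below are hypothetical, for illustration only:

import numpy as np
from gtsam import Cal3Bundler

# Hypothetical intrinsics: 500 px / 520 px focal lengths, zero radial
# distortion, principal point at (320, 240).
intrinsics_i1 = Cal3Bundler(500.0, 0.0, 0.0, 320.0, 240.0)
intrinsics_i2 = Cal3Bundler(520.0, 0.0, 0.0, 320.0, 240.0)

# A synthetic rank-2 fundamental matrix (skew-symmetric, as for a pure
# translation), purely for illustration.
i2Fi1 = np.array([
    [0.0, -0.001, 0.1],
    [0.001, 0.0, -0.2],
    [-0.1, 0.2, 0.0],
])

i2Ei1 = fundamental_to_essential_matrix(i2Fi1, intrinsics_i1, intrinsics_i2)
print(i2Ei1.shape)  # (3, 3)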
Example #2
import numpy as np
from gtsam import Cal3Bundler, EssentialMatrix

def essential_to_fundamental_matrix(
        i2Ei1: EssentialMatrix, camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler) -> np.ndarray:
    """Converts the essential matrix to fundamental matrix using camera intrinsics.

    Args:
        i2Ei1: essential matrix which maps points in image #i1 to lines in image #i2.
        camera_intrinsics_i1: intrinsics for image #i1.
        camera_intrinsics_i2: intrinsics for image #i2.

    Returns:
        Fundamental matrix i2Fi1 as a NumPy array of shape (3, 3).
    """
    K2_inv_T = np.linalg.inv(camera_intrinsics_i2.K().T)
    K1_inv = np.linalg.inv(camera_intrinsics_i1.K())
    return K2_inv_T @ i2Ei1.matrix() @ K1_inv
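As a quick consistency check (a sketch assuming both helpers above are in scope; the pose and intrinsics values are hypothetical), converting an essential matrix to a fundamental matrix and back should recover the original up to floating-point error, since K2^T (K2^-T E K1^-1) K1 = E:

import numpy as np
from gtsam import Cal3Bundler, EssentialMatrix, Rot3, Unit3

# Hypothetical relative pose: a small rotation about the y-axis and a
# unit translation along the x-axis.
i2Ei1 = EssentialMatrix(Rot3.Ry(0.1), Unit3(np.array([1.0, 0.0, 0.0])))
intrinsics_i1 = Cal3Bundler(500.0, 0.0, 0.0, 320.0, 240.0)
intrinsics_i2 = Cal3Bundler(520.0, 0.0, 0.0, 320.0, 240.0)

i2Fi1 = essential_to_fundamental_matrix(i2Ei1, intrinsics_i1, intrinsics_i2)
i2Ei1_roundtrip = fundamental_to_essential_matrix(
    i2Fi1, intrinsics_i1, intrinsics_i2)
assert np.allclose(i2Ei1_roundtrip, i2Ei1.matrix())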
Example #3
# Module-level imports assumed from the surrounding GTSFM code:
from typing import Optional, Tuple

import numpy as np
from gtsam import Cal3Bundler, Rot3, Unit3

import gtsfm.utils.features as feature_utils
import gtsfm.utils.verification as verification_utils
from gtsfm.common.keypoints import Keypoints

    def verify(
        self,
        keypoints_i1: Keypoints,
        keypoints_i2: Keypoints,
        match_indices: np.ndarray,
        camera_intrinsics_i1: Cal3Bundler,
        camera_intrinsics_i2: Cal3Bundler,
    ) -> Tuple[Optional[Rot3], Optional[Unit3], np.ndarray, float]:
        """Performs verification of correspondences between two images to recover the relative pose and indices of
        verified correspondences.

        Args:
            keypoints_i1: detected features in image #i1.
            keypoints_i2: detected features in image #i2.
            match_indices: matches as indices of features from both images, of shape (N3, 2), where N3 <= min(N1, N2).
            camera_intrinsics_i1: intrinsics for image #i1.
            camera_intrinsics_i2: intrinsics for image #i2.
        
        Returns:
            Estimated rotation i2Ri1, or None if it cannot be estimated.
            Estimated unit translation i2Ui1, or None if it cannot be estimated.
            Indices of verified correspondences, of shape (N, 2) with N <= N3. These are a subset of match_indices.
            Inlier ratio w.r.t. the estimated model, i.e., #RANSAC inliers / #putative matches.
        """
        if match_indices.shape[0] < self._min_matches:
            return self._failure_result

        if self._use_intrinsics_in_verification:
            uv_norm_i1 = feature_utils.normalize_coordinates(
                keypoints_i1.coordinates, camera_intrinsics_i1)
            uv_norm_i2 = feature_utils.normalize_coordinates(
                keypoints_i2.coordinates, camera_intrinsics_i2)

            # OpenCV's essential matrix estimation can fail when given too few matches.
            if match_indices.shape[0] < 6:
                return self._failure_result

            # Sanity check: match indices must not exceed the number of detected keypoints.
            if np.amax(match_indices[:, 1]) >= uv_norm_i2.shape[0]:
                print("Out of bounds access w/ keypoints",
                      keypoints_i2.coordinates[:10])
            if np.amax(match_indices[:, 0]) >= uv_norm_i1.shape[0]:
                print("Out of bounds access w/ keypoints",
                      keypoints_i1.coordinates[:10])

            # Use the larger of the two focal lengths to yield a stricter normalized threshold (threshold_px / fx).
            fx = max(camera_intrinsics_i1.K()[0, 0],
                     camera_intrinsics_i2.K()[0, 0])
            i2Ei1, inlier_mask = self.estimate_E(uv_norm_i1=uv_norm_i1,
                                                 uv_norm_i2=uv_norm_i2,
                                                 match_indices=match_indices,
                                                 fx=fx)
        else:
            i2Fi1, inlier_mask = self.estimate_F(keypoints_i1=keypoints_i1,
                                                 keypoints_i2=keypoints_i2,
                                                 match_indices=match_indices)
            i2Ei1 = verification_utils.fundamental_to_essential_matrix(
                i2Fi1, camera_intrinsics_i1, camera_intrinsics_i2)

        inlier_idxs = np.where(inlier_mask.ravel() == 1)[0]

        # Verified correspondences: the subset of putative matches deemed inliers.
        v_corr_idxs = match_indices[inlier_idxs]
        inlier_ratio_est_model = np.mean(inlier_mask)
        i2Ri1, i2Ui1 = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1,
            keypoints_i1.coordinates[v_corr_idxs[:, 0]],
            keypoints_i2.coordinates[v_corr_idxs[:, 1]],
            camera_intrinsics_i1,
            camera_intrinsics_i2,
        )
        return i2Ri1, i2Ui1, v_corr_idxs, inlier_ratio_est_model
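For intuition, the inlier_mask returned by estimate_E / estimate_F marks the correspondences that satisfy the epipolar constraint x2^T F x1 = 0 within a threshold. A minimal sketch of one common per-correspondence residual, the Sampson error; this illustrates the idea only, and is not necessarily the exact scoring used by the underlying OpenCV routines:

import numpy as np

def sampson_error(i2Fi1: np.ndarray, uv_i1: np.ndarray, uv_i2: np.ndarray) -> np.ndarray:
    """First-order geometric (Sampson) error for each correspondence.

    Args:
        i2Fi1: fundamental matrix, of shape (3, 3).
        uv_i1: points in image #i1, of shape (N, 2).
        uv_i2: points in image #i2, of shape (N, 2).

    Returns:
        Per-correspondence squared error, of shape (N,).
    """
    N = uv_i1.shape[0]
    x1 = np.hstack([uv_i1, np.ones((N, 1))])  # homogeneous coordinates, (N, 3)
    x2 = np.hstack([uv_i2, np.ones((N, 1))])
    Fx1 = x1 @ i2Fi1.T   # epipolar lines in image #i2, one per row
    Ftx2 = x2 @ i2Fi1    # epipolar lines in image #i1, one per row
    algebraic = np.sum(x2 * Fx1, axis=1)  # x2^T F x1, per correspondence
    denom = Fx1[:, 0]**2 + Fx1[:, 1]**2 + Ftx2[:, 0]**2 + Ftx2[:, 1]**2
    return algebraic**2 / denom

# Hypothetical inlier test at a 3-pixel threshold (squared-error units):
# inlier_mask = sampson_error(i2Fi1, uv_i1, uv_i2) < 3.0**2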