示例#1
0
文件: viz.py 项目: asa/gtsfm
def plot_sfm_data_3d(sfm_data: SfmData,
                     ax: Axes,
                     max_plot_radius: float = 50) -> None:
    """Render the camera poses and 3D landmarks of an SfM scene on a 3D axis.

    Args:
        sfm_data: SfmData object with camera and tracks.
        ax: axis to plot on.
        max_plot_radius: maximum distance threshold away from any camera for which a point
            will be plotted
    """
    # gather the pose of every camera in the scene
    camera_poses = [
        sfm_data.camera(i).pose() for i in range(sfm_data.number_cameras())
    ]
    plot_poses_3d(camera_poses, ax)

    # stack every track's landmark into an (N, 3) array
    points_3d = np.array([
        list(sfm_data.track(j).point3())
        for j in range(sfm_data.number_tracks())
    ])

    # discard landmarks farther than max_plot_radius from all cameras
    nearby_points_3d = comp_utils.get_points_within_radius_of_cameras(
        camera_poses, points_3d, max_plot_radius)

    # draw each surviving landmark as a small green dot
    for pt in nearby_points_3d:
        ax.plot(pt[0], pt[1], pt[2], "g.", markersize=1)
示例#2
0
def values_to_sfm_data(values: Values, initial_data: SfmData) -> SfmData:
    """Cast results from the optimization to SfmData object.

    Args:
        values: results of factor graph optimization.
        initial_data: data used to generate the factor graph; used to extract information about poses and 3d points in
                      the graph.

    Returns:
        optimized poses and landmarks.
    """
    sfm_data = SfmData()

    # copy every optimized camera out of the Values container
    for cam_idx in range(initial_data.number_cameras()):
        sfm_data.add_camera(values.atPinholeCameraCal3Bundler(C(cam_idx)))

    # rebuild each track around its optimized 3D point, preserving the
    # original 2D measurements
    for track_idx in range(initial_data.number_tracks()):
        source_track = initial_data.track(track_idx)
        new_track = SfmTrack(values.atPoint3(P(track_idx)))
        for k in range(source_track.number_measurements()):
            cam_idx, uv = source_track.measurement(k)
            new_track.add_measurement(cam_idx, uv)
        sfm_data.add_track(new_track)

    return sfm_data
示例#3
0
    def run(
        self,
        cameras: Dict[int, PinholeCameraCal3Bundler],
        corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
        keypoints_list: List[Keypoints],
    ) -> Tuple[SfmData, Dict[str, Any]]:
        """Perform the data association.

        Builds 2D feature tracks from pairwise matches, triangulates a 3D
        landmark per track, filters tracks by reprojection error and
        cheirality, and collects summary metrics.

        Args:
            cameras: dictionary, with image index -> camera mapping.
            corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
            keypoints_list: keypoints for each image.

        Returns:
            cameras and tracks as SfmData, plus a dict of data-association metrics.
        """
        # generate tracks for 3D points using pairwise correspondences
        tracks_2d = SfmTrack2d.generate_tracks_from_pairwise_matches(
            corr_idxs_dict, keypoints_list)

        # metrics on tracks w/o triangulation check
        # NOTE(review): a zero-track result would make the ratios below divide
        # by zero -- presumably upstream guarantees at least one track; confirm.
        num_tracks_2d = len(tracks_2d)
        track_lengths = list(map(lambda x: x.number_measurements(), tracks_2d))
        mean_2d_track_length = np.mean(track_lengths)

        logger.debug("[Data association] input number of tracks: %s",
                     num_tracks_2d)
        logger.debug("[Data association] input avg. track length: %s",
                     mean_2d_track_length)

        # initializer of 3D landmark for each track
        point3d_initializer = Point3dInitializer(
            cameras,
            self.mode,
            self.reproj_error_thresh,
            self.num_ransac_hypotheses,
        )

        num_tracks_w_cheirality_exceptions = 0
        per_accepted_track_avg_errors = []
        per_rejected_track_avg_errors = []
        # form SFMdata object after triangulation
        triangulated_data = SfmData()
        for track_2d in tracks_2d:
            # triangulate and filter based on reprojection error
            sfm_track, avg_track_reproj_error, is_cheirality_failure = point3d_initializer.triangulate(
                track_2d)
            if is_cheirality_failure:
                num_tracks_w_cheirality_exceptions += 1

            # keep the track only if triangulation succeeded and it passes the
            # subclass-defined validation; otherwise record its error for metrics
            if sfm_track is not None and self.__validate_track(sfm_track):
                triangulated_data.add_track(sfm_track)
                per_accepted_track_avg_errors.append(avg_track_reproj_error)
            else:
                per_rejected_track_avg_errors.append(avg_track_reproj_error)

        num_accepted_tracks = triangulated_data.number_tracks()
        accepted_tracks_ratio = num_accepted_tracks / len(tracks_2d)
        track_cheirality_failure_ratio = num_tracks_w_cheirality_exceptions / len(
            tracks_2d)

        # TODO: improve dropped camera handling
        num_cameras = len(cameras.keys())
        expected_camera_indices = np.arange(num_cameras)
        # add cameras to landmark_map
        # Requires camera indices to be exactly 0..num_cameras-1; a gap means a
        # camera was dropped upstream and we fail loudly rather than misindex.
        for i, cam in cameras.items():
            if i != expected_camera_indices[i]:
                raise RuntimeError("Some cameras must have been dropped ")
            triangulated_data.add_camera(cam)

        mean_3d_track_length, median_3d_track_length, track_lengths_3d = SfmResult(
            triangulated_data, None).get_track_length_statistics()

        logger.debug("[Data association] output number of tracks: %s",
                     num_accepted_tracks)
        logger.debug("[Data association] output avg. track length: %s",
                     mean_3d_track_length)

        # dump the 3d point cloud before Bundle Adjustment for offline visualization
        points_3d = [
            list(triangulated_data.track(j).point3())
            for j in range(num_accepted_tracks)
        ]
        # bin edges are halfway between each integer
        track_lengths_histogram, _ = np.histogram(track_lengths_3d,
                                                  bins=np.linspace(
                                                      -0.5, 10.5, 12))

        # min possible track len is 2, above 10 is improbable
        histogram_dict = {
            f"num_len_{i}_tracks": int(track_lengths_histogram[i])
            for i in range(2, 11)
        }

        # NOTE(review): if no track is accepted, .mean() over an empty array
        # yields nan (with a RuntimeWarning) for mean_accepted_track_avg_error.
        data_assoc_metrics = {
            "mean_2d_track_length":
            np.round(mean_2d_track_length, 3),
            "accepted_tracks_ratio":
            np.round(accepted_tracks_ratio, 3),
            "track_cheirality_failure_ratio":
            np.round(track_cheirality_failure_ratio, 3),
            "num_accepted_tracks":
            num_accepted_tracks,
            "3d_tracks_length": {
                "median": median_3d_track_length,
                "mean": mean_3d_track_length,
                "min": int(track_lengths_3d.min()),
                "max": int(track_lengths_3d.max()),
                "track_lengths_histogram": histogram_dict,
            },
            "mean_accepted_track_avg_error":
            np.array(per_accepted_track_avg_errors).mean(),
            "per_rejected_track_avg_errors":
            per_rejected_track_avg_errors,
            "per_accepted_track_avg_errors":
            per_accepted_track_avg_errors,
            "points_3d":
            points_3d,
        }

        return triangulated_data, data_assoc_metrics
示例#4
0
    def run(self, initial_data: SfmData) -> SfmResult:
        """Run the bundle adjustment by forming factor graph and optimizing using Levenberg–Marquardt optimization.

        Args:
            initial_data: initialized cameras, tracks w/ their 3d landmark from triangulation.

        Returns:
            optimized camera poses, 3D point w/ tracks, and error metrics; on
            optimizer failure, an empty SfmData with a NaN error.
        """
        # lazy %-args so formatting only happens when INFO is enabled
        logger.info(
            "Input: %d tracks on %d cameras\n",
            initial_data.number_tracks(),
            initial_data.number_cameras(),
        )

        # noise model for measurements -- one pixel in u and v
        measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
            IMG_MEASUREMENT_DIM, 1.0)

        # Create a factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add a reprojection factor for every 2d measurement of every track
        for j in range(initial_data.number_tracks()):
            track = initial_data.track(j)  # SfmTrack
            # retrieve the SfmMeasurement objects
            for m_idx in range(track.number_measurements()):
                # i represents the camera index, and uv is the 2d measurement
                i, uv = track.measurement(m_idx)
                # note use of shorthand symbols C and P
                graph.add(
                    GeneralSFMFactorCal3Bundler(uv, measurement_noise, C(i),
                                                P(j)))

        # Add a prior on pose x1. This indirectly specifies where the origin is.
        graph.push_back(
            gtsam.PriorFactorPinholeCameraCal3Bundler(
                C(0),
                initial_data.camera(0),
                gtsam.noiseModel.Isotropic.Sigma(PINHOLE_CAM_CAL3BUNDLER_DOF,
                                                 0.1),
            ))
        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1),
            ))

        # Create initial estimate. The loop index itself is the key index, so
        # no separate manual counter is needed.
        initial = gtsam.Values()
        for i in range(initial_data.number_cameras()):
            initial.insert(C(i), initial_data.camera(i))
        for j in range(initial_data.number_tracks()):
            initial.insert(P(j), initial_data.track(j).point3())

        # Optimize the graph and print results
        try:
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("ERROR")
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
            result_values = lm.optimize()
        except Exception:
            # logger.exception records the traceback; return a NaN-error
            # sentinel so callers can detect the failure
            logger.exception("LM Optimization failed")
            return SfmResult(SfmData(), float("nan"))

        final_error = graph.error(result_values)

        # Error drops from ~2764.22 to ~0.046
        logger.info("initial error: %.2f", graph.error(initial))
        logger.info("final error: %.2f", final_error)

        # construct the results
        optimized_data = values_to_sfm_data(result_values, initial_data)
        sfm_result = SfmResult(optimized_data, final_error)

        return sfm_result