Example #1
File: viz.py  Project: asa/gtsfm
def plot_sfm_data_3d(sfm_data: SfmData,
                     ax: Axes,
                     max_plot_radius: float = 50) -> None:
    """Plot the camera poses and landmarks in 3D matplotlib plot.

    Args:
        sfm_data: SfmData object with camera and tracks.
        ax: axis to plot on.
        max_plot_radius: maximum distance threshold away from any camera for which a point
            will be plotted.
    """
    # extract camera poses
    camera_poses = []
    for i in range(sfm_data.number_cameras()):
        camera_poses.append(sfm_data.camera(i).pose())

    plot_poses_3d(camera_poses, ax)

    num_tracks = sfm_data.number_tracks()
    # Restrict 3d points to some radius of camera poses
    points_3d = np.array(
        [list(sfm_data.track(j).point3()) for j in range(num_tracks)])

    nearby_points_3d = comp_utils.get_points_within_radius_of_cameras(
        camera_poses, points_3d, max_plot_radius)

    # plot 3D points
    for landmark in nearby_points_3d:
        ax.plot(landmark[0], landmark[1], landmark[2], "g.", markersize=1)
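
A minimal usage sketch for plot_sfm_data_3d, assuming the module's gtsam/matplotlib imports and reusing the "dubrovnik-3-7-pre" example dataset that appears elsewhere on this page:

import gtsam
import matplotlib.pyplot as plt

# load an SfmData instance from a BAL example file shipped with GTSAM
sfm_data = gtsam.readBal(gtsam.findExampleDataFile("dubrovnik-3-7-pre"))

# plot cameras and nearby landmarks on a 3D axis
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
plot_sfm_data_3d(sfm_data, ax, max_plot_radius=50)
plt.show()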
Example #2
def values_to_sfm_data(values: Values, initial_data: SfmData) -> SfmData:
    """Cast results from the optimization to SfmData object.

    Args:
        values: results of factor graph optimization.
        initial_data: data used to generate the factor graph; used to extract information about poses and 3d points in
                      the graph.

    Returns:
        optimized poses and landmarks.
    """
    result = SfmData()

    # add cameras
    for i in range(initial_data.number_cameras()):
        result.add_camera(values.atPinholeCameraCal3Bundler(C(i)))

    # add tracks
    for j in range(initial_data.number_tracks()):
        input_track = initial_data.track(j)

        # populate the result with optimized 3D point
        result_track = SfmTrack(values.atPoint3(P(j)))

        for measurement_idx in range(input_track.number_measurements()):
            i, uv = input_track.measurement(measurement_idx)
            result_track.add_measurement(i, uv)

        result.add_track(result_track)

    return result
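
The C and P key helpers used above are GTSAM's symbol shorthands; a sketch of the imports this function assumes (the exact import list in gtsfm may differ):

from gtsam import SfmData, SfmTrack, Values
from gtsam.symbol_shorthand import C, P  # C(i): i-th camera key, P(j): j-th landmark key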
Example #3
def plot_scene(scene_data: SfmData, result: gtsam.Values) -> None:
    """Plot the SFM results."""
    plot_vals = gtsam.Values()
    for i in range(scene_data.numberCameras()):
        plot_vals.insert(i, result.atPinholeCameraCal3Bundler(i).pose())
    for j in range(scene_data.numberTracks()):
        plot_vals.insert(P(j), result.atPoint3(P(j)))

    plot.plot_3d_points(0, plot_vals, linespec="g.")
    plot.plot_trajectory(0, plot_vals, title="SFM results")

    plt.show()
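
plot_scene relies on GTSAM's plotting utilities; a sketch of the imports it assumes:

import matplotlib.pyplot as plt

import gtsam
from gtsam.utils import plot
from gtsam.symbol_shorthand import P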
Example #4
    def test_Balbianello(self):
        """ Check that we can successfully read a bundler file and create a 
            factor graph from it
        """
        # The structure where we will save the SfM data
        filename = gtsam.findExampleDataFile("Balbianello.out")
        sfm_data = SfmData.FromBundlerFile(filename)

        # Check number of things
        self.assertEqual(5, sfm_data.numberCameras())
        self.assertEqual(544, sfm_data.numberTracks())
        track0 = sfm_data.track(0)
        self.assertEqual(3, track0.numberMeasurements())

        # Check projection of a given point
        self.assertEqual(0, track0.measurement(0)[0])
        camera0 = sfm_data.camera(0)
        expected = camera0.project(track0.point3())
        actual = track0.measurement(0)[1]
        self.gtsamAssertEquals(expected, actual, 1)

        # We share *one* noiseModel between all projection factors
        model = gtsam.noiseModel.Isotropic.Sigma(2,
                                                 1.0)  # one pixel in u and v

        # Convert to NonlinearFactorGraph
        graph = sfm_data.sfmFactorGraph(model)
        self.assertEqual(1419, graph.size())  # regression

        # Get initial estimate
        values = gtsam.initialCamerasAndPointsEstimate(sfm_data)
        self.assertEqual(549, values.size())  # regression
Example #5
    def from_sfm_data(cls, sfm_data: gtsam.SfmData) -> "GtsfmData":
        """Initialize from gtsam.SfmData instance.

        Args:
            sfm_data: camera parameters and point tracks.

        Returns:
            A new GtsfmData instance.
        """
        num_images = sfm_data.numberCameras()
        gtsfm_data = cls(num_images)
        for i in range(num_images):
            camera = sfm_data.camera(i)
            gtsfm_data.add_camera(i, camera)
        for j in range(sfm_data.numberTracks()):
            gtsfm_data.add_track(sfm_data.track(j))

        return gtsfm_data
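
A minimal usage sketch for this classmethod, loading the BAL example data the same way the other examples on this page do (assumes the gtsfm package layout; actual import paths may differ):

import gtsam
from gtsfm.common.gtsfm_data import GtsfmData

sfm_data = gtsam.readBal(gtsam.findExampleDataFile("dubrovnik-3-7-pre"))
gtsfm_data = GtsfmData.from_sfm_data(sfm_data)  # wraps the same cameras and tracks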
Example #6
    def run(
        self,
        cameras: Dict[int, PinholeCameraCal3Bundler],
        corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
        keypoints_list: List[Keypoints],
    ) -> SfmData:
        """Perform the data association.

        Args:
            cameras: dictionary with image index as key, and camera object w/ intrinsics + extrinsics as value.
            corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
            keypoints_list: keypoints for each image.

        Returns:
            cameras and tracks as SfmData
        """

        available_cams = np.array(list(cameras.keys()), dtype=np.uint32)

        # form few tracks randomly
        tracks = []
        num_tracks = random.randint(5, 10)

        for _ in range(num_tracks):
            # obtain 3D points for the track randomly
            point_3d = np.random.rand(3, 1)

            # create GTSAM's SfmTrack object
            sfmTrack = SfmTrack(point_3d)

            # randomly select cameras for this track
            selected_cams = np.random.choice(available_cams, self.min_track_len, replace=False)

            # for each selected camera, randomly select a point
            for cam_idx in selected_cams:
                measurement_idx = random.randint(0, len(keypoints_list[cam_idx]) - 1)
                measurement = keypoints_list[cam_idx].coordinates[measurement_idx]

                sfmTrack.add_measurement(cam_idx, measurement)

            tracks.append(sfmTrack)

        # create the final SfmData object
        sfm_data = SfmData()
        for cam in cameras.values():
            sfm_data.add_camera(cam)

        for track in tracks:
            sfm_data.add_track(track)

        return sfm_data
Example #7
    def test_filter_landmarks(self):
        """Tests filtering of SfmData based on reprojection error."""
        max_reproj_error = 15

        VALID_TRACK_INDICES = [0, 1, 5]

        # construct expected data w/ tracks with reprojection errors below the
        # threshold
        expected_data = SfmData()
        for i in range(EXAMPLE_RESULT.sfm_data.number_cameras()):
            expected_data.add_camera(EXAMPLE_RESULT.sfm_data.camera(i))

        for j in VALID_TRACK_INDICES:
            expected_data.add_track(EXAMPLE_RESULT.sfm_data.track(j))

        # run the fn under test
        filtered_sfm_data = EXAMPLE_RESULT.filter_landmarks(max_reproj_error)

        # compare the SfmData objects
        self.assertTrue(filtered_sfm_data.equals(expected_data, 1e-9))
Example #8
Authors: Ayush Baid
"""
import unittest

import gtsam
from gtsam import SfmData

from gtsfm.common.sfm_result import SfmResult

GTSAM_EXAMPLE_FILE = "dubrovnik-3-7-pre"
EXAMPLE_RESULT = SfmResult(
    gtsam.readBal(gtsam.findExampleDataFile(GTSAM_EXAMPLE_FILE)),
    total_reproj_error=1.5e1,
)

NULL_RESULT = SfmResult(SfmData(), total_reproj_error=float("Nan"))


class TestSfmResult(unittest.TestCase):
    """Unit tests for SfmResult"""
    def testEqualsWithSameObject(self):
        """Test equality function with the same object."""
        self.assertEqual(EXAMPLE_RESULT, EXAMPLE_RESULT)

    def testEqualsWithDifferentObject(self):
        """Test the equality function with different object, expecting false result."""
        other_example_file = "dubrovnik-1-1-pre.txt"
        other_result = SfmResult(
            gtsam.readBal(gtsam.findExampleDataFile(other_example_file)),
            total_reproj_error=1.1e1,
        )
Example #9
    def run(
        self,
        cameras: Dict[int, PinholeCameraCal3Bundler],
        corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
        keypoints_list: List[Keypoints],
    ) -> Tuple[SfmData, Dict[str, Any]]:
        """Perform the data association.

        Args:
            cameras: dictionary, with image index -> camera mapping.
            corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
            keypoints_list: keypoints for each image.

        Returns:
            cameras and tracks as SfmData.
        """
        # generate tracks for 3D points using pairwise correspondences
        tracks_2d = SfmTrack2d.generate_tracks_from_pairwise_matches(
            corr_idxs_dict, keypoints_list)

        # metrics on tracks w/o triangulation check
        num_tracks_2d = len(tracks_2d)
        track_lengths = list(map(lambda x: x.number_measurements(), tracks_2d))
        mean_2d_track_length = np.mean(track_lengths)

        logger.debug("[Data association] input number of tracks: %s",
                     num_tracks_2d)
        logger.debug("[Data association] input avg. track length: %s",
                     mean_2d_track_length)

        # initializer of 3D landmark for each track
        point3d_initializer = Point3dInitializer(
            cameras,
            self.mode,
            self.reproj_error_thresh,
            self.num_ransac_hypotheses,
        )

        num_tracks_w_cheirality_exceptions = 0
        per_accepted_track_avg_errors = []
        per_rejected_track_avg_errors = []
        # form SFMdata object after triangulation
        triangulated_data = SfmData()
        for track_2d in tracks_2d:
            # triangulate and filter based on reprojection error
            sfm_track, avg_track_reproj_error, is_cheirality_failure = point3d_initializer.triangulate(
                track_2d)
            if is_cheirality_failure:
                num_tracks_w_cheirality_exceptions += 1

            if sfm_track is not None and self.__validate_track(sfm_track):
                triangulated_data.add_track(sfm_track)
                per_accepted_track_avg_errors.append(avg_track_reproj_error)
            else:
                per_rejected_track_avg_errors.append(avg_track_reproj_error)

        num_accepted_tracks = triangulated_data.number_tracks()
        accepted_tracks_ratio = num_accepted_tracks / len(tracks_2d)
        track_cheirality_failure_ratio = num_tracks_w_cheirality_exceptions / len(
            tracks_2d)

        # TODO: improve dropped camera handling
        num_cameras = len(cameras.keys())
        expected_camera_indices = np.arange(num_cameras)
        # add cameras to landmark_map
        for i, cam in cameras.items():
            if i != expected_camera_indices[i]:
                raise RuntimeError("Some cameras must have been dropped ")
            triangulated_data.add_camera(cam)

        mean_3d_track_length, median_3d_track_length, track_lengths_3d = SfmResult(
            triangulated_data, None).get_track_length_statistics()

        logger.debug("[Data association] output number of tracks: %s",
                     num_accepted_tracks)
        logger.debug("[Data association] output avg. track length: %s",
                     mean_3d_track_length)

        # dump the 3d point cloud before Bundle Adjustment for offline visualization
        points_3d = [
            list(triangulated_data.track(j).point3())
            for j in range(num_accepted_tracks)
        ]
        # bin edges are halfway between each integer
        track_lengths_histogram, _ = np.histogram(track_lengths_3d,
                                                  bins=np.linspace(
                                                      -0.5, 10.5, 12))

        # min possible track len is 2, above 10 is improbable
        histogram_dict = {
            f"num_len_{i}_tracks": int(track_lengths_histogram[i])
            for i in range(2, 11)
        }

        data_assoc_metrics = {
            "mean_2d_track_length": np.round(mean_2d_track_length, 3),
            "accepted_tracks_ratio": np.round(accepted_tracks_ratio, 3),
            "track_cheirality_failure_ratio": np.round(track_cheirality_failure_ratio, 3),
            "num_accepted_tracks": num_accepted_tracks,
            "3d_tracks_length": {
                "median": median_3d_track_length,
                "mean": mean_3d_track_length,
                "min": int(track_lengths_3d.min()),
                "max": int(track_lengths_3d.max()),
                "track_lengths_histogram": histogram_dict,
            },
            "mean_accepted_track_avg_error": np.array(per_accepted_track_avg_errors).mean(),
            "per_rejected_track_avg_errors": per_rejected_track_avg_errors,
            "per_accepted_track_avg_errors": per_accepted_track_avg_errors,
            "points_3d": points_3d,
        }

        return triangulated_data, data_assoc_metrics
Example #10
def run(args: argparse.Namespace) -> None:
    """ Run LM optimization with BAL input data and report resulting error """
    input_file = args.input_file

    # Load the SfM data from file
    scene_data = SfmData.FromBalFile(input_file)
    logging.info("read %d tracks on %d cameras\n", scene_data.numberTracks(),
                 scene_data.numberCameras())

    # Create a factor graph
    graph = gtsam.NonlinearFactorGraph()

    # We share *one* noiseModel between all projection factors
    noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Add measurements to the factor graph
    for j in range(scene_data.numberTracks()):
        track = scene_data.track(j)  # SfmTrack
        # retrieve the SfmMeasurement objects
        for m_idx in range(track.numberMeasurements()):
            # i represents the camera index, and uv is the 2d measurement
            i, uv = track.measurement(m_idx)
            # note: cameras are keyed by their integer index i; P(j) is the shorthand symbol for landmark j
            graph.add(GeneralSFMFactorCal3Bundler(uv, noise, i, P(j)))

    # Add a prior on pose x1. This indirectly specifies where the origin is.
    graph.push_back(
        PriorFactorPinholeCameraCal3Bundler(
            0, scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1)))
    # Also add a prior on the position of the first landmark to fix the scale
    graph.push_back(
        PriorFactorPoint3(P(0),
                          scene_data.track(0).point3(),
                          gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))

    # Create initial estimate
    initial = gtsam.Values()

    # add each PinholeCameraCal3Bundler, keyed by its integer camera index
    for i in range(scene_data.numberCameras()):
        initial.insert(i, scene_data.camera(i))

    # add each SfmTrack
    for j in range(scene_data.numberTracks()):
        track = scene_data.track(j)
        initial.insert(P(j), track.point3())

    # Optimize the graph and print results
    try:
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("ERROR")
        lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = lm.optimize()
    except RuntimeError:
        logging.exception("LM Optimization failed")
        return

    # Error drops from ~2764.22 to ~0.046
    logging.info("initial error: %f", graph.error(initial))
    logging.info("final error: %f", graph.error(result))

    plot_scene(scene_data, result)
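
A sketch of a command-line entry point for run() (the --input_file argument name is inferred from the function body; the default dataset and the argparse/logging/gtsam imports are assumed from the rest of this example's module):

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="LM bundle adjustment on a BAL dataset")
    parser.add_argument("--input_file", type=str,
                        default=gtsam.findExampleDataFile("dubrovnik-3-7-pre"),
                        help="path to a BAL-format input file")
    logging.basicConfig(level=logging.INFO)
    run(parser.parse_args())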
Example #11
    def setUp(self):
        """Initialize SfmData and SfmTrack"""
        self.data = SfmData()
        # initialize SfmTrack with 3D point
        self.tracks = SfmTrack()
Example #12
class TestSfmData(GtsamTestCase):
    """Tests for SfmData and SfmTrack modules."""
    def setUp(self):
        """Initialize SfmData and SfmTrack"""
        self.data = SfmData()
        # initialize SfmTrack with 3D point
        self.tracks = SfmTrack()

    def test_tracks(self):
        """Test functions in SfmTrack"""
        # measurement is of format (camera_idx, imgPoint)
        # create arbitrary camera indices for two cameras
        i1, i2 = 4, 5

        # create arbitrary image measurements for cameras i1 and i2
        uv_i1 = Point2(12.6, 82)

        # translating point uv_i1 along X-axis
        uv_i2 = Point2(24.88, 82)

        # add measurements to the track
        self.tracks.addMeasurement(i1, uv_i1)
        self.tracks.addMeasurement(i2, uv_i2)

        # Number of measurements in the track is 2
        self.assertEqual(self.tracks.numberMeasurements(), 2)

        # camera_idx in the first measurement of the track corresponds to i1
        cam_idx, img_measurement = self.tracks.measurement(0)
        self.assertEqual(cam_idx, i1)
        np.testing.assert_array_almost_equal(Point3(0., 0., 0.),
                                             self.tracks.point3())

    def test_data(self):
        """Test functions in SfmData"""
        # Create new track with 3 measurements
        i1, i2, i3 = 3, 5, 6
        uv_i1 = Point2(21.23, 45.64)

        # translating along X-axis
        uv_i2 = Point2(45.7, 45.64)
        uv_i3 = Point2(68.35, 45.64)

        # add measurements and arbitrary point to the track
        measurements = [(i1, uv_i1), (i2, uv_i2), (i3, uv_i3)]
        pt = Point3(1.0, 6.0, 2.0)
        track2 = SfmTrack(pt)
        track2.addMeasurement(i1, uv_i1)
        track2.addMeasurement(i2, uv_i2)
        track2.addMeasurement(i3, uv_i3)
        self.data.addTrack(self.tracks)
        self.data.addTrack(track2)

        # Number of tracks in SfmData is 2
        self.assertEqual(self.data.numberTracks(), 2)

        # camera idx of first measurement of second track corresponds to i1
        cam_idx, img_measurement = self.data.track(1).measurement(0)
        self.assertEqual(cam_idx, i1)

    def test_Balbianello(self):
        """ Check that we can successfully read a bundler file and create a 
            factor graph from it
        """
        # The structure where we will save the SfM data
        filename = gtsam.findExampleDataFile("Balbianello.out")
        sfm_data = SfmData.FromBundlerFile(filename)

        # Check number of things
        self.assertEqual(5, sfm_data.numberCameras())
        self.assertEqual(544, sfm_data.numberTracks())
        track0 = sfm_data.track(0)
        self.assertEqual(3, track0.numberMeasurements())

        # Check projection of a given point
        self.assertEqual(0, track0.measurement(0)[0])
        camera0 = sfm_data.camera(0)
        expected = camera0.project(track0.point3())
        actual = track0.measurement(0)[1]
        self.gtsamAssertEquals(expected, actual, 1)

        # We share *one* noiseModel between all projection factors
        model = gtsam.noiseModel.Isotropic.Sigma(2,
                                                 1.0)  # one pixel in u and v

        # Convert to NonlinearFactorGraph
        graph = sfm_data.sfmFactorGraph(model)
        self.assertEqual(1419, graph.size())  # regression

        # Get initial estimate
        values = gtsam.initialCamerasAndPointsEstimate(sfm_data)
        self.assertEqual(549, values.size())  # regression
Example #13
import copy
import unittest
import unittest.mock as mock

import gtsam
import numpy as np
from gtsam import Cal3Bundler, PinholeCameraCal3Bundler, Pose3, SfmData, SfmTrack

import gtsfm.utils.graph as graph_utils
import gtsfm.utils.io as io_utils
from gtsfm.common.gtsfm_data import GtsfmData

GTSAM_EXAMPLE_FILE = "dubrovnik-3-7-pre"  # example data with 3 cams and 7 tracks
EXAMPLE_DATA = io_utils.read_bal(gtsam.findExampleDataFile(GTSAM_EXAMPLE_FILE))

NULL_DATA = SfmData()

# create example with non-consecutive cams
EXAMPLE_WITH_NON_CONSECUTIVE_CAMS = GtsfmData(number_images=5)
EXAMPLE_WITH_NON_CONSECUTIVE_CAMS.add_camera(index=0,
                                             camera=EXAMPLE_DATA.get_camera(0))
EXAMPLE_WITH_NON_CONSECUTIVE_CAMS.add_camera(index=2,
                                             camera=EXAMPLE_DATA.get_camera(1))
EXAMPLE_WITH_NON_CONSECUTIVE_CAMS.add_camera(index=3,
                                             camera=EXAMPLE_DATA.get_camera(2))

EQUALITY_TOLERANCE = 1e-5


class TestGtsfmData(unittest.TestCase):
    """Unit tests for GtsfmData."""
Example #14
    def run(self, initial_data: SfmData) -> SfmResult:
        """Run the bundle adjustment by forming factor graph and optimizing using Levenberg–Marquardt optimization.

        Args:
            initial_data: initialized cameras, tracks w/ their 3d landmark from triangulation.

        Returns:
            optimized camera poses, 3D points w/ tracks, and error metrics.
        """
        logger.info(
            f"Input: {initial_data.number_tracks()} tracks on {initial_data.number_cameras()} cameras\n"
        )

        # noise model for measurements -- one pixel in u and v
        measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
            IMG_MEASUREMENT_DIM, 1.0)

        # Create a factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add measurements to the factor graph
        for j in range(initial_data.number_tracks()):
            track = initial_data.track(j)  # SfmTrack
            # retrieve the SfmMeasurement objects
            for m_idx in range(track.number_measurements()):
                # i represents the camera index, and uv is the 2d measurement
                i, uv = track.measurement(m_idx)
                # note use of shorthand symbols C and P
                graph.add(
                    GeneralSFMFactorCal3Bundler(uv, measurement_noise, C(i),
                                                P(j)))

        # Add a prior on pose x1. This indirectly specifies where the origin is.
        graph.push_back(
            gtsam.PriorFactorPinholeCameraCal3Bundler(
                C(0),
                initial_data.camera(0),
                gtsam.noiseModel.Isotropic.Sigma(PINHOLE_CAM_CAL3BUNDLER_DOF,
                                                 0.1),
            ))
        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1),
            ))

        # Create initial estimate
        initial = gtsam.Values()

        # add each PinholeCameraCal3Bundler, keyed by C(i)
        for i in range(initial_data.number_cameras()):
            initial.insert(C(i), initial_data.camera(i))

        # add each SfmTrack's 3D point, keyed by P(j)
        for j in range(initial_data.number_tracks()):
            initial.insert(P(j), initial_data.track(j).point3())

        # Optimize the graph and print results
        try:
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("ERROR")
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
            result_values = lm.optimize()
        except Exception:
            logger.exception("LM Optimization failed")
            return SfmResult(SfmData(), float("Nan"))

        final_error = graph.error(result_values)

        # Error drops from ~2764.22 to ~0.046
        logger.info(f"initial error: {graph.error(initial):.2f}")
        logger.info(f"final error: {final_error:.2f}")

        # construct the results
        optimized_data = values_to_sfm_data(result_values, initial_data)
        sfm_result = SfmResult(optimized_data, final_error)

        return sfm_result
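
A minimal sketch of calling this method. The enclosing class is not shown in the snippet, so the BundleAdjustmentOptimizer name below is an assumption; initial_data would typically be the SfmData produced by the data-association stage (Example #9), and the total_reproj_error field name is taken from Example #8:

# hypothetical class name -- only the run() method is shown above
optimizer = BundleAdjustmentOptimizer()
sfm_result = optimizer.run(initial_data=triangulated_data)
print(f"total reprojection error: {sfm_result.total_reproj_error:.2f}")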