Exemplo n.º 1
0
    def test_run(self):
        """Recover translations at a given scale and compare to ground truth."""
        gt_poses = ExampleValues()
        measurements = SimulateMeasurements(gt_poses, [[0, 1], [0, 2], [1, 2]])

        # Keep LM quiet while the test runs.
        lm_params = gtsam.LevenbergMarquardtParams()
        lm_params.setVerbosityLM("silent")

        algorithm = gtsam.TranslationRecovery(measurements, lm_params)
        scale = 2.0
        result = algorithm.run(scale)

        # Ground-truth translations of poses 1 and 2 relative to pose 0.
        origin = gt_poses.atPose3(0).translation()
        w_aTc = gt_poses.atPose3(2).translation() - origin
        w_aTb = gt_poses.atPose3(1).translation() - origin

        # Expected results are normalized by |w_aTb| and multiplied by scale.
        norm_ab = np.linalg.norm(w_aTb)
        w_aTc_expected = w_aTc * scale / norm_ab
        w_aTb_expected = w_aTb * scale / norm_ab

        np.testing.assert_array_almost_equal(result.atPoint3(0),
                                             np.array([0, 0, 0]), 6,
                                             "Origin result is incorrect.")
        np.testing.assert_array_almost_equal(result.atPoint3(1),
                                             w_aTb_expected, 6,
                                             "Point B result is incorrect.")
        np.testing.assert_array_almost_equal(result.atPoint3(2),
                                             w_aTc_expected, 6,
                                             "Point C result is incorrect.")
Exemplo n.º 2
0
 def test_constructor(self):
     """Construct from binary measurements."""
     # Default construction and construction with explicit LM params
     # should both yield a TranslationRecovery instance.
     default_algo = gtsam.TranslationRecovery()
     self.assertIsInstance(default_algo, gtsam.TranslationRecovery)
     with_params = gtsam.TranslationRecovery(
         gtsam.LevenbergMarquardtParams())
     self.assertIsInstance(with_params, gtsam.TranslationRecovery)
Exemplo n.º 3
0
def main():
    """Build a one-pose graph with prior and GPS factors, then solve with LM."""
    graph = gtsam.NonlinearFactorGraph()

    # Prior factor (mean + noise model) anchoring pose 1 at the origin.
    prior_mean = gtsam.Pose3()
    graph.add(gtsam.PriorFactorPose3(1, prior_mean, PRIOR_NOISE))

    # GPS factor at the module-level reference coordinates (lat0, lon0, h0).
    gps = gtsam.Point3(lat0, lon0, h0)
    graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
    print("\nFactor Graph:\n{}".format(graph))

    # Deliberately rough initial estimate for illustration.
    initial = gtsam.Values()
    initial.insert(1, gtsam.Pose3())
    print("\nInitial Estimate:\n{}".format(initial))

    # Solve with Levenberg-Marquardt using default parameters.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))
Exemplo n.º 4
0
 def optimize(self, graph: gtsam.NonlinearFactorGraph,
              initial: gtsam.Values):
     """Optimize using Levenberg-Marquardt optimization."""
     params = gtsam.LevenbergMarquardtParams()
     # Print a per-iteration summary while optimizing.
     params.setVerbosityLM("SUMMARY")
     optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
     return optimizer.optimize()
Exemplo n.º 5
0
    def estimate_global(self):
        """Fuse odometry and visual relative poses into one global estimate.

        Builds a Pose2 factor graph (tight prior on the first pose, between
        factors from both odometry and visual measurements), optimizes it
        with Levenberg-Marquardt, and returns the unwrapped result.
        """
        np.random.seed(2)
        tiny = 0.000001
        prior_sigmas = np.array([tiny, tiny, tiny])
        # TODO: something about floats and major row? check cython
        factor_sigmas = np.array([0.2, 0.2, 0.2])

        graph = gtsam.NonlinearFactorGraph()

        # Anchor the first pose with a very tight prior at its odometry value.
        priorNoise = gtsam.noiseModel_Diagonal.Sigmas(prior_sigmas)
        graph.add(
            gtsam.PriorFactorPose2(self.X(1), self.odom_global[0],
                                   priorNoise))

        # Odometry constraints between consecutive poses.
        odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(factor_sigmas)
        for idx, rel_pose in enumerate(self.odom_relative):
            graph.add(
                gtsam.BetweenFactorPose2(self.X(idx + 1), self.X(idx + 2),
                                         rel_pose, odometryNoise))

        # Visual-measurement constraints between the same consecutive poses.
        visualNoise = gtsam.noiseModel_Diagonal.Sigmas(factor_sigmas)
        for idx, rel_pose in enumerate(self.visual_relative):
            graph.add(
                gtsam.BetweenFactorPose2(self.X(idx + 1), self.X(idx + 2),
                                         rel_pose, visualNoise))

        # Seed the optimizer with the global odometry estimates.
        initialEstimate = gtsam.Values()
        for idx, pose in enumerate(self.odom_global):
            initialEstimate.insert(self.X(idx + 1), pose)

        # Optimize with Levenberg-Marquardt, printing a summary.
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate,
                                                      params)
        result = optimizer.optimize()

        return self.unwrap_results(result)
Exemplo n.º 6
0
    def optimize(self, verbosity=0):
        """Optimize the stored graph; return a {name: sophus-pose} mapping.

        verbosity selects LM output: 0 -> "SILENT", 1 -> "TERMINATION".
        """
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosity(("SILENT", "TERMINATION")[verbosity])
        # Tight tolerances so LM runs essentially to convergence.
        params.setAbsoluteErrorTol(1e-10)
        params.setRelativeErrorTol(1e-10)
        optimizer = gtsam.LevenbergMarquardtOptimizer(
            self.gtsam_graph, self.values, params)
        solution = optimizer.optimize()

        return {
            name: gtsam2sophus(solution.atPose3(var))
            for name, var in self.vars.items()
        }
Exemplo n.º 7
0
def build_graph():
    """Build a 3-pose odometry graph from ``edge_list``, optimize, and plot.

    Reads two relative-pose measurements from the module-level ``edge_list``,
    anchors the first pose at the origin, optimizes with Levenberg-Marquardt,
    prints marginal covariances, and plots the result.
    """
    print("build_graph !!!")
    # Create noise models
    ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2,
                                                                0.1]))
    PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))

    # Create an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Add a prior on the first pose, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
    graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))

    # Odometry measurements taken from the edge list.
    # (Removed an unused hard-coded `odometry` Pose2 that was never added
    # to the graph.)
    # NOTE(review): position.z is used as the rotation angle here — confirm
    # the edge_list convention.
    odometry1 = gtsam.Pose2(edge_list[0].position.x, edge_list[0].position.y,
                            edge_list[0].position.z)
    odometry2 = gtsam.Pose2(edge_list[1].position.x, edge_list[1].position.y,
                            edge_list[1].position.z)

    # For simplicity, we will use the same noise model for each odometry factor
    # Create odometry (Between) factors between consecutive poses
    graph.add(gtsam.BetweenFactorPose2(1, 2, odometry1, ODOMETRY_NOISE))
    graph.add(gtsam.BetweenFactorPose2(2, 3, odometry2, ODOMETRY_NOISE))
    print("\nFactor Graph:\n{}".format(graph))

    # Create the data structure to hold the initialEstimate estimate to the solution
    # For illustrative purposes, these have been deliberately set to incorrect values
    initial = gtsam.Values()
    initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
    initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
    initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
    print("\nInitial Estimate:\n{}".format(initial))

    # optimize using Levenberg-Marquardt optimization
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    # Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    for i in range(1, 4):
        print("X{} covariance:\n{}\n".format(i,
                                             marginals.marginalCovariance(i)))

    # Plot each pose with its covariance ellipse (unused `fig` binding
    # removed; the figure creation side effect is kept).
    plt.figure(0)
    for i in range(1, 4):
        gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
                              marginals.marginalCovariance(i))
    plt.axis('equal')
    plt.show()
Exemplo n.º 8
0
    def __optimize_factor_graph(self, graph: NonlinearFactorGraph,
                                initial_values: Values) -> Values:
        """Optimize the factor graph."""
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("ERROR")
        # Honor an iteration cap only when one was configured.
        if self._max_iterations:
            params.setMaxIterations(self._max_iterations)
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_values,
                                                      params)
        return optimizer.optimize()
    def optimize(self, graph_to_optimize=None):
        """Optimize the stored graph (or a caller-supplied one) with LM.

        The solution is cached on the instance and becomes the next
        initial estimate.
        """
        params = gtsam.LevenbergMarquardtParams()
        # Default to the instance graph/initials; otherwise use the
        # supplied graph with the propagated initial estimate.
        if graph_to_optimize is None:
            graph, values = self.graph, self.initial
        else:
            graph, values = graph_to_optimize, self.prop_initial
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, values, params)

        self.result = optimizer.optimize()
        self.initial = self.result
        self.resultsFlag = True
Exemplo n.º 10
0
    def test_lm_simple_printing(self):
        """Make sure we are properly terminating LM"""
        def hook(_, error):
            # Per-iteration callback: print the current error.
            print(error)

        params = gtsam.LevenbergMarquardtParams()
        # Optimize with default params, then with explicit params.
        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook,
                                self.graph, self.initial)
        self.check(actual)
        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook,
                                self.graph, self.initial, params)
        self.check(actual)
        actual = gtsam_optimize(
            gtsam.LevenbergMarquardtOptimizer(self.graph, self.initial,
                                              params), params, hook)
        # Bug fix: this result was previously assigned but never verified,
        # unlike the two results above.
        self.check(actual)
Exemplo n.º 11
0
    def optimize_old(self,
                     graph_to_optimize=None
                     ):  #TODO: Old one; Turn back on if ISAM2 refuses to work.
        """Batch-optimize with LM; kept as a fallback for ISAM2."""
        params = gtsam.LevenbergMarquardtParams()
        # Default to the instance graph/initials; otherwise use the
        # supplied graph with the propagated initial estimate.
        if graph_to_optimize is None:
            graph, values = self.graph, self.initial
        else:
            graph, values = graph_to_optimize, self.prop_initial
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, values, params)

        self.result = optimizer.optimize()
        self.initial = self.result
        self.resultsFlag = True
Exemplo n.º 12
0
    def test_optimization(self):
        """Tests if a factor graph with a CustomFactor can be properly optimized"""
        gT1 = Pose2(1, 2, np.pi / 2)
        gT2 = Pose2(-1, 4, np.pi)

        expected = Pose2(2, 2, np.pi / 2)

        def error_func(this: CustomFactor, v: gtsam.Values,
                       H: List[np.ndarray]):
            """
            Error function that mimics a BetweenFactor
            :param this: reference to the current CustomFactor being evaluated
            :param v: Values object
            :param H: list of references to the Jacobian arrays
            :return: the non-linear error
            """
            key_a, key_b = this.keys()[0], this.keys()[1]
            pose_a = v.atPose2(key_a)
            pose_b = v.atPose2(key_b)
            between = pose_a.between(pose_b)
            error = expected.localCoordinates(between)

            if H is not None:
                # Jacobians of the between-error w.r.t. each pose.
                H[0] = -between.inverse().AdjointMap()
                H[1] = np.eye(3)
            return error

        noise_model = gtsam.noiseModel.Unit.Create(3)
        cf = CustomFactor(noise_model, [0, 1], error_func)

        # Graph: the custom between-like factor plus a prior pinning pose 0.
        fg = gtsam.NonlinearFactorGraph()
        fg.add(cf)
        fg.add(gtsam.PriorFactorPose2(0, gT1, noise_model))

        # Start both poses at the identity.
        v = Values()
        for key in (0, 1):
            v.insert(key, Pose2(0, 0, 0))

        optimizer = gtsam.LevenbergMarquardtOptimizer(
            fg, v, gtsam.LevenbergMarquardtParams())
        result = optimizer.optimize()

        self.gtsamAssertEquals(result.atPose2(0), gT1, tol=1e-5)
        self.gtsamAssertEquals(result.atPose2(1), gT2, tol=1e-5)
Exemplo n.º 13
0
    def optimize(graph, initial_estimate, calibration):
        """Run Levenberg-Marquardt on the graph and return the estimate.

        NOTE(review): ``calibration`` is accepted but never used in this
        body — kept for interface compatibility; confirm against callers.
        """
        # Configure LM: SUMMARY verbosity (levels: SILENT = 0, SUMMARY,
        # TERMINATION, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA :
        # VALUES, ERROR), iteration cap, damping bounds, and tolerances.
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        params.setMaxIterations(100)
        params.setlambdaInitial(1e-5)
        params.setlambdaUpperBound(1e10)
        params.setlambdaLowerBound(1e-8)
        params.setRelativeErrorTol(1e-5)
        params.setAbsoluteErrorTol(1e-5)

        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                      params)

        print('starting optimization')
        estimate = optimizer.optimize()
        print('optimization finished')

        return estimate
Exemplo n.º 14
0
    def compute_control(self, start_state, nominal_states):
        """Solve an N-step optimal-control factor graph.

        Returns (u_star, states): the control value and the optimized
        state vectors over the horizon.
        """
        # Graph: prior on x0, then dynamics + state-cost factors chained
        # over the N-step horizon.
        graph = gtsam.NonlinearFactorGraph()
        graph.add(gtd.PriorFactorVector5(self.state_key(0), start_state,
                                         self.prior_noise))
        for step in range(self.N):
            curr_key = self.state_key(step)
            next_key = self.state_key(step + 1)
            graph.add(
                gtd.CPDynamicsFactor(curr_key, next_key,
                                     self.transition_noise, self.dt))
            graph.add(gtd.CPStateCostFactor(next_key, self.cost_noise))

        # Initial values come from the nominal trajectory.
        init_values = gtsam.Values()
        for step in range(self.N + 1):
            init_values.insert(self.state_key(step),
                               np.array(nominal_states[step]))

        # Optimize with LM using a multifrontal QR linear solver.
        params = gtsam.LevenbergMarquardtParams()
        params.setMaxIterations(20)
        params.setLinearSolverType("MULTIFRONTAL_QR")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, init_values,
                                                      params)
        results = optimizer.optimize()

        # Collect the optimized state vectors in time order.
        states = [
            results.atVector(self.state_key(step))
            for step in range(self.N + 1)
        ]

        # Control is the last state component advanced by the first step's
        # rate of change.
        dx_c = states[1][-1] - states[0][-1]
        u_star = states[0][-1] + 1.1 * dx_c / (20 * self.dt)
        return u_star, states
Exemplo n.º 15
0
    def setUp(self):
        """Set up a small Karcher mean optimization example."""
        # Grabbed from KarcherMeanFactor unit tests.
        R = Rot3.Expmap(np.array([0.1, 0, 0]))
        rotations = {R, R.inverse()}  # mean is the identity
        self.expected = Rot3()

        # One rotation prior per element of the set.
        # NOTE(review): the loop variable shadows the outer `R`; after the
        # loop, `R` is whichever element the set yielded last, and that
        # value seeds `initial` below — confirm this is intentional.
        graph = gtsam.NonlinearFactorGraph()
        for R in rotations:
            graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
        initial = gtsam.Values()
        initial.insert(KEY, R)
        # Gauss-Newton optimizer over the same problem.
        self.params = gtsam.GaussNewtonParams()
        self.optimizer = gtsam.GaussNewtonOptimizer(graph, initial,
                                                    self.params)

        # Matching Levenberg-Marquardt optimizer, for comparison in tests.
        self.lmparams = gtsam.LevenbergMarquardtParams()
        self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
            graph, initial, self.lmparams)

        # setup output capture
        # NOTE(review): sys.stdout is replaced here; presumably restored in
        # tearDown — verify.
        self.capturedOutput = StringIO()
        sys.stdout = self.capturedOutput
Exemplo n.º 16
0
    def run(self, initial_data: GtsfmData) -> GtsfmData:
        """Run the bundle adjustment by forming factor graph and optimizing using Levenberg–Marquardt optimization.

        Args:
            initial_data: initialized cameras, tracks w/ their 3d landmark from triangulation.

        Results:
            optimized camera poses, 3D point w/ tracks, and error metrics.
        """
        logger.info(
            f"Input: {initial_data.number_tracks()} tracks on {len(initial_data.get_valid_camera_indices())} cameras\n"
        )

        # noise model for measurements -- one pixel in u and v
        measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
            IMG_MEASUREMENT_DIM, 1.0)

        # Create a factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add measurements to the factor graph
        for j in range(initial_data.number_tracks()):
            track = initial_data.get_track(j)  # SfmTrack
            # retrieve the SfmMeasurement objects
            for m_idx in range(track.number_measurements()):
                # i represents the camera index, and uv is the 2d measurement
                i, uv = track.measurement(m_idx)
                # note use of shorthand symbols C and P
                graph.add(
                    GeneralSFMFactorCal3Bundler(uv, measurement_noise, C(i),
                                                P(j)))

        # get all the valid camera indices, which need to be added to the graph.
        valid_camera_indices = initial_data.get_valid_camera_indices()

        # Add a prior on first pose. This indirectly specifies where the origin is.
        graph.push_back(
            gtsam.PriorFactorPinholeCameraCal3Bundler(
                C(valid_camera_indices[0]),
                initial_data.get_camera(valid_camera_indices[0]),
                gtsam.noiseModel.Isotropic.Sigma(PINHOLE_CAM_CAL3BUNDLER_DOF,
                                                 0.1),
            ))
        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.get_track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1)))

        # Create initial estimate
        initial = gtsam.Values()

        # add each PinholeCameraCal3Bundler
        for i in valid_camera_indices:
            camera = initial_data.get_camera(i)
            initial.insert(C(i), camera)

        # add each SfmTrack
        for j in range(initial_data.number_tracks()):
            track = initial_data.get_track(j)
            initial.insert(P(j), track.point3())

        # Optimize the graph and print results
        # On optimizer failure, bail out with an empty result rather than
        # propagating the exception.
        try:
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("ERROR")
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
            result_values = lm.optimize()
        except Exception:
            logger.exception("LM Optimization failed")
            # as we did not perform the bundle adjustment, we skip computing the total reprojection error
            return GtsfmData(initial_data.number_images())

        final_error = graph.error(result_values)

        # Error drops from ~2764.22 to ~0.046
        logger.info(f"initial error: {graph.error(initial):.2f}")
        logger.info(f"final error: {final_error:.2f}")

        # construct the results
        optimized_data = values_to_gtsfm_data(result_values, initial_data)

        # Metrics before and after reprojection-error filtering are both
        # recorded and persisted to JSON.
        metrics_dict = {}
        metrics_dict["before_filtering"] = optimized_data.aggregate_metrics()
        logger.info("[Result] Number of tracks before filtering: %d",
                    metrics_dict["before_filtering"]["number_tracks"])

        # filter the largest errors
        filtered_result = optimized_data.filter_landmarks(
            self.output_reproj_error_thresh)

        metrics_dict["after_filtering"] = filtered_result.aggregate_metrics()
        io_utils.save_json_file(
            os.path.join(METRICS_PATH, "bundle_adjustment_metrics.json"),
            metrics_dict)

        logger.info("[Result] Number of tracks after filtering: %d",
                    metrics_dict["after_filtering"]["number_tracks"])
        logger.info(
            "[Result] Mean track length %.3f",
            metrics_dict["after_filtering"]["3d_track_lengths"]["mean"])
        logger.info(
            "[Result] Median track length %.3f",
            metrics_dict["after_filtering"]["3d_track_lengths"]["median"])
        filtered_result.log_scene_reprojection_error_stats()

        return filtered_result
def batch_factorgraph_example():
    """Build a small 2D SLAM example, solve it in batch, and plot marginals."""
    graph = gtsam.NonlinearFactorGraph()

    # Keys for the three poses and two landmarks.
    pose_variables = [X(1), X(2), X(3)]
    X1, X2, X3 = pose_variables
    landmark_variables = [L(1), L(2)]
    L1, L2 = landmark_variables

    # Prior anchoring pose X1 at the origin.
    prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.1]))
    graph.add(
        gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), prior_noise))

    # Identical odometry measurements between consecutive poses.
    odometry_noise = gtsam.noiseModel.Diagonal.Sigmas(
        np.array([0.1, 0.1, 0.1]))
    for from_key, to_key in ((X1, X2), (X2, X3)):
        graph.add(
            gtsam.BetweenFactorPose2(from_key, to_key,
                                     gtsam.Pose2(2.0, 0.0, 0.0),
                                     odometry_noise))

    # Bearing-range observations of landmarks L1 and L2.
    measurement_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.05, 0.1]))
    graph.add(
        gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45),
                                   np.sqrt(4.0 + 4.0), measurement_noise))
    graph.add(
        gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_noise))
    graph.add(
        gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_noise))

    # Deliberately inaccurate initial estimate.
    initial_estimate = gtsam.Values()
    initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
    initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
    initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
    initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
    initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))

    # Solve the MAP problem with Levenberg-Marquardt.
    optimizer = gtsam.LevenbergMarquardtOptimizer(
        graph, initial_estimate, gtsam.LevenbergMarquardtParams())
    result = optimizer.optimize()

    # Marginal covariances for every variable.
    marginals = gtsam.Marginals(graph, result)

    pose_marginals = [
        MultivariateNormalParameters(result.atPose2(var),
                                     marginals.marginalCovariance(var))
        for var in pose_variables
    ]
    landmark_marginals = [
        MultivariateNormalParameters(result.atPoint2(var),
                                     marginals.marginalCovariance(var))
        for var in landmark_variables
    ]

    # The joint marginal over all variables, printed for reference.
    joint_all = marginals.jointMarginalCovariance(
        gtsam.KeyVector(pose_variables + landmark_variables))
    print("Joint covariance over all variables:")
    print(joint_all.fullMatrix())

    # Plot the marginals.
    plot_result(pose_marginals, landmark_marginals)
Exemplo n.º 18
0
def run(args):
    """ Run LM optimization with BAL input data and report resulting error

    Args:
        args: parsed CLI namespace; ``args.input_file`` names a BAL example
            dataset resolvable by ``gtsam.findExampleDataFile``.
    """
    input_file = gtsam.findExampleDataFile(args.input_file)

    # Load the SfM data from file
    scene_data = readBal(input_file)
    logging.info(
        f"read {scene_data.number_tracks()} tracks on {scene_data.number_cameras()} cameras\n"
    )

    # Create a factor graph
    graph = gtsam.NonlinearFactorGraph()

    # We share *one* noiseModel between all projection factors
    noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Add measurements to the factor graph. The track index doubles as the
    # landmark index j (redundant manual counters removed).
    for j in range(scene_data.number_tracks()):
        track = scene_data.track(j)  # SfmTrack
        # retrieve the SfmMeasurement objects
        for m_idx in range(track.number_measurements()):
            # i represents the camera index, and uv is the 2d measurement
            i, uv = track.measurement(m_idx)
            # note use of shorthand symbols C and P
            graph.add(GeneralSFMFactorCal3Bundler(uv, noise, C(i), P(j)))

    # Add a prior on pose x1. This indirectly specifies where the origin is.
    graph.push_back(
        gtsam.PriorFactorPinholeCameraCal3Bundler(
            C(0), scene_data.camera(0),
            gtsam.noiseModel.Isotropic.Sigma(9, 0.1)))
    # Also add a prior on the position of the first landmark to fix the scale
    graph.push_back(
        gtsam.PriorFactorPoint3(P(0),
                                scene_data.track(0).point3(),
                                gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))

    # Create initial estimate
    initial = gtsam.Values()

    # add each PinholeCameraCal3Bundler
    for i in range(scene_data.number_cameras()):
        initial.insert(C(i), scene_data.camera(i))

    # add each SfmTrack
    for j in range(scene_data.number_tracks()):
        initial.insert(P(j), scene_data.track(j).point3())

    # Optimize the graph and print results
    try:
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("ERROR")
        lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = lm.optimize()
    except Exception:
        # Unused `as e` binding removed; logging.exception already records
        # the active exception and traceback.
        logging.exception("LM Optimization failed")
        return
    # Error drops from ~2764.22 to ~0.046
    logging.info(f"final error: {graph.error(result)}")
Exemplo n.º 19
0
    def run(self, T=12, compute_covariances=False, verbose=True):
        """Simulate T seconds of IMU data, optimize an ImuFactor graph, and
        report trajectory error and integration timing.

        Args:
            T: duration in seconds; one IMU factor is created per second.
            compute_covariances: if True, also print marginal covariances.
            verbose: if True, print each IMU factor and its prediction.
        """
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
        pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)

        # T = 12
        num_poses = T  # assumes 1 factor per second
        initial = gtsam.Values()
        initial.insert(BIAS_KEY, self.actualBias)

        # simulate the loop
        i = 0  # state index
        initial_state_i = self.scenario.navState(0)
        initial.insert(X(i), initial_state_i.pose())
        initial.insert(V(i), initial_state_i.velocity())

        # add prior on beginning
        self.addPrior(0, graph)

        gtNavStates = []
        predictedNavStates = []
        integrationTime = []
        # Main simulation loop: integrate IMU samples at dt; once per second,
        # emit an ImuFactor and a (noise-perturbed) initial state.
        for k, t in enumerate(np.arange(0, T, self.dt)):
            # get measurements and add them to PIM
            measuredOmega = self.runner.measuredAngularVelocity(t)
            measuredAcc = self.runner.measuredSpecificForce(t)
            start = time()
            pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)
            # Record wall-clock time of each integration step.
            integrationTime.append(time() - start)
            # Plot IMU many times
            if k % 10 == 0 and not DISABLE_VISUAL:
                self.plotImu(t, measuredOmega, measuredAcc)

            if (k + 1) % int(1 / self.dt) == 0:
                # Plot every second
                gtNavState = self.plotGroundTruthPose(t, scale=0.3)
                gtNavStates.append(gtNavState)
                plt.title("Ground Truth + Estimated Trajectory")

                # create IMU factor every second
                factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
                                         BIAS_KEY, pim)
                graph.push_back(factor)

                if verbose:
                    print(factor)
                    print(pim.predict(initial_state_i, self.actualBias))

                # Start a fresh pre-integration window for the next second.
                pim.resetIntegration()

                # Perturb the true state to create an imperfect initial guess.
                rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3) * 0.1 * 1)
                translationNoise = gtsam.Point3(*np.random.randn(3) * 1 * 1)
                poseNoise = gtsam.Pose3(rotationNoise, translationNoise)

                actual_state_i = self.scenario.navState(t + self.dt)
                if not DISABLE_VISUAL:
                    print("Actual state at {0}:\n{1}".format(
                        t + self.dt, actual_state_i))

                noisy_state_i = gtsam.NavState(
                    actual_state_i.pose().compose(poseNoise),
                    actual_state_i.velocity() + np.random.randn(3) * 0.1)

                initial.insert(X(i + 1), noisy_state_i.pose())
                initial.insert(V(i + 1), noisy_state_i.velocity())
                i += 1

        # add priors on end
        self.addPrior(num_poses - 1, graph)

        initial.print_("Initial values:")

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()

        result.print_("Optimized values:")

        if compute_covariances:
            # Calculate and print marginal covariances
            marginals = gtsam.Marginals(graph, result)
            print("Covariance on bias:\n",
                  marginals.marginalCovariance(BIAS_KEY))
            for i in range(num_poses):
                print("Covariance on pose {}:\n{}\n".format(
                    i, marginals.marginalCovariance(X(i))))
                print("Covariance on vel {}:\n{}\n".format(
                    i, marginals.marginalCovariance(V(i))))

        # Plot resulting poses
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            predictedNavStates.append(pose_i)
            if not DISABLE_VISUAL:
                plot_pose3(POSES_FIG, pose_i, 1)
            i += 1
        # plt.title("Estimated Trajectory")

        if not DISABLE_VISUAL:
            gtsam.utils.plot.set_axes_equal(POSES_FIG)

        print("Bias Values", result.atConstantBias(BIAS_KEY))

        # Absolute trajectory error: squared norm of the log of the relative
        # transform between ground truth and estimate, RMS-ed below.
        # NOTE(review): predicted states are offset by one ([1:]) against the
        # ground-truth list — confirm the intended alignment.
        ATE = []
        # import ipdb; ipdb.set_trace()
        for gt, pred in zip(gtNavStates, predictedNavStates[1:]):
            delta = gt.inverse().compose(pred)
            ATE.append(np.linalg.norm(delta.Logmap(delta))**2)
        print("ATE={}".format(np.sqrt(np.mean(ATE))))

        print("Run time={}".format(np.median(integrationTime)))
        plt.ioff()
        plt.show()
Exemplo n.º 20
0
def run(slam_request=DEFAULT_REQUEST):
    """Build and optimize a planar SLAM factor graph from `slam_request`.

    Args:
        slam_request: request object carrying `priors`,
            `between_pose_factors`, `bearing_range_factors`, a name->key
            `symbols` mapping, and per-key `initial_estimates`.

    Returns:
        PlanarSlamOutput with per-variable estimates (as plain lists) and
        marginal covariances keyed by symbol name.

    Raises:
        Exception: if an initial estimate is neither Pose2 nor Point2.
    """
    graph = gtsam.NonlinearFactorGraph()

    # Name -> gtsam key mapping for the unknown variables
    unknowns = slam_request.symbols

    # Priors anchor the solution (e.g. pose X1 at the origin). A prior
    # factor consists of a mean and a noise model.
    for prior in slam_request.priors:
        graph.add(prior)

    # Odometry (Between) factors between consecutive poses
    for factor in slam_request.between_pose_factors:
        graph.add(factor)

    # Bearing-range measurements to landmarks
    for factor in slam_request.bearing_range_factors:
        graph.add(factor)

    # Print graph
    print("Factor Graph:\n{}".format(graph))

    # Create (deliberately inaccurate) initial estimate.
    # BUG FIX: dict.iteritems() is Python 2 only -- use items().
    initial_estimate = gtsam.Values()
    for variable, estimate in slam_request.initial_estimates.items():
        initial_estimate.insert(variable, estimate)

    # Print
    print("Initial Estimate:\n{}".format(initial_estimate))

    # Optimize using Levenberg-Marquardt optimization. The optimizer
    # accepts an optional set of configuration parameters, controlling
    # things like convergence criteria, the type of linear system solver
    # to use, and the amount of information displayed during optimization.
    # Here we will use the default set of parameters.  See the
    # documentation for the full set of parameters.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    output_estimations = {}
    # Invert the name->key map so results can be reported by symbol name
    reverse_lookup = {key: name for name, key in unknowns.items()}

    # NOTE: the loop variable is renamed so it no longer shadows the
    # gtsam.Values object built above.
    for variable, guess in slam_request.initial_estimates.items():
        if isinstance(guess, gtsam.Pose2):
            pose = result.atPose2(variable)
            estimation = [pose.x(), pose.y(), pose.theta()]
        elif isinstance(guess, gtsam.Point2):
            point = result.atPoint2(variable)
            estimation = [point.x(), point.y()]
        else:
            # BUG FIX: the original concatenated str + type (a TypeError at
            # raise time) and reported type(variable) instead of the type
            # of the unsupported estimate.
            raise Exception("Unsupported type: {}".format(type(guess)))

        output_estimations[reverse_lookup[variable]] = estimation

    # Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    covariance_dict = {}

    for (string, variable) in unknowns.items():
        covariance = marginals.marginalCovariance(variable)
        print("{} covariance:\n{}\n".format(string, covariance))
        covariance_dict[string] = covariance

    return PlanarSlamOutput(
        result=output_estimations,
        covariance=covariance_dict,
    )
Exemplo n.º 21
0
    def run(self):
        """Simulate an IMU scenario, build an ImuFactor graph, optimize it,
        print marginal covariances, and plot the estimated poses.

        NOTE(review): assumes self.params, self.actualBias, self.scenario,
        self.runner, self.dt, addPrior, plotImu and plotGroundTruthPose are
        set up by the enclosing class -- confirm against __init__.
        """
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
        pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)

        # Jacobian placeholder handed to pim.predict() in the debug print
        H9 = gtsam.OptionalJacobian9()

        T = 12  # total simulated time [s]
        num_poses = T + 1  # assumes 1 factor per second
        initial = gtsam.Values()
        initial.insert(BIAS_KEY, self.actualBias)
        # Seed the initial estimate with the ground-truth nav states
        for i in range(num_poses):
            state_i = self.scenario.navState(float(i))
            initial.insert(X(i), state_i.pose())
            initial.insert(V(i), state_i.velocity())

        # simulate the loop
        i = 0  # state index
        actual_state_i = self.scenario.navState(0)
        for k, t in enumerate(np.arange(0, T, self.dt)):
            # get measurements and add them to PIM
            measuredOmega = self.runner.measuredAngularVelocity(t)
            measuredAcc = self.runner.measuredSpecificForce(t)
            pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)

            # Plot IMU many times
            if k % 10 == 0:
                self.plotImu(t, measuredOmega, measuredAcc)

            # Plot every second
            if k % int(1 / self.dt) == 0:
                self.plotGroundTruthPose(t)

            # create IMU factor every second
            if (k + 1) % int(1 / self.dt) == 0:
                factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
                                         BIAS_KEY, pim)
                graph.push_back(factor)
                if True:  # debug toggle: dump factor and PIM prediction
                    print(factor)
                    H2 = gtsam.OptionalJacobian96()
                    print(pim.predict(actual_state_i, self.actualBias, H9, H2))
                # reset so the next factor integrates a fresh 1s interval
                pim.resetIntegration()
                actual_state_i = self.scenario.navState(t + self.dt)
                i += 1

        # add priors on beginning and end
        self.addPrior(0, graph)
        self.addPrior(num_poses - 1, graph)

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()

        # Calculate and print marginal covariances
        marginals = gtsam.Marginals(graph, result)
        print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY))
        for i in range(num_poses):
            print("Covariance on pose {}:\n{}\n".format(
                i, marginals.marginalCovariance(X(i))))
            print("Covariance on vel {}:\n{}\n".format(
                i, marginals.marginalCovariance(V(i))))

        # Plot resulting poses
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            plotPose3(POSES_FIG, pose_i, 0.1)
            i += 1
        print(result.atConstantBias(BIAS_KEY))

        # turn interactive mode off and block on the final figure
        plt.ioff()
        plt.show()
Exemplo n.º 22
0
# Relative pose (Between) constraints linking the numbered poses.
# NOTE(review): `graph`, `model` and the `gt` alias are defined earlier in
# this script, outside the visible chunk.
graph.add(gt.BetweenFactorPose2(8, 2, gt.Pose2(2.9161, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(2, 6, gt.Pose2(-0.3506, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(6, 7, gt.Pose2(-2.1365, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(1, 8, gt.Pose2(0.3261, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(8, 6, gt.Pose2(2.5653, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(5, 6, gt.Pose2(0.8577, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(4, 5, gt.Pose2(0.4074, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(3, 4, gt.Pose2(-0.2153, 0.0, 0.0), model))
graph.add(gt.BetweenFactorPose2(3, 6, gt.Pose2(1.0458, 0.0, 0.0), model))

graph.print("\nFactor Graph\n")

# Initial guesses for all eight poses (deliberately rough)
initialEstimate = gt.Values()
initialEstimate.insert(1, gt.Pose2(0.0, 0.0, 0.0))
initialEstimate.insert(2, gt.Pose2(3.2456, 0.0, 0.0))
initialEstimate.insert(3, gt.Pose2(1.8443, 0.0, 0.0))
initialEstimate.insert(4, gt.Pose2(1.6266, 0.0, 0.0))
initialEstimate.insert(5, gt.Pose2(2.034, 0.0, 0.0))
initialEstimate.insert(6, gt.Pose2(2.8917, 0.0, 0.0))
initialEstimate.insert(7, gt.Pose2(0.7579, 0.0, 0.0))
initialEstimate.insert(8, gt.Pose2(0.3261, 0.0, 0.0))
initialEstimate.print("\nInitial Estimate:\n")  # print

# Optimize with Levenberg-Marquardt using tightened stopping criteria
parameters = gt.LevenbergMarquardtParams()
parameters.relativeErrorTol = 1e-5
parameters.maxIterations = 100
optimizer = gt.LevenbergMarquardtOptimizer(graph, initialEstimate, parameters)
result = optimizer.optimize()
result.print("Final Result:\n")

Exemplo n.º 23
0
def run(args: argparse.Namespace) -> None:
    """Run LM optimization with BAL input data and report resulting error.

    Args:
        args: parsed command line; must provide `input_file`, the path to a
            BAL-format dataset.
    """
    input_file = args.input_file

    # Load the SfM data from file
    scene_data = SfmData.FromBalFile(input_file)
    logging.info("read %d tracks on %d cameras\n", scene_data.numberTracks(),
                 scene_data.numberCameras())

    # Create a factor graph
    graph = gtsam.NonlinearFactorGraph()

    # We share *one* noiseModel between all projection factors
    noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Add measurements to the factor graph
    for j in range(scene_data.numberTracks()):
        track = scene_data.track(j)  # SfmTrack
        # retrieve the SfmMeasurement objects
        for m_idx in range(track.numberMeasurements()):
            # i represents the camera index, and uv is the 2d measurement
            i, uv = track.measurement(m_idx)
            # note use of shorthand symbol P for landmarks
            graph.add(GeneralSFMFactorCal3Bundler(uv, noise, i, P(j)))

    # Add a prior on pose x1. This indirectly specifies where the origin is.
    graph.push_back(
        PriorFactorPinholeCameraCal3Bundler(
            0, scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1)))
    # Also add a prior on the position of the first landmark to fix the scale
    graph.push_back(
        PriorFactorPoint3(P(0),
                          scene_data.track(0).point3(),
                          gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))

    # Create initial estimate
    initial = gtsam.Values()

    # add each PinholeCameraCal3Bundler.
    # (The original kept a redundant manual counter -- `i = 0` before the
    # loop and a dead `i += 1` inside it -- that merely shadowed the loop
    # variable; the loop index alone is the camera key.)
    for i in range(scene_data.numberCameras()):
        initial.insert(i, scene_data.camera(i))

    # add each SfmTrack's triangulated point
    for j in range(scene_data.numberTracks()):
        initial.insert(P(j), scene_data.track(j).point3())

    # Optimize the graph and print results
    try:
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("ERROR")
        lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = lm.optimize()
    except RuntimeError:
        logging.exception("LM Optimization failed")
        return

    # Error drops from ~2764.22 to ~0.046
    logging.info("initial error: %f", graph.error(initial))
    logging.info("final error: %f", graph.error(result))

    plot_scene(scene_data, result)
Exemplo n.º 24
0
# Simple 1D odometry example: three poses on a line, one prior, two
# odometry constraints, optimized with Levenberg-Marquardt.
graph = gtsam.NonlinearFactorGraph()

# Add a prior on the first pose, setting it to the origin
# A prior factor consists of a mean and a noise model (covariance matrix)
priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

# Add odometry factors
odometry = gtsam.Pose2(2.0, 0.0, 0.0)  # each step moves 2m forward
# For simplicity, we will use the same noise model for each odometry factor
odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
# Create odometry (Between) factors between consecutive poses
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))
graph.print("\nFactor Graph:\n")

# Create the data structure to hold the initialEstimate estimate to the solution
# For illustrative purposes, these have been deliberately set to incorrect values
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
initial.print("\nInitial Estimate:\n")

# optimize using Levenberg-Marquardt optimization (default parameters)
params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
result.print("\nFinal Result:\n")
Exemplo n.º 25
0
def main():
    """Main runner"""

    # Keys for the three robot poses and the two observed landmarks
    x1, x2, x3 = X(1), X(2), X(3)
    l1, l2 = L(4), L(5)

    # Start from an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # A prior on the first pose anchors the trajectory at the origin;
    # a prior factor consists of a mean and a noise model
    graph.add(
        gtsam.PriorFactorPose2(x1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))

    # Odometry (Between) factors linking consecutive poses
    for start, end in ((x1, x2), (x2, x3)):
        graph.add(
            gtsam.BetweenFactorPose2(start, end, gtsam.Pose2(2.0, 0.0, 0.0),
                                     ODOMETRY_NOISE))

    # Bearing-range observations of the two landmarks, one tuple per factor:
    # (pose key, landmark key, bearing in degrees, range)
    observations = (
        (x1, l1, 45, np.sqrt(4.0 + 4.0)),
        (x2, l1, 90, 2.0),
        (x3, l2, 90, 2.0),
    )
    for pose_key, lm_key, bearing_deg, rng in observations:
        graph.add(
            gtsam.BearingRangeFactor2D(pose_key, lm_key,
                                       gtsam.Rot2.fromDegrees(bearing_deg),
                                       rng, MEASUREMENT_NOISE))

    # Print graph
    print("Factor Graph:\n{}".format(graph))

    # Deliberately perturbed initial guesses for poses and landmarks
    initial_estimate = gtsam.Values()
    initial_estimate.insert(x1, gtsam.Pose2(-0.25, 0.20, 0.15))
    initial_estimate.insert(x2, gtsam.Pose2(2.30, 0.10, -0.20))
    initial_estimate.insert(x3, gtsam.Pose2(4.10, 0.10, 0.10))
    initial_estimate.insert(l1, gtsam.Point2(1.80, 2.10))
    initial_estimate.insert(l2, gtsam.Point2(4.10, 1.80))

    # Print
    print("Initial Estimate:\n{}".format(initial_estimate))

    # Optimize with Levenberg-Marquardt. The optimizer accepts an optional
    # set of configuration parameters (convergence criteria, linear solver
    # choice, verbosity); the defaults are used here.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    # Report the marginal covariance of every variable
    marginals = gtsam.Marginals(graph, result)
    for key, label in ((x1, "X1"), (x2, "X2"), (x3, "X3"), (l1, "L1"),
                       (l2, "L2")):
        print("{} covariance:\n{}\n".format(
            label, marginals.marginalCovariance(key)))
    for i, imu_measurement in enumerate(imu_measurements):
        if i in preintegration_steps_set:
            predicted_nav_state = current_preintegrated_IMU.predict(
                params.initial_state, params.IMU_bias)
            initial_values.insert(symbol('x', i), predicted_nav_state.pose())
            initial_values.insert(symbol('v', i),
                                  predicted_nav_state.velocity())

        current_preintegrated_IMU.integrateMeasurement(imu_measurements[i, :3],
                                                       imu_measurements[i, 3:],
                                                       params.dt)

    ###############################    Optimize the factor graph   ####################################

    # Use the Levenberg-Marquardt algorithm
    optimization_params = gtsam.LevenbergMarquardtParams()
    optimization_params.setVerbosityLM("SUMMARY")
    optimizer = gtsam.LevenbergMarquardtOptimizer(factor_graph, initial_values,
                                                  optimization_params)
    optimization_result = optimizer.optimize()

    ###############################        Plot the solution       ####################################

    for i in preintegration_steps:
        # Ground truth pose
        plot_pose3(1, gtsam.Pose3(true_poses[i]), 0.3)
        # Estimated pose
        plot_pose3(1, optimization_result.atPose3(symbol('x', i)), 0.1)

    ax = plt.gca()
    ax.set_xlim3d(-5, 5)
Exemplo n.º 27
0
    def run(self, initial_data: SfmData) -> SfmResult:
        """Run the bundle adjustment by forming factor graph and optimizing using Levenberg–Marquardt optimization.

        Args:
            initial_data: initialized cameras, tracks w/ their 3d landmark from triangulation.

        Returns:
            optimized camera poses, 3D point w/ tracks, and error metrics
            (an SfmResult; on optimizer failure the result wraps an empty
            SfmData and a NaN error).
        """
        logger.info(
            f"Input: {initial_data.number_tracks()} tracks on {initial_data.number_cameras()} cameras\n"
        )

        # noise model for measurements -- one pixel in u and v
        measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
            IMG_MEASUREMENT_DIM, 1.0)

        # Create a factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add measurements to the factor graph
        for j in range(initial_data.number_tracks()):
            track = initial_data.track(j)  # SfmTrack
            # retrieve the SfmMeasurement objects
            for m_idx in range(track.number_measurements()):
                # i represents the camera index, and uv is the 2d measurement
                i, uv = track.measurement(m_idx)
                # note use of shorthand symbols C and P
                graph.add(
                    GeneralSFMFactorCal3Bundler(uv, measurement_noise, C(i),
                                                P(j)))

        # Add a prior on pose x1. This indirectly specifies where the origin is.
        graph.push_back(
            gtsam.PriorFactorPinholeCameraCal3Bundler(
                C(0),
                initial_data.camera(0),
                gtsam.noiseModel.Isotropic.Sigma(PINHOLE_CAM_CAL3BUNDLER_DOF,
                                                 0.1),
            ))
        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1),
            ))

        # Create initial estimate
        initial = gtsam.Values()

        # add each PinholeCameraCal3Bundler.
        # (The original kept separate counters `i`/`j` that always equaled
        # the loop variables `cam_idx`/`t_idx`; the loop index alone is the
        # key index.)
        for cam_idx in range(initial_data.number_cameras()):
            initial.insert(C(cam_idx), initial_data.camera(cam_idx))

        # add each SfmTrack's triangulated point
        for t_idx in range(initial_data.number_tracks()):
            initial.insert(P(t_idx), initial_data.track(t_idx).point3())

        # Optimize the graph and print results
        try:
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("ERROR")
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
            result_values = lm.optimize()
        except Exception:
            # logger.exception already records the traceback; the unused
            # `as e` binding has been dropped.
            logger.exception("LM Optimization failed")
            return SfmResult(SfmData(), float("Nan"))

        final_error = graph.error(result_values)

        # Error drops from ~2764.22 to ~0.046
        logger.info(f"initial error: {graph.error(initial):.2f}")
        logger.info(f"final error: {final_error:.2f}")

        # construct the results
        optimized_data = values_to_sfm_data(result_values, initial_data)
        sfm_result = SfmResult(optimized_data, final_error)

        return sfm_result