Example #1
    def test_SFMExample(self):
        options = generator.Options()
        options.triangle = False
        options.nrCameras = 10

        [data, truth] = generator.generate_data(options)

        measurementNoiseSigma = 1.0
        pointNoiseSigma = 0.1
        poseNoiseSigmas = np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])

        graph = gtsam.NonlinearFactorGraph()

        # Add factors for all measurements
        measurementNoise = Isotropic.Sigma(2, measurementNoiseSigma)
        for i in range(len(data.Z)):
            for k in range(len(data.Z[i])):
                j = data.J[i][k]
                graph.add(gtsam.GenericProjectionFactorCal3_S2(
                    data.Z[i][k], measurementNoise,
                    X(i), P(j), data.K))

        posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas)
        graph.add(gtsam.PriorFactorPose3(X(0),
                                   truth.cameras[0].pose(), posePriorNoise))
        pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma)
        graph.add(gtsam.PriorFactorPoint3(P(0),
                                    truth.points[0], pointPriorNoise))

        # Initial estimate
        initialEstimate = gtsam.Values()
        for i in range(len(truth.cameras)):
            pose_i = truth.cameras[i].pose()
            initialEstimate.insert(X(i), pose_i)
        for j in range(len(truth.points)):
            point_j = truth.points[j]
            initialEstimate.insert(P(j), point_j)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        for i in range(5):
            optimizer.iterate()
        result = optimizer.values()

        # Marginalization
        marginals = gtsam.Marginals(graph, result)
        marginals.marginalCovariance(P(0))
        marginals.marginalCovariance(X(0))

        # Check optimized results, should be equal to ground truth
        for i in range(len(truth.cameras)):
            pose_i = result.atPose3(X(i))
            self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)

        for j in range(len(truth.points)):
            point_j = result.atPoint3(P(j))
            self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
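The test above leans on several names defined elsewhere in its module. Below is a minimal sketch of plausible definitions, assuming the modern wrapper spelling (gtsam.noiseModel.*, gtsam.symbol_shorthand) and that generator is GTSAM's bundled visual data generator; older wrappers spell the noise models gtsam.noiseModel_Isotropic / gtsam.noiseModel_Diagonal, and the exact import paths are assumptions.

import numpy as np

import gtsam
import gtsam.utils.visual_data_generator as generator  # assumed import path
from gtsam.symbol_shorthand import P, X  # P(j): landmark j, X(i): pose i

# Bare factory names used in the test body.
Isotropic = gtsam.noiseModel.Isotropic
Diagonal = gtsam.noiseModel.Diagonal

# gtsamAssertEquals presumably comes from the GtsamTestCase base class
# (gtsam.utils.test_case), which wraps equality checks with a tolerance.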
Example #2
    def trajectory_estimator(self, features, visible_map):
        """ Estimate current pose with matched features through GTSAM and update the trajectory
        Parameters:
            features: A Feature Object. Stores all matched Superpoint features.
            visible_map: A Map Object. Stores all matched Landmark points.
            pose: gtsam.pose3 Object. The pose at the last state from the atrium map trajectory.
        Returns:
            current_pose: gtsam.pose3 Object. The current estimate pose.

        Use input matched features as the projection factors of the graph.  
        Use input visible_map (match landmark points of the map) as the landmark priors of the graph.
        Use the last time step pose from the trajectory as the initial estimate value of the current pose
        """
        assert len(self.atrium_map.trajectory) != 0, "Trajectory is empty."
        pose = self.atrium_map.trajectory[-1]

        # Initialize factor graph
        graph = gtsam.NonlinearFactorGraph()
        initialEstimate = gtsam.Values()

        # Add projection factors with matched feature points for all measurements
        measurementNoiseSigma = 1.0
        measurementNoise = gtsam.noiseModel_Isotropic.Sigma(
            2, measurementNoiseSigma)
        for i, feature in enumerate(features.key_point_list):
            graph.add(gtsam.GenericProjectionFactorCal3_S2(
                feature, measurementNoise,
                X(0), P(i), self.calibration))

        # Create initial estimate for the pose
        # Because the robot moves slowly, we can use the previous pose as an initial estimate of the current pose.
        initialEstimate.insert(X(0), pose)

        # Create priors for the visible landmarks
        # Because the map is known, we add the landmarks from the visible map as priors with nearly zero noise.
        pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.01)
        for i, landmark in enumerate(visible_map.landmark_list):
            graph.add(gtsam.PriorFactorPoint3(P(i),
                                              landmark, pointPriorNoise))
            initialEstimate.insert(P(i), landmark)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimize()

        # Add current pose to the trajectory
        current_pose = result.atPose3(X(0))
        self.atrium_map.append_trajectory(current_pose)

        return current_pose
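trajectory_estimator only reads a couple of attributes from its inputs. A hedged sketch of the minimal container shapes it assumes follows; the project's real Feature and Map classes are presumably richer.

import gtsam

class Features:
    """Minimal stand-in: only key_point_list is read by trajectory_estimator."""
    def __init__(self, key_points):
        self.key_point_list = key_points  # list of gtsam.Point2 pixel measurements

class Map:
    """Minimal stand-in: only landmark_list is read by trajectory_estimator."""
    def __init__(self, landmarks):
        self.landmark_list = landmarks  # list of gtsam.Point3 landmarks in the world frame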
Example #3
    def pose_estimate(self, observations, estimated_pose):
        """ Estimate current pose with matched features through GTSAM and update the trajectory
        Parameters:
            observations - [(Point2(), Point3())]
            pose - gtsam.pose3 Object. The pose at the last state from the trajectory.
        Returns:
            current_pose - gtsam.pose3 Object. The current estimate pose.
        """
        if len(observations) == 0:
            print("No observation")
            return estimated_pose
        # Initialize factor graph
        graph = gtsam.NonlinearFactorGraph()
        initial_estimate = gtsam.Values()

        measurement_noise_sigma = 1.0
        measurement_noise = gtsam.noiseModel_Isotropic.Sigma(
            2, measurement_noise_sigma)

        # Because the map is known, we add the landmarks from the map as priors with nearly zero noise.
        point_prior_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.01)
        for i, observation in enumerate(observations):
            print(i)
            # Add projection factors with matched feature points
            graph.add(
                gtsam.GenericProjectionFactorCal3_S2(observation[0],
                                                     measurement_noise, X(0),
                                                     P(i), self.calibration))
            # Create priors for all observations
            graph.add(
                gtsam.PriorFactorPoint3(P(i), observation[1],
                                        point_prior_noise))
            initial_estimate.insert(P(i), observation[1])
        # Create initial estimate for the pose
        # Because the robot moves slowly, we can use the previous pose as an initial estimate of the current pose.
        initial_estimate.insert(X(0), estimated_pose)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate)
        result = optimizer.optimize()
        # Extract the optimized current pose
        current_pose = result.atPose3(X(0))
        print(current_pose)

        return current_pose
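Both localization methods above follow the same pattern: one unknown pose X(0), known landmarks held in place by tight priors, and projection factors tying the two together. Below is a self-contained sketch of that pattern on synthetic data, using the newer gtsam.noiseModel spelling; the calibration values, landmark positions, and pose perturbation are made up for illustration.

import gtsam
from gtsam.symbol_shorthand import P, X

# Synthetic "known map" and ground-truth camera (placeholder values).
calibration = gtsam.Cal3_S2(500.0, 500.0, 0.0, 320.0, 240.0)
true_pose = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(0.0, 0.0, -5.0))
camera = gtsam.PinholeCameraCal3_S2(true_pose, calibration)
landmarks = [gtsam.Point3(1, 1, 1), gtsam.Point3(-1, 1, 1),
             gtsam.Point3(1, -1, 1), gtsam.Point3(-1, -1, 1)]
observations = [(camera.project(pt), pt) for pt in landmarks]  # [(Point2, Point3)]

graph = gtsam.NonlinearFactorGraph()
initial = gtsam.Values()
measurement_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)
point_prior_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.01)
for i, (uv, point) in enumerate(observations):
    graph.add(gtsam.GenericProjectionFactorCal3_S2(
        uv, measurement_noise, X(0), P(i), calibration))
    graph.add(gtsam.PriorFactorPoint3(P(i), point, point_prior_noise))
    initial.insert(P(i), point)

# Stand-in for "previous pose of the trajectory": the true pose, perturbed.
initial.insert(X(0), true_pose.compose(gtsam.Pose3(
    gtsam.Rot3.Rodrigues(0.05, -0.05, 0.02), gtsam.Point3(0.1, -0.1, 0.1))))

result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()
print(result.atPose3(X(0)))  # should be close to true_pose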
Example #4
    def __construct_factor_graph(
        self,
        cameras_to_model: List[int],
        initial_data: GtsfmData,
        absolute_pose_priors: List[Optional[PosePrior]],
        relative_pose_priors: Dict[Tuple[int, int], PosePrior],
    ) -> NonlinearFactorGraph:
        """Construct the factor graph with reprojection factors, BetweenFactors, and prior factors."""
        is_fisheye_calibration = isinstance(
            initial_data.get_camera(cameras_to_model[0]),
            PinholeCameraCal3Fisheye)

        graph = NonlinearFactorGraph()

        # Add the reprojection, between, pose-prior, and calibration-prior factors
        graph.push_back(
            self.__reprojection_factors(
                initial_data=initial_data,
                is_fisheye_calibration=is_fisheye_calibration))
        graph.push_back(
            self._between_factors(relative_pose_priors=relative_pose_priors,
                                  cameras_to_model=cameras_to_model))
        graph.push_back(
            self.__pose_priors(
                absolute_pose_priors=absolute_pose_priors,
                initial_data=initial_data,
                camera_for_origin=cameras_to_model[0],
            ))
        graph.push_back(
            self.__calibration_priors(initial_data, cameras_to_model,
                                      is_fisheye_calibration))

        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.get_track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1)))

        return graph
Example #5
    def run(self, initial_data: GtsfmData) -> GtsfmData:
        """Run the bundle adjustment by forming factor graph and optimizing using Levenberg–Marquardt optimization.

        Args:
            initial_data: initialized cameras, tracks w/ their 3d landmark from triangulation.

        Results:
            optimized camera poses, 3D point w/ tracks, and error metrics.
        """
        logger.info(
            f"Input: {initial_data.number_tracks()} tracks on {len(initial_data.get_valid_camera_indices())} cameras\n"
        )

        # noise model for measurements -- one pixel in u and v
        measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
            IMG_MEASUREMENT_DIM, 1.0)

        # Create a factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add measurements to the factor graph
        for j in range(initial_data.number_tracks()):
            track = initial_data.get_track(j)  # SfmTrack
            # retrieve the SfmMeasurement objects
            for m_idx in range(track.number_measurements()):
                # i represents the camera index, and uv is the 2d measurement
                i, uv = track.measurement(m_idx)
                # note use of shorthand symbols C and P
                graph.add(
                    GeneralSFMFactorCal3Bundler(uv, measurement_noise, C(i),
                                                P(j)))

        # get all the valid camera indices, which need to be added to the graph.
        valid_camera_indices = initial_data.get_valid_camera_indices()

        # Add a prior on the first camera. This indirectly specifies where the origin is.
        graph.push_back(
            gtsam.PriorFactorPinholeCameraCal3Bundler(
                C(valid_camera_indices[0]),
                initial_data.get_camera(valid_camera_indices[0]),
                gtsam.noiseModel.Isotropic.Sigma(PINHOLE_CAM_CAL3BUNDLER_DOF,
                                                 0.1),
            ))
        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.get_track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1)))

        # Create initial estimate
        initial = gtsam.Values()

        # add each PinholeCameraCal3Bundler
        for i in valid_camera_indices:
            camera = initial_data.get_camera(i)
            initial.insert(C(i), camera)

        # add each SfmTrack
        for j in range(initial_data.number_tracks()):
            track = initial_data.get_track(j)
            initial.insert(P(j), track.point3())

        # Optimize the graph and print results
        try:
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("ERROR")
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
            result_values = lm.optimize()
        except Exception:
            logger.exception("LM Optimization failed")
            # as we did not perform the bundle adjustment, we skip computing the total reprojection error
            return GtsfmData(initial_data.number_images())

        final_error = graph.error(result_values)

        # Log the error before and after optimization
        logger.info(f"initial error: {graph.error(initial):.2f}")
        logger.info(f"final error: {final_error:.2f}")

        # construct the results
        optimized_data = values_to_gtsfm_data(result_values, initial_data)

        metrics_dict = {}
        metrics_dict["before_filtering"] = optimized_data.aggregate_metrics()
        logger.info("[Result] Number of tracks before filtering: %d",
                    metrics_dict["before_filtering"]["number_tracks"])

        # filter the largest errors
        filtered_result = optimized_data.filter_landmarks(
            self.output_reproj_error_thresh)

        metrics_dict["after_filtering"] = filtered_result.aggregate_metrics()
        io_utils.save_json_file(
            os.path.join(METRICS_PATH, "bundle_adjustment_metrics.json"),
            metrics_dict)

        logger.info("[Result] Number of tracks after filtering: %d",
                    metrics_dict["after_filtering"]["number_tracks"])
        logger.info(
            "[Result] Mean track length %.3f",
            metrics_dict["after_filtering"]["3d_track_lengths"]["mean"])
        logger.info(
            "[Result] Median track length %.3f",
            metrics_dict["after_filtering"]["3d_track_lengths"]["median"])
        filtered_result.log_scene_reprojection_error_stats()

        return filtered_result
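The two GTSfM methods above reference shorthand symbols and module-level constants that are not shown in the snippets. Here is a hedged sketch of what they presumably look like; the constant values follow from the factor dimensions used above, but the exact module-level spelling is an assumption.

import gtsam
from gtsam import GeneralSFMFactorCal3Bundler
from gtsam.symbol_shorthand import C, P  # C(i): camera i, P(j): 3D point j

IMG_MEASUREMENT_DIM = 2          # a (u, v) pixel measurement
POINT3_DOF = 3                   # a 3D landmark
PINHOLE_CAM_CAL3BUNDLER_DOF = 9  # 6 pose DOF + 3 Cal3Bundler intrinsics (f, k1, k2)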
Example #6
def visual_ISAM2_example():
    plt.ion()

    # Define the camera calibration parameters
    K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    measurement_noise = gtsam.noiseModel_Isotropic.Sigma(
        2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()

    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create an iSAM2 object. Unlike iSAM1, which performs periodic batch steps
    # to maintain proper linearization and efficient variable ordering, iSAM2
    # performs partial relinearization/reordering at each step. A parameter
    # structure is available that allows the user to set various properties, such
    # as the relinearization threshold and type of linear solver. For this
    # example, we set the relinearization threshold small so the iSAM2 result
    # will approach the batch result.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.01)
    parameters.setRelinearizeSkip(1)
    isam = gtsam.ISAM2(parameters)

    # Create a Factor Graph and Values to hold the new data
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    #  Loop over the different poses, adding the observations to iSAM incrementally
    for i, pose in enumerate(poses):

        # Add factors for each landmark observation
        for j, point in enumerate(points):
            camera = gtsam.PinholeCameraCal3_S2(pose, K)
            measurement = camera.project(point)
            graph.push_back(
                gtsam.GenericProjectionFactorCal3_S2(measurement,
                                                     measurement_noise, X(i),
                                                     L(j), K))

        # Add an initial guess for the current pose
        # Intentionally initialize the variables off from the ground truth
        initial_estimate.insert(
            X(i),
            pose.compose(
                gtsam.Pose3(gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25),
                            gtsam.Point3(0.05, -0.10, 0.20))))

        # If this is the first iteration, add a prior on the first pose to set the
        # coordinate frame and a prior on the first landmark to set the scale.
        # Also, as iSAM solves incrementally, we must wait until each is observed
        # at least twice before adding it to iSAM.
        if i == 0:
            # Add a prior on pose x0
            pose_noise = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])
            )  # 0.3 rad std on roll, pitch, yaw; 10 cm std on x, y, z
            graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise))

            # Add a prior on landmark l0
            point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
            graph.push_back(
                gtsam.PriorFactorPoint3(L(0), points[0],
                                        point_noise))  # add directly to graph

            # Add initial guesses to all observed landmarks
            # Intentionally initialize the variables off from the ground truth
            for j, point in enumerate(points):
                initial_estimate.insert(
                    L(j),
                    gtsam.Point3(point.x() - 0.25,
                                 point.y() + 0.20,
                                 point.z() + 0.15))
        else:
            # Update iSAM with the new factors
            isam.update(graph, initial_estimate)
            # Each call to iSAM2 update(*) performs one iteration of the iterative nonlinear solver.
            # If accuracy is desired at the expense of time, update(*) can be called additional
            # times to perform multiple optimizer iterations every step.
            isam.update()
            current_estimate = isam.calculateEstimate()
            print("****************************************************")
            print("Frame", i, ":")
            for j in range(i + 1):
                print(X(j), ":", current_estimate.atPose3(X(j)))

            for j in range(len(points)):
                print(L(j), ":", current_estimate.atPoint3(L(j)))

            visual_ISAM2_plot(current_estimate)

            # Clear the factor graph and values for the next iteration
            graph.resize(0)
            initial_estimate.clear()

    plt.ioff()
    plt.show()
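The iSAM2 example above assumes a script header roughly like the sketch below: plotting, NumPy, the bundled SFMdata ground-truth generator, and the X/L key shorthands. The import paths and the shorthand spelling vary with the wrapper version (this snippet uses the older gtsam.noiseModel_* names), so treat these as assumptions.

import matplotlib.pyplot as plt
import numpy as np

import gtsam
from gtsam.examples import SFMdata       # createPoints() / createPoses() ground truth
from gtsam.symbol_shorthand import L, X  # L(j): landmark j, X(i): pose i

# visual_ISAM2_plot is a small plotting helper defined alongside the example
# (built on gtsam.utils.plot); it is not reproduced here.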
Example #7
def initialize(data, truth, options):
    # Initialize iSAM
    params = gtsam.ISAM2Params()
    if options.alwaysRelinearize:
        params.relinearizeSkip = 1
    isam = gtsam.ISAM2(params=params)

    # Add constraints/priors
    # TODO: should not be from ground truth!
    newFactors = gtsam.NonlinearFactorGraph()
    initialEstimates = gtsam.Values()
    for i in range(2):
        ii = symbol('x', i)
        if i == 0:
            if options.hardConstraint:  # add hard constraint
                newFactors.add(
                    gtsam.NonlinearEqualityPose3(ii, truth.cameras[0].pose()))
            else:
                newFactors.add(
                    gtsam.PriorFactorPose3(ii, truth.cameras[i].pose(),
                                           data.noiseModels.posePrior))
        initialEstimates.insert(ii, truth.cameras[i].pose())

    nextPoseIndex = 2

    # Add visual measurement factors for the first two poses and initialize
    # observed landmarks
    for i in range(2):
        ii = symbol('x', i)
        for k in range(len(data.Z[i])):
            j = data.J[i][k]
            jj = symbol('l', j)
            newFactors.add(
                gtsam.GenericProjectionFactorCal3_S2(
                    data.Z[i][k], data.noiseModels.measurement, ii, jj,
                    data.K))
            # TODO: initial estimates should not be from ground truth!
            if not initialEstimates.exists(jj):
                initialEstimates.insert(jj, truth.points[j])
            if options.pointPriors:  # add point priors
                newFactors.add(
                    gtsam.PriorFactorPoint3(jj, truth.points[j],
                                            data.noiseModels.pointPrior))

    # Add odometry between frames 0 and 1
    newFactors.add(
        gtsam.BetweenFactorPose3(symbol('x', 0), symbol('x', 1),
                                 data.odometry[1], data.noiseModels.odometry))

    # Update ISAM
    if options.batchInitialization:  # Do a full optimize for first two poses
        batchOptimizer = gtsam.LevenbergMarquardtOptimizer(
            newFactors, initialEstimates)
        fullyOptimized = batchOptimizer.optimize()
        isam.update(newFactors, fullyOptimized)
    else:
        isam.update(newFactors, initialEstimates)

    result = isam.calculateEstimate()

    return isam, result, nextPoseIndex
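initialize() pulls its noise models from a data.noiseModels container built by the data generator. Here is a hedged sketch of that container using the newer gtsam.noiseModel spelling; the field names mirror the attribute accesses above, while the sigma values are placeholders rather than the generator's actual choices.

import numpy as np
import gtsam
from gtsam import symbol  # symbol('x', i) keys poses, symbol('l', j) keys landmarks

class NoiseModels:
    """Stand-in for data.noiseModels with placeholder sigmas."""
    def __init__(self):
        self.posePrior = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]))
        self.pointPrior = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
        self.measurement = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)
        self.odometry = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]))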
Example #8
def run(args):
    """ Run LM optimization with BAL input data and report resulting error """
    input_file = gtsam.findExampleDataFile(args.input_file)

    # Load the SfM data from file
    scene_data = readBal(input_file)
    logging.info(
        f"read {scene_data.number_tracks()} tracks on {scene_data.number_cameras()} cameras\n"
    )

    # Create a factor graph
    graph = gtsam.NonlinearFactorGraph()

    # We share *one* noiseModel between all projection factors
    noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Add measurements to the factor graph
    j = 0
    for t_idx in range(scene_data.number_tracks()):
        track = scene_data.track(t_idx)  # SfmTrack
        # retrieve the SfmMeasurement objects
        for m_idx in range(track.number_measurements()):
            # i represents the camera index, and uv is the 2d measurement
            i, uv = track.measurement(m_idx)
            # note use of shorthand symbols C and P
            graph.add(GeneralSFMFactorCal3Bundler(uv, noise, C(i), P(j)))
        j += 1

    # Add a prior on the first camera. This indirectly specifies where the origin is.
    graph.push_back(
        gtsam.PriorFactorPinholeCameraCal3Bundler(
            C(0), scene_data.camera(0),
            gtsam.noiseModel.Isotropic.Sigma(9, 0.1)))
    # Also add a prior on the position of the first landmark to fix the scale
    graph.push_back(
        gtsam.PriorFactorPoint3(P(0),
                                scene_data.track(0).point3(),
                                gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))

    # Create initial estimate
    initial = gtsam.Values()

    i = 0
    # add each PinholeCameraCal3Bundler
    for cam_idx in range(scene_data.number_cameras()):
        camera = scene_data.camera(cam_idx)
        initial.insert(C(i), camera)
        i += 1

    j = 0
    # add each SfmTrack
    for t_idx in range(scene_data.number_tracks()):
        track = scene_data.track(t_idx)
        initial.insert(P(j), track.point3())
        j += 1

    # Optimize the graph and print results
    try:
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("ERROR")
        lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = lm.optimize()
    except Exception as e:
        logging.exception("LM Optimization failed")
        return
    # Error drops from ~2764.22 to ~0.046
    logging.info(f"final error: {graph.error(result)}")
Example #9
    # Add a prior factor on the initial state
    factor_graph.push_back(
        gtsam.PriorFactorPose3(symbol('x', preintegration_steps[0]),
                               params.initial_state.pose(),
                               params.initial_pose_covariance))
    factor_graph.push_back(
        gtsam.PriorFactorVector(symbol('v', preintegration_steps[0]),
                                params.initial_state.velocity(),
                                params.initial_velocity_covariance))
    factor_graph.push_back(
        gtsam.PriorFactorConstantBias(symbol('b', 0), params.IMU_bias,
                                      params.IMU_bias_covariance))

    if USE_VISUAL_FACTORS:
        factor_graph.push_back(
            gtsam.PriorFactorPoint3(symbol('m', 0), gtsam.Point3(1, 0, 0),
                                    gtsam.noiseModel_Isotropic.Sigma(3, 0.05)))

    # Other example priors, e.g. a constraint that the initial and final states should be roughly identical:
    factor_graph.push_back(
        gtsam.PriorFactorPose3(symbol('x', preintegration_steps[-1]),
                               params.initial_state.pose(),
                               params.initial_pose_covariance))
    factor_graph.push_back(
        gtsam.PriorFactorVector(symbol('v', preintegration_steps[-1]),
                                params.initial_state.velocity(),
                                params.initial_velocity_covariance))

    ####################################     Add visual factors     #######################################

    if USE_VISUAL_FACTORS:
        calibration_matrices = [
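The snippet above breaks off while setting up the visual calibration. The pose, velocity, and bias keys it places priors on are normally tied together by preintegrated IMU factors; below is a hedged, self-contained sketch of one such factor over the interval k -> k+1, using the newer gtsam.imuBias / PreintegrationParams spellings. All parameter values are placeholders.

import numpy as np
import gtsam
from gtsam import symbol

factor_graph = gtsam.NonlinearFactorGraph()

# Preintegration parameters for a Z-up world with g = 9.81 (placeholder covariances).
pim_params = gtsam.PreintegrationParams.MakeSharedU(9.81)
pim_params.setAccelerometerCovariance(0.1**2 * np.identity(3))
pim_params.setGyroscopeCovariance(0.01**2 * np.identity(3))
pim_params.setIntegrationCovariance(1e-4 * np.identity(3))

bias0 = gtsam.imuBias.ConstantBias()  # stand-in for params.IMU_bias
pim = gtsam.PreintegratedImuMeasurements(pim_params, bias0)
# pim.integrateMeasurement(measured_acc, measured_omega, dt) would be called
# once per IMU sample falling inside the interval k -> k+1.

k = 0
factor_graph.push_back(
    gtsam.ImuFactor(symbol('x', k), symbol('v', k),
                    symbol('x', k + 1), symbol('v', k + 1),
                    symbol('b', 0), pim))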
Example #10
    def run(self, initial_data: SfmData) -> SfmResult:
        """Run the bundle adjustment by forming factor graph and optimizing using Levenberg–Marquardt optimization.

        Args:
            initial_data: initialized cameras, tracks w/ their 3d landmark from triangulation.

        Results:
            optimized camera poses, 3D point w/ tracks, and error metrics.
        """
        logger.info(
            f"Input: {initial_data.number_tracks()} tracks on {initial_data.number_cameras()} cameras\n"
        )

        # noise model for measurements -- one pixel in u and v
        measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
            IMG_MEASUREMENT_DIM, 1.0)

        # Create a factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add measurements to the factor graph
        for j in range(initial_data.number_tracks()):
            track = initial_data.track(j)  # SfmTrack
            # retrieve the SfmMeasurement objects
            for m_idx in range(track.number_measurements()):
                # i represents the camera index, and uv is the 2d measurement
                i, uv = track.measurement(m_idx)
                # note use of shorthand symbols C and P
                graph.add(
                    GeneralSFMFactorCal3Bundler(uv, measurement_noise, C(i),
                                                P(j)))

        # Add a prior on the first camera. This indirectly specifies where the origin is.
        graph.push_back(
            gtsam.PriorFactorPinholeCameraCal3Bundler(
                C(0),
                initial_data.camera(0),
                gtsam.noiseModel.Isotropic.Sigma(PINHOLE_CAM_CAL3BUNDLER_DOF,
                                                 0.1),
            ))
        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1),
            ))

        # Create initial estimate
        initial = gtsam.Values()

        i = 0
        # add each PinholeCameraCal3Bundler
        for cam_idx in range(initial_data.number_cameras()):
            camera = initial_data.camera(cam_idx)
            initial.insert(C(i), camera)
            i += 1

        j = 0
        # add each SfmTrack
        for t_idx in range(initial_data.number_tracks()):
            track = initial_data.track(t_idx)
            initial.insert(P(j), track.point3())
            j += 1

        # Optimize the graph and print results
        try:
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("ERROR")
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
            result_values = lm.optimize()
        except Exception as e:
            logger.exception("LM Optimization failed")
            return SfmResult(SfmData(), float("Nan"))

        final_error = graph.error(result_values)

        # Log the error before and after optimization
        logger.info(f"initial error: {graph.error(initial):.2f}")
        logger.info(f"final error: {final_error:.2f}")

        # construct the results
        optimized_data = values_to_sfm_data(result_values, initial_data)
        sfm_result = SfmResult(optimized_data, final_error)

        return sfm_result