Code Example #1
    def optimize(self, graph_to_optimize=None):

        # Optimization
        params = gtsam.LevenbergMarquardtParams()
        if graph_to_optimize is None:
            optimizer = gtsam.LevenbergMarquardtOptimizer(
                self.graph, self.initial, params)
        else:
            optimizer = gtsam.LevenbergMarquardtOptimizer(
                graph_to_optimize, self.prop_initial, params)

        self.result = optimizer.optimize()
        self.initial = self.result
        self.resultsFlag = True
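This method assumes a surrounding class that already owns the graph, the initial values, and the result fields. A minimal, hypothetical owner is sketched below; the attribute names mirror the snippet above, but the class itself is an illustration, not code from the original project.

import gtsam

class PoseGraphBackend:
    # Hypothetical minimal owner of the attributes used by optimize() above.
    def __init__(self):
        self.graph = gtsam.NonlinearFactorGraph()   # factors accumulated so far
        self.initial = gtsam.Values()               # initial values for self.graph
        self.prop_initial = gtsam.Values()          # initial values for an externally supplied graph
        self.result = None                          # last optimization result
        self.resultsFlag = False                    # set to True once optimize() has run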
Code Example #2
File: test_Pose2SLAMExample.py  Project: pu-wei/gtsam
    def test_Pose2SLAMExample(self):
        # Assumptions
        #  - All values are axis aligned
        #  - Robot poses are facing along the X axis (horizontal, to the right in images)
        #  - We have full odometry for measurements
        #  - The robot is on a grid, moving 2 meters each step

        # Create graph container and add factors to it
        graph = gtsam.NonlinearFactorGraph()

        # Add prior
        # gaussian for prior
        priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
        priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3,
                                                                0.1]))
        # add directly to graph
        graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

        # Add odometry
        # general noisemodel for odometry
        odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))
        graph.add(
            gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2.0, 0.0, 0.0),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))

        # Add pose constraint
        model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
        graph.add(
            gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     model))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, pi / 2))
        initialEstimate.insert(4, gtsam.Pose2(4.0, 2.0, pi))
        initialEstimate.insert(5, gtsam.Pose2(2.1, 2.1, -pi / 2))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()

        # Plot Covariance Ellipses
        marginals = gtsam.Marginals(graph, result)
        P = marginals.marginalCovariance(1)

        pose_1 = result.atPose2(1)
        self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
Code Example #3
def main():
    """Main runner."""
    # Create an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Add a prior on the first point, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    priorMean = gtsam.Pose3()  # prior at origin
    graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))

    # Add GPS factors
    gps = gtsam.Point3(lat0, lon0, h0)
    graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
    print("\nFactor Graph:\n{}".format(graph))

    # Create the data structure to hold the initial estimate of the solution
    # For illustrative purposes, these have been deliberately set to incorrect values
    initial = gtsam.Values()
    initial.insert(1, gtsam.Pose3())
    print("\nInitial Estimate:\n{}".format(initial))

    # optimize using Levenberg-Marquardt optimization
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))
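The snippet above relies on module-level names (PRIOR_NOISE, GPS_NOISE, lat0, lon0, h0) that are not shown. A plausible set of definitions, assumed here purely for illustration (the original file may use different values), would be:

import gtsam

# Assumed noise models: 6-DoF isotropic prior on the pose, 3-DoF isotropic GPS noise
PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25)
GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)

# Assumed local origin for the GPS measurement (illustrative values only)
lat0, lon0, h0 = 33.87, -84.31, 274.0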
Code Example #4
    def optimize_old(self,
                     graph_to_optimize=None
                     ):  #TODO: Old one; Turn back on if ISAM2 refuses to work.

        # Optimization
        params = gtsam.LevenbergMarquardtParams()
        if graph_to_optimize is None:
            optimizer = gtsam.LevenbergMarquardtOptimizer(
                self.graph, self.initial, params)
        else:
            optimizer = gtsam.LevenbergMarquardtOptimizer(
                graph_to_optimize, self.prop_initial, params)

        self.result = optimizer.optimize()
        self.initial = self.result
        self.resultsFlag = True
Code Example #5
File: ImuFactorExample.py  Project: borglab/gtsam
    def optimize(self, graph: gtsam.NonlinearFactorGraph,
                 initial: gtsam.Values):
        """Optimize using Levenberg-Marquardt optimization."""
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()
        return result
Code Example #6
    def estimate_global(self):
        np.random.seed(2)
        n0 = 0.000001
        n03 = np.array([n0, n0, n0])
        # TODO: something about floats and major row? check cython
        nNoiseFactor3 = np.array([0.2, 0.2, 0.2])

        # Create an empty nonlinear factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add a prior on the first pose, setting it to the origin
        priorMean = self.odom_global[0]
        priorNoise = gtsam.noiseModel_Diagonal.Sigmas(n03)
        graph.add(gtsam.PriorFactorPose2(self.X(1), priorMean, priorNoise))

        # Add odometry factors
        odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(nNoiseFactor3)
        for i, pose in enumerate(self.odom_relative):
            graph.add(
                gtsam.BetweenFactorPose2(self.X(i + 1), self.X(i + 2), pose,
                                         odometryNoise))

        # Add visual factors
        visualNoise = gtsam.noiseModel_Diagonal.Sigmas(nNoiseFactor3)
        for i, pose in enumerate(self.visual_relative):
            graph.add(
                gtsam.BetweenFactorPose2(self.X(i + 1), self.X(i + 2), pose,
                                         visualNoise))

        # set initial guess to odometry estimates
        initialEstimate = gtsam.Values()
        for i, pose in enumerate(self.odom_global):
            initialEstimate.insert(self.X(i + 1), pose)

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        # gtsam_quadrics.setMaxIterations(params, iter)
        # gtsam_quadrics.setRelativeErrorTol(params, 1e-8)
        # gtsam_quadrics.setAbsoluteErrorTol(params, 1e-8)

        # graph.print_('graph')
        # initialEstimate.print_('initialEstimate ')
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate,
                                                      params)

        # parameters = gtsam.GaussNewtonParams()
        # parameters.relativeErrorTol = 1e-8
        # parameters.maxIterations = 300
        # optimizer = gtsam.GaussNewtonOptimizer(graph, initialEstimate, parameters)

        result = optimizer.optimize()

        # result.print_('result ')
        # self.draw_trajectories([self.odom_global, self.visual_global], ['b', 'r'], 2)

        return self.unwrap_results(result)
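Note that this example uses the older Cython-wrapper naming (gtsam.noiseModel_Diagonal.Sigmas), while newer pybind11-based releases expose the same noise model as a nested class. A minimal sketch of the equivalent call in the newer API:

import numpy as np
import gtsam

# Older Cython bindings:  gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.2]))
# Newer pybind bindings:
odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.2]))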
Code Example #7
File: test_SFMExample.py  Project: pu-wei/gtsam
    def test_SFMExample(self):
        options = generator.Options()
        options.triangle = False
        options.nrCameras = 10

        [data, truth] = generator.generate_data(options)

        measurementNoiseSigma = 1.0
        pointNoiseSigma = 0.1
        poseNoiseSigmas = np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])

        graph = gtsam.NonlinearFactorGraph()

        # Add factors for all measurements
        measurementNoise = Isotropic.Sigma(2, measurementNoiseSigma)
        for i in range(len(data.Z)):
            for k in range(len(data.Z[i])):
                j = data.J[i][k]
                graph.add(gtsam.GenericProjectionFactorCal3_S2(
                    data.Z[i][k], measurementNoise,
                    X(i), P(j), data.K))

        posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas)
        graph.add(gtsam.PriorFactorPose3(X(0),
                                   truth.cameras[0].pose(), posePriorNoise))
        pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma)
        graph.add(gtsam.PriorFactorPoint3(P(0),
                                    truth.points[0], pointPriorNoise))

        # Initial estimate
        initialEstimate = gtsam.Values()
        for i in range(len(truth.cameras)):
            pose_i = truth.cameras[i].pose()
            initialEstimate.insert(X(i), pose_i)
        for j in range(len(truth.points)):
            point_j = truth.points[j]
            initialEstimate.insert(P(j), point_j)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        for i in range(5):
            optimizer.iterate()
        result = optimizer.values()

        # Marginalization
        marginals = gtsam.Marginals(graph, result)
        marginals.marginalCovariance(P(0))
        marginals.marginalCovariance(X(0))

        # Check optimized results, should be equal to ground truth
        for i in range(len(truth.cameras)):
            pose_i = result.atPose3(X(i))
            self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)

        for j in range(len(truth.points)):
            point_j = result.atPoint3(P(j))
            self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
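This test depends on names defined elsewhere in the file (generator, X, P, Isotropic, Diagonal). A sketch of the kind of imports it assumes is shown below; the exact module paths in the original project may differ.

import numpy as np
import gtsam
import gtsam.utils.visual_data_generator as generator  # assumed helper module
from gtsam import symbol_shorthand

X = symbol_shorthand.X  # pose keys
P = symbol_shorthand.P  # point keys
Isotropic = gtsam.noiseModel.Isotropic
Diagonal = gtsam.noiseModel.Diagonal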
Code Example #8
    def bundle_adjustment(self):
        """
        Parameters:
            calibration - gtsam.Cal3_S2, camera calibration
            landmark_map - list, A map of landmarks and their correspondence
        """
        # Initialize factor Graph
        graph = gtsam.NonlinearFactorGraph()

        initial_estimate = self.create_initial_estimate()

        # Create Projection Factors
        # """
        #   Measurement noise for bundle adjustment:
        #   sigma = 1.0
        #   measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, sigma)
        # """
        for landmark_idx, observation_list in enumerate(self._landmark_map):
            for observation in observation_list:
                pose_idx = observation[0]
                key_point = observation[1]
                # To test indeterminate system
                # if(landmark_idx == 2 or landmark_idx == 480):
                #     continue
                # if(pose_idx == 6):
                #     print("yes2")
                #     continue
                graph.add(
                    gtsam.GenericProjectionFactorCal3_S2(
                        key_point, self._measurement_noise, X(pose_idx),
                        P(landmark_idx), self._calibration))

        # Create priors for first two poses
        # """
        #   Pose prior noise:
        #   rotation_sigma = np.radians(60)
        #   translation_sigma = 1
        #   pose_noise_sigmas = np.array([rotation_sigma, rotation_sigma, rotation_sigma,
        #                             translation_sigma, translation_sigma, translation_sigma])
        # """
        for idx in (0, 1):
            pose_i = initial_estimate.atPose3(X(idx))
            graph.add(
                gtsam.PriorFactorPose3(X(idx), pose_i, self._pose_prior_noise))

        # Optimization
        # Using QR rather than Cholesky decomposition
        # params = gtsam.LevenbergMarquardtParams()
        # params.setLinearSolverType("MULTIFRONTAL_QR")
        # optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params)
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate)

        sfm_result = optimizer.optimize()
        # Check that the system is not under-constrained by computing marginals
        marginals = gtsam.Marginals(  # pylint: disable=unused-variable
            graph, sfm_result)
        return sfm_result
Code Example #9
File: graph.py  Project: Mario-Kart-Felix/droidlet
    def optimize(self, verbosity=0):
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosity(["SILENT", "TERMINATION"][verbosity])
        params.setAbsoluteErrorTol(1e-10)
        params.setRelativeErrorTol(1e-10)
        optimizer = gtsam.LevenbergMarquardtOptimizer(self.gtsam_graph, self.values, params)

        result_values = optimizer.optimize()

        return {name: gtsam2sophus(result_values.atPose3(var)) for name, var in self.vars.items()}
Code Example #10
def build_graph():

    print("build_graph !!!")
    # Create noise models
    ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2,
                                                                0.1]))
    PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))

    # Create an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Add a prior on the first pose, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
    graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))

    # Add odometry factors
    odometry = gtsam.Pose2(2.0, 0.0, 0.0)
    odometry1 = gtsam.Pose2(edge_list[0].position.x, edge_list[0].position.y,
                            edge_list[0].position.z)
    odometry2 = gtsam.Pose2(edge_list[1].position.x, edge_list[1].position.y,
                            edge_list[1].position.z)

    # For simplicity, we will use the same noise model for each odometry factor
    # Create odometry (Between) factors between consecutive poses
    graph.add(gtsam.BetweenFactorPose2(1, 2, odometry1, ODOMETRY_NOISE))
    graph.add(gtsam.BetweenFactorPose2(2, 3, odometry2, ODOMETRY_NOISE))
    print("\nFactor Graph:\n{}".format(graph))

    # Create the data structure to hold the initial estimate of the solution
    # For illustrative purposes, these have been deliberately set to incorrect values
    initial = gtsam.Values()
    initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
    initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
    initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
    print("\nInitial Estimate:\n{}".format(initial))

    # optimize using Levenberg-Marquardt optimization
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    # 5. Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    for i in range(1, 4):
        print("X{} covariance:\n{}\n".format(i,
                                             marginals.marginalCovariance(i)))

    fig = plt.figure(0)
    for i in range(1, 4):
        gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
                              marginals.marginalCovariance(i))
    plt.axis('equal')
    plt.show()
Code Example #11
File: bundle_adjustment.py  Project: borglab/gtsfm
    def __optimize_factor_graph(self, graph: NonlinearFactorGraph,
                                initial_values: Values) -> Values:
        """Optimize the factor graph."""
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("ERROR")
        if self._max_iterations:
            params.setMaxIterations(self._max_iterations)
        lm = gtsam.LevenbergMarquardtOptimizer(graph, initial_values, params)

        result_values = lm.optimize()
        return result_values
Code Example #12
    def trajectory_estimator(self, features, visible_map):
        """ Estimate current pose with matched features through GTSAM and update the trajectory
        Parameters:
            features: A Feature Object. Stores all matched Superpoint features.
            visible_map: A Map Object. Stores all matched Landmark points.
            pose: gtsam.Pose3 Object. The pose at the last state of the atrium map trajectory.
        Returns:
            current_pose: gtsam.Pose3 Object. The current estimated pose.

        Use the matched features as projection factors in the graph.
        Use the visible_map (matched landmark points of the map) as landmark priors in the graph.
        Use the pose from the last time step of the trajectory as the initial estimate of the current pose.
        """
        assert len(self.atrium_map.trajectory) != 0, "Trajectory is empty."
        pose = self.atrium_map.trajectory[len(self.atrium_map.trajectory)-1]

        # Initialize factor graph
        graph = gtsam.NonlinearFactorGraph()
        initialEstimate = gtsam.Values()

        # Add projection factors with matched feature points for all measurements
        measurementNoiseSigma = 1.0
        measurementNoise = gtsam.noiseModel_Isotropic.Sigma(
            2, measurementNoiseSigma)
        for i, feature in enumerate(features.key_point_list):
            graph.add(gtsam.GenericProjectionFactorCal3_S2(
                feature, measurementNoise,
                X(0), P(i), self.calibration))

        # Create initial estimate for the pose
        # Because the robot moves slowly, we can use the previous pose as an initial estimation of the current pose.
        initialEstimate.insert(X(0), pose)

        # Create priors for visual
        # Because the map is known, we use the landmarks from the visible map with nearly zero error as priors.
        pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.01)
        for i, landmark in enumerate(visible_map.landmark_list):
            
            graph.add(gtsam.PriorFactorPoint3(P(i),
                                              landmark, pointPriorNoise))
            initialEstimate.insert(P(i), landmark)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimize()

        # Add current pose to the trajectory
        current_pose = result.atPose3(symbol(ord('x'), 0))
        self.atrium_map.append_trajectory(current_pose)

        return current_pose
Code Example #13
    def test_lm_simple_printing(self):
        """Make sure we are properly terminating LM"""
        def hook(_, error):
            print(error)

        params = gtsam.LevenbergMarquardtParams()
        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook,
                                self.graph, self.initial)
        self.check(actual)
        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook,
                                self.graph, self.initial, params)
        self.check(actual)
        actual = gtsam_optimize(
            gtsam.LevenbergMarquardtOptimizer(self.graph, self.initial,
                                              params), params, hook)
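optimize_using and gtsam_optimize are hook-driven helpers (they appear to come from gtsam.utils.logging_optimizer); the hook is called with the optimizer and the current error after each iteration. A rough sketch of what such a helper does, illustrative rather than the library's actual implementation:

def optimize_with_hook(optimizer, hook, abs_tol=1e-9):
    # Run a GTSAM optimizer step by step, reporting the error to a hook.
    error_before = optimizer.error()
    hook(optimizer, error_before)          # report the initial error
    while True:
        optimizer.iterate()                # one optimizer step
        error_after = optimizer.error()
        hook(optimizer, error_after)
        if abs(error_before - error_after) < abs_tol:
            break                          # converged (assumed stopping rule)
        error_before = error_after
    return optimizer.values()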
Code Example #14
    def test_Pose3SLAMExample(self) -> None:
        # Create a hexagon of poses
        hexagon = circlePose3(numPoses=6, radius=1.0)
        p0 = hexagon.atPose3(0)
        p1 = hexagon.atPose3(1)

        # create a Pose graph with one equality constraint and one measurement
        fg = gtsam.NonlinearFactorGraph()
        fg.add(gtsam.NonlinearEqualityPose3(0, p0))
        delta = p0.between(p1)
        covariance = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([
                0.05, 0.05, 0.05,
                np.deg2rad(5.),
                np.deg2rad(5.),
                np.deg2rad(5.)
            ]))
        fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(2, 3, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(3, 4, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(4, 5, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(5, 0, delta, covariance))

        # Create initial config
        initial = gtsam.Values()
        s = 0.10
        initial.insert(0, p0)
        initial.insert(1,
                       hexagon.atPose3(1).retract(s * np.random.randn(6, 1)))
        initial.insert(2,
                       hexagon.atPose3(2).retract(s * np.random.randn(6, 1)))
        initial.insert(3,
                       hexagon.atPose3(3).retract(s * np.random.randn(6, 1)))
        initial.insert(4,
                       hexagon.atPose3(4).retract(s * np.random.randn(6, 1)))
        initial.insert(5,
                       hexagon.atPose3(5).retract(s * np.random.randn(6, 1)))

        # optimize
        optimizer = gtsam.LevenbergMarquardtOptimizer(fg, initial)
        result = optimizer.optimizeSafely()

        pose_1 = result.atPose3(1)
        self.gtsamAssertEquals(pose_1, p1, 1e-4)
Code Example #15
    def bundle_adjustment(self):
        """
        Bundle Adjustment.
        Input:
            self._image_features
            self._image_points
        Output:
            result - gtsam optimization result
        """
        # Initialize factor Graph
        graph = gtsam.NonlinearFactorGraph()

        pose_indices, point_indices = self.create_index_sets()

        initial_estimate = self.create_initial_estimate(pose_indices)

        # Create Projection Factor
        sigma = 1.0
        measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, sigma)
        for img_idx, features in enumerate(self._image_features):
            for kp_idx, keypoint in enumerate(features.keypoints):
                if self.image_points[img_idx].kp_3d_exist(kp_idx):
                    landmark_id = self.image_points[img_idx].kp_3d(kp_idx)
                    if self._landmark_estimates[
                            landmark_id].seen >= self._min_landmark_seen:
                        key_point = Point2(keypoint[0], keypoint[1])
                        graph.add(
                            gtsam.GenericProjectionFactorCal3_S2(
                                key_point, measurementNoise, X(img_idx),
                                P(landmark_id), self._calibration))

        # Create priors for first two poses
        s = np.radians(60)
        poseNoiseSigmas = np.array([s, s, s, 1, 1, 1])
        posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas)
        for idx in (0, 1):
            pose_i = initial_estimate.atPose3(X(idx))
            graph.add(gtsam.PriorFactorPose3(X(idx), pose_i, posePriorNoise))

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate)
        sfm_result = optimizer.optimize()

        return sfm_result, pose_indices, point_indices
Code Example #16
    def pose_estimate(self, observations, estimated_pose):
        """ Estimate current pose with matched features through GTSAM and update the trajectory
        Parameters:
            observations - [(Point2(), Point3())]
            pose - gtsam.Pose3 Object. The pose at the last state of the trajectory.
        Returns:
            current_pose - gtsam.Pose3 Object. The current estimated pose.
        """
        if len(observations) == 0:
            print("No observation")
        # Initialize factor graph
        graph = gtsam.NonlinearFactorGraph()
        initial_estimate = gtsam.Values()

        measurement_noise_sigma = 1.0
        measurement_noise = gtsam.noiseModel_Isotropic.Sigma(
            2, measurement_noise_sigma)

        # Because the map is known, we use the landmarks from the visible map with nearly zero error as priors.
        point_prior_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.01)
        for i, observation in enumerate(observations):
            print(i)
            # Add projection factors with matched feature points
            graph.add(
                gtsam.GenericProjectionFactorCal3_S2(observation[0],
                                                     measurement_noise, X(0),
                                                     P(i), self.calibration))
            # Create priors for all observations
            graph.add(
                gtsam.PriorFactorPoint3(P(i), observation[1],
                                        point_prior_noise))
            initial_estimate.insert(P(i), observation[1])
        # Create initial estimate for the pose
        # Because the robot moves slowly, we can use the previous pose as an initial estimation of the current pose.
        initial_estimate.insert(X(0), estimated_pose)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate)
        result = optimizer.optimize()
        # Add current pose to the trajectory
        current_pose = result.atPose3(X(0))
        print(current_pose)

        return current_pose
Code Example #17
File: test_custom_factor.py  Project: borglab/gtsam
    def test_optimization(self):
        """Tests if a factor graph with a CustomFactor can be properly optimized"""
        gT1 = Pose2(1, 2, np.pi / 2)
        gT2 = Pose2(-1, 4, np.pi)

        expected = Pose2(2, 2, np.pi / 2)

        def error_func(this: CustomFactor, v: gtsam.Values,
                       H: List[np.ndarray]):
            """
            Error function that mimics a BetweenFactor
            :param this: reference to the current CustomFactor being evaluated
            :param v: Values object
            :param H: list of references to the Jacobian arrays
            :return: the non-linear error
            """
            key0 = this.keys()[0]
            key1 = this.keys()[1]
            gT1, gT2 = v.atPose2(key0), v.atPose2(key1)
            error = expected.localCoordinates(gT1.between(gT2))

            if H is not None:
                result = gT1.between(gT2)
                H[0] = -result.inverse().AdjointMap()
                H[1] = np.eye(3)
            return error

        noise_model = gtsam.noiseModel.Unit.Create(3)
        cf = CustomFactor(noise_model, [0, 1], error_func)

        fg = gtsam.NonlinearFactorGraph()
        fg.add(cf)
        fg.add(gtsam.PriorFactorPose2(0, gT1, noise_model))

        v = Values()
        v.insert(0, Pose2(0, 0, 0))
        v.insert(1, Pose2(0, 0, 0))

        params = gtsam.LevenbergMarquardtParams()
        optimizer = gtsam.LevenbergMarquardtOptimizer(fg, v, params)
        result = optimizer.optimize()

        self.gtsamAssertEquals(result.atPose2(0), gT1, tol=1e-5)
        self.gtsamAssertEquals(result.atPose2(1), gT2, tol=1e-5)
Code Example #18
    def test_convertNonlinear(self):
        """Test converting a linear factor graph to a nonlinear one"""
        graph, X = create_graph()

        EXPECTEDM = [1, 2, 3]

        # create nonlinear factor graph for marginalization
        nfg = gtsam.LinearContainerFactor.ConvertLinearGraph(graph)
        optimizer = gtsam.LevenbergMarquardtOptimizer(nfg, gtsam.Values())
        nlresult = optimizer.optimizeSafely()

        # marginalize
        marginals = gtsam.Marginals(nfg, nlresult)
        m = [marginals.marginalCovariance(x) for x in X]

        # check linear marginalizations
        self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8)
        self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8)
        self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8)
Code Example #19
    def test_LocalizationExample(self):
        # Create the graph (defined in pose2SLAM.h, derived from
        # NonlinearFactorGraph)
        graph = gtsam.NonlinearFactorGraph()

        # Add two odometry factors
        # create a measurement for both factors (the same in this case)
        odometry = gtsam.Pose2(2.0, 0.0, 0.0)
        odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))  # 20cm std on x,y, 0.1 rad on theta
        graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise))
        graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))

        # Add three "GPS" measurements
        # We use Pose2 Priors here with high variance on theta
        groundTruth = gtsam.Values()
        groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0))
        groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0))
        groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0))
        model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 10.]))
        for i in range(3):
            graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(0, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(1, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(2, gtsam.Pose2(4.1, 0.1, 0.1))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()

        # Plot Covariance Ellipses
        marginals = gtsam.Marginals(graph, result)
        P = [None] * result.size()
        for i in range(0, result.size()):
            pose_i = result.atPose2(i)
            self.gtsamAssertEquals(pose_i, groundTruth.atPose2(i), 1e-4)
            P[i] = marginals.marginalCovariance(i)
Code Example #20
    def optimize(graph, initial_estimate, calibration):

        # create optimizer parameters
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")    # SILENT = 0, SUMMARY, TERMINATION, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA : VALUES, ERROR 
        params.setMaxIterations(100)
        params.setlambdaInitial(1e-5)
        params.setlambdaUpperBound(1e10)
        params.setlambdaLowerBound(1e-8)
        params.setRelativeErrorTol(1e-5)
        params.setAbsoluteErrorTol(1e-5)
  
        # create optimizer
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params)

        # run optimizer
        print('starting optimization')
        estimate = optimizer.optimize()
        print('optimization finished')

        return estimate
Code Example #21
    def compute_control(self, start_state, nominal_states):
        # build graph
        graph = gtsam.NonlinearFactorGraph()
        x0_key = self.state_key(0)
        graph.add(gtd.PriorFactorVector5(x0_key, start_state,
                                         self.prior_noise))
        for i in range(self.N):
            x_curr_key = self.state_key(i)
            x_next_key = self.state_key(i + 1)
            graph.add(
                gtd.CPDynamicsFactor(x_curr_key, x_next_key,
                                     self.transition_noise, self.dt))
            graph.add(gtd.CPStateCostFactor(x_next_key, self.cost_noise))

        # build init values
        init_values = gtsam.Values()
        for i in range(self.N + 1):
            x_key = self.state_key(i)
            init_values.insert(x_key, np.array(nominal_states[i]))

        # optimize
        params = gtsam.LevenbergMarquardtParams()
        params.setMaxIterations(20)
        # params.setVerbosityLM("SUMMARY")
        params.setLinearSolverType("MULTIFRONTAL_QR")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, init_values,
                                                      params)
        results = optimizer.optimize()
        # print("error:", graph.error(results))

        # get results
        states = []
        for i in range(self.N + 1):
            x_key = self.state_key(i)
            states.append(results.atVector(x_key))

        dx_c = states[1][-1] - states[0][-1]
        u_star = states[0][-1] + 1.1 * dx_c / (20 * self.dt)
        return u_star, states
Code Example #22
    def setUp(self):
        """Set up a small Karcher mean optimization example."""
        # Grabbed from KarcherMeanFactor unit tests.
        R = Rot3.Expmap(np.array([0.1, 0, 0]))
        rotations = {R, R.inverse()}  # mean is the identity
        self.expected = Rot3()

        graph = gtsam.NonlinearFactorGraph()
        for R in rotations:
            graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
        initial = gtsam.Values()
        initial.insert(KEY, R)
        self.params = gtsam.GaussNewtonParams()
        self.optimizer = gtsam.GaussNewtonOptimizer(graph, initial,
                                                    self.params)

        self.lmparams = gtsam.LevenbergMarquardtParams()
        self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
            graph, initial, self.lmparams)

        # setup output capture
        self.capturedOutput = StringIO()
        sys.stdout = self.capturedOutput
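KEY, MODEL, and Rot3 are module-level names not shown in this excerpt. Plausible definitions, assumed here for illustration (the real test file may differ):

import gtsam
from gtsam import Rot3

KEY = 0                                   # key of the single rotation variable (assumed)
MODEL = gtsam.noiseModel.Unit.Create(3)   # unit-weight 3-DoF noise model (assumed)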
Code Example #23
    def test_OdometryExample(self):
        # Create the graph (defined in pose2SLAM.h, derived from
        # NonlinearFactorGraph)
        graph = gtsam.NonlinearFactorGraph()

        # Add a Gaussian prior on pose x_1
        priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior mean is at origin
        priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array(
            [0.3, 0.3, 0.1]))  # 30cm std on x,y, 0.1 rad on theta
        # add directly to graph
        graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

        # Add two odometry factors
        # create a measurement for both factors (the same in this case)
        odometry = gtsam.Pose2(2.0, 0.0, 0.0)
        odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))  # 20cm std on x,y, 0.1 rad on theta
        graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
        graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()
        marginals = gtsam.Marginals(graph, result)
        marginals.marginalCovariance(1)

        # Check first pose equality
        pose_1 = result.atPose2(1)
        self.assertTrue(pose_1.equals(gtsam.Pose2(), 1e-4))
Code Example #24
    def run(self, initial_data: GtsfmData) -> GtsfmData:
        """Run the bundle adjustment by forming factor graph and optimizing using Levenberg–Marquardt optimization.

        Args:
            initial_data: initialized cameras, tracks w/ their 3d landmark from triangulation.

        Returns:
            optimized camera poses, 3D point w/ tracks, and error metrics.
        """
        logger.info(
            f"Input: {initial_data.number_tracks()} tracks on {len(initial_data.get_valid_camera_indices())} cameras\n"
        )

        # noise model for measurements -- one pixel in u and v
        measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
            IMG_MEASUREMENT_DIM, 1.0)

        # Create a factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add measurements to the factor graph
        for j in range(initial_data.number_tracks()):
            track = initial_data.get_track(j)  # SfmTrack
            # retrieve the SfmMeasurement objects
            for m_idx in range(track.number_measurements()):
                # i represents the camera index, and uv is the 2d measurement
                i, uv = track.measurement(m_idx)
                # note use of shorthand symbols C and P
                graph.add(
                    GeneralSFMFactorCal3Bundler(uv, measurement_noise, C(i),
                                                P(j)))

        # get all the valid camera indices, which need to be added to the graph.
        valid_camera_indices = initial_data.get_valid_camera_indices()

        # Add a prior on first pose. This indirectly specifies where the origin is.
        graph.push_back(
            gtsam.PriorFactorPinholeCameraCal3Bundler(
                C(valid_camera_indices[0]),
                initial_data.get_camera(valid_camera_indices[0]),
                gtsam.noiseModel.Isotropic.Sigma(PINHOLE_CAM_CAL3BUNDLER_DOF,
                                                 0.1),
            ))
        # Also add a prior on the position of the first landmark to fix the scale
        graph.push_back(
            gtsam.PriorFactorPoint3(
                P(0),
                initial_data.get_track(0).point3(),
                gtsam.noiseModel.Isotropic.Sigma(POINT3_DOF, 0.1)))

        # Create initial estimate
        initial = gtsam.Values()

        # add each PinholeCameraCal3Bundler
        for i in valid_camera_indices:
            camera = initial_data.get_camera(i)
            initial.insert(C(i), camera)

        # add each SfmTrack
        for j in range(initial_data.number_tracks()):
            track = initial_data.get_track(j)
            initial.insert(P(j), track.point3())

        # Optimize the graph and print results
        try:
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("ERROR")
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
            result_values = lm.optimize()
        except Exception:
            logger.exception("LM Optimization failed")
            # as we did not perform the bundle adjustment, we skip computing the total reprojection error
            return GtsfmData(initial_data.number_images())

        final_error = graph.error(result_values)

        # Error drops from ~2764.22 to ~0.046
        logger.info(f"initial error: {graph.error(initial):.2f}")
        logger.info(f"final error: {final_error:.2f}")

        # construct the results
        optimized_data = values_to_gtsfm_data(result_values, initial_data)

        metrics_dict = {}
        metrics_dict["before_filtering"] = optimized_data.aggregate_metrics()
        logger.info("[Result] Number of tracks before filtering: %d",
                    metrics_dict["before_filtering"]["number_tracks"])

        # filter the largest errors
        filtered_result = optimized_data.filter_landmarks(
            self.output_reproj_error_thresh)

        metrics_dict["after_filtering"] = filtered_result.aggregate_metrics()
        io_utils.save_json_file(
            os.path.join(METRICS_PATH, "bundle_adjustment_metrics.json"),
            metrics_dict)

        logger.info("[Result] Number of tracks after filtering: %d",
                    metrics_dict["after_filtering"]["number_tracks"])
        logger.info(
            "[Result] Mean track length %.3f",
            metrics_dict["after_filtering"]["3d_track_lengths"]["mean"])
        logger.info(
            "[Result] Median track length %.3f",
            metrics_dict["after_filtering"]["3d_track_lengths"]["median"])
        filtered_result.log_scene_reprojection_error_stats()

        return filtered_result
Code Example #25
            predicted_nav_state = current_preintegrated_IMU.predict(
                params.initial_state, params.IMU_bias)
            initial_values.insert(symbol('x', i), predicted_nav_state.pose())
            initial_values.insert(symbol('v', i),
                                  predicted_nav_state.velocity())

        current_preintegrated_IMU.integrateMeasurement(imu_measurements[i, :3],
                                                       imu_measurements[i, 3:],
                                                       params.dt)

    ###############################    Optimize the factor graph   ####################################

    # Use the Levenberg-Marquardt algorithm
    optimization_params = gtsam.LevenbergMarquardtParams()
    optimization_params.setVerbosityLM("SUMMARY")
    optimizer = gtsam.LevenbergMarquardtOptimizer(factor_graph, initial_values,
                                                  optimization_params)
    optimization_result = optimizer.optimize()

    ###############################        Plot the solution       ####################################

    for i in preintegration_steps:
        # Ground truth pose
        plot_pose3(1, gtsam.Pose3(true_poses[i]), 0.3)
        # Estimated pose
        plot_pose3(1, optimization_result.atPose3(symbol('x', i)), 0.1)

    ax = plt.gca()
    ax.set_xlim3d(-5, 5)
    ax.set_ylim3d(-5, 5)
    ax.set_zlim3d(-5, 5)
    ax.set_xlabel('x')
Code Example #26
graph = gtsam.NonlinearFactorGraph()

# Add a prior on the first pose, setting it to the origin
# A prior factor consists of a mean and a noise model (covariance matrix)
priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

# Add odometry factors
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
# For simplicity, we will use the same noise model for each odometry factor
odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
# Create odometry (Between) factors between consecutive poses
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))
graph.print("\nFactor Graph:\n")

# Create the data structure to hold the initial estimate of the solution
# For illustrative purposes, these have been deliberately set to incorrect values
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
initial.print("\nInitial Estimate:\n")

# optimize using Levenberg-Marquardt optimization
params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
result.print("\nFinal Result:\n")
Code Example #27
File: ImuFactorExample.py  Project: eglrp/GraphSlam
    def run(self):
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
        pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)

        H9 = gtsam.OptionalJacobian9()

        T = 12
        num_poses = T + 1  # assumes 1 factor per second
        initial = gtsam.Values()
        initial.insert(BIAS_KEY, self.actualBias)
        for i in range(num_poses):
            state_i = self.scenario.navState(float(i))
            initial.insert(X(i), state_i.pose())
            initial.insert(V(i), state_i.velocity())

        # simulate the loop
        i = 0  # state index
        actual_state_i = self.scenario.navState(0)
        for k, t in enumerate(np.arange(0, T, self.dt)):
            # get measurements and add them to PIM
            measuredOmega = self.runner.measuredAngularVelocity(t)
            measuredAcc = self.runner.measuredSpecificForce(t)
            pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)

            # Plot IMU many times
            if k % 10 == 0:
                self.plotImu(t, measuredOmega, measuredAcc)

            # Plot every second
            if k % int(1 / self.dt) == 0:
                self.plotGroundTruthPose(t)

            # create IMU factor every second
            if (k + 1) % int(1 / self.dt) == 0:
                factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
                                         BIAS_KEY, pim)
                graph.push_back(factor)
                if True:
                    print(factor)
                    H2 = gtsam.OptionalJacobian96()
                    print(pim.predict(actual_state_i, self.actualBias, H9, H2))
                pim.resetIntegration()
                actual_state_i = self.scenario.navState(t + self.dt)
                i += 1

        # add priors on beginning and end
        self.addPrior(0, graph)
        self.addPrior(num_poses - 1, graph)

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()

        # Calculate and print marginal covariances
        marginals = gtsam.Marginals(graph, result)
        print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY))
        for i in range(num_poses):
            print("Covariance on pose {}:\n{}\n".format(
                i, marginals.marginalCovariance(X(i))))
            print("Covariance on vel {}:\n{}\n".format(
                i, marginals.marginalCovariance(V(i))))

        # Plot resulting poses
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            plotPose3(POSES_FIG, pose_i, 0.1)
            i += 1
        print(result.atConstantBias(BIAS_KEY))

        plt.ioff()
        plt.show()
Code Example #28
def run(args):
    """Test Dogleg vs LM, inspired by issue #452."""

    # print parameters
    print("num samples = {}, deltaInitial = {}".format(args.num_samples,
                                                       args.delta))

    # Ground truth solution
    T11 = gtsam.Pose2(0, 0, 0)
    T12 = gtsam.Pose2(1, 0, 0)
    T21 = gtsam.Pose2(0, 1, 0)
    T22 = gtsam.Pose2(1, 1, 0)

    # Factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Priors
    prior = gtsam.noiseModel.Isotropic.Sigma(3, 1)
    graph.add(gtsam.PriorFactorPose2(11, T11, prior))
    graph.add(gtsam.PriorFactorPose2(21, T21, prior))

    # Odometry
    model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.01, 0.01, 0.3]))
    graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model))
    graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model))

    # Range
    model_rho = gtsam.noiseModel.Isotropic.Sigma(1, 0.01)
    graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho))

    params = gtsam.DoglegParams()
    params.setDeltaInitial(args.delta)  # default is 10

    # Add progressively more noise to ground truth
    sigmas = [0.01, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20]
    n = len(sigmas)
    p_dl, s_dl, p_lm, s_lm = [0] * n, [0] * n, [0] * n, [0] * n
    for i, sigma in enumerate(sigmas):
        dl_fails, lm_fails = 0, 0
        # Attempt num_samples optimizations for both DL and LM
        for _attempt in range(args.num_samples):
            initial = gtsam.Values()
            initial.insert(11, T11.retract(np.random.normal(0, sigma, 3)))
            initial.insert(12, T12.retract(np.random.normal(0, sigma, 3)))
            initial.insert(21, T21.retract(np.random.normal(0, sigma, 3)))
            initial.insert(22, T22.retract(np.random.normal(0, sigma, 3)))

            # Run dogleg optimizer
            dl = gtsam.DoglegOptimizer(graph, initial, params)
            result = dl.optimize()
            dl_fails += graph.error(result) > 1e-9

            # Run Levenberg-Marquardt optimizer
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial)
            result = lm.optimize()
            lm_fails += graph.error(result) > 1e-9

        # Calculate Bayes estimate of success probability
        # using a beta prior of alpha=0.5, beta=0.5
        alpha, beta = 0.5, 0.5
        v = args.num_samples + alpha + beta
        p_dl[i] = (args.num_samples - dl_fails + alpha) / v
        p_lm[i] = (args.num_samples - lm_fails + alpha) / v

        def stddev(p):
            """Calculate standard deviation."""
            return math.sqrt(p * (1 - p) / (1 + v))

        s_dl[i] = stddev(p_dl[i])
        s_lm[i] = stddev(p_lm[i])

        fmt = "sigma= {}:\tDL success {:.2f}% +/- {:.2f}%, LM success {:.2f}% +/- {:.2f}%"
        print(
            fmt.format(sigma, 100 * p_dl[i], 100 * s_dl[i], 100 * p_lm[i],
                       100 * s_lm[i]))

    if args.plot:
        fig, ax = plt.subplots()
        dl_plot = plt.errorbar(sigmas, p_dl, yerr=s_dl, label="Dogleg")
        lm_plot = plt.errorbar(sigmas, p_lm, yerr=s_lm, label="LM")
        plt.title("Dogleg empirical success vs. LM")
        plt.legend(handles=[dl_plot, lm_plot])
        ax.set_xlim(0, sigmas[-1] + 1)
        ax.set_ylim(0, 1)
        plt.show()
Code Example #29
        graph.add(odometry_factor)

    # reproject true quadrics into each true pose
    for j, quadric in enumerate(quadrics):
        for i, pose in enumerate(poses):
            conic = quadricslam.QuadricCamera.project(quadric, pose,
                                                      calibration)
            bounds = conic.bounds()
            bbf = quadricslam.BoundingBoxFactor(bounds, calibration, X(i),
                                                Q(j), bbox_noise)
            bbf.addToGraph(graph)

    # define lm parameters
    parameters = gtsam.LevenbergMarquardtParams()
    parameters.setVerbosityLM(
        "SUMMARY"
    )  # SILENT = 0, SUMMARY, TERMINATION, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA : VALUES, ERROR
    parameters.setMaxIterations(100)
    parameters.setlambdaInitial(1e-5)
    parameters.setlambdaUpperBound(1e10)
    parameters.setlambdaLowerBound(1e-8)
    parameters.setRelativeErrorTol(1e-5)
    parameters.setAbsoluteErrorTol(1e-5)

    # create optimizer
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  parameters)

    # run optimizer
    result = optimizer.optimize()
Code Example #30
File: SimpleRotation.py  Project: yangdegang/gtsam-1
def main():
    """
    Step 1: Create a factor to express a unary constraint

    The "prior" in this case is the measurement from a sensor,
    with a model of the noise on the measurement.

    The "Key" created here is a label used to associate parts of the
    state (stored in "RotValues") with particular factors.  They require
    an index to allow for lookup, and should be unique.

    In general, creating a factor requires:
    - A key or set of keys labeling the variables that are acted upon
    - A measurement value
    - A measurement model with the correct dimensionality for the factor
    """
    prior = gtsam.Rot2.fromAngle(np.deg2rad(30))
    prior.print_('goal angle')
    model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1))
    key = gtsam.symbol(ord('x'), 1)
    factor = gtsam.PriorFactorRot2(key, prior, model)
    """
    Step 2: Create a graph container and add the factor to it

    Before optimizing, all factors need to be added to a Graph container,
    which provides the necessary top-level functionality for defining a
    system of constraints.

    In this case, there is only one factor, but in a practical scenario,
    many more factors would be added.
    """
    graph = gtsam.NonlinearFactorGraph()
    graph.push_back(factor)
    graph.print_('full graph')
    """
    Step 3: Create an initial estimate

    An initial estimate of the solution for the system is necessary to
    start optimization.  This system state is the "Values" instance,
    which is similar in structure to a dictionary, in that it maps
    keys (the label created in step 1) to specific values.

    The initial estimate provided to optimization will be used as
    a linearization point for optimization, so it is important that
    all of the variables in the graph have a corresponding value in
    this structure.
    """
    initial = gtsam.Values()
    initial.insert(key, gtsam.Rot2.fromAngle(np.deg2rad(20)))
    initial.print_('initial estimate')
    """
    Step 4: Optimize

    After formulating the problem with a graph of constraints
    and an initial estimate, executing optimization is as simple
    as calling a general optimization function with the graph and
    initial estimate.  This will yield a new RotValues structure
    with the final state of the optimization.
    """
    result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()
    result.print_('final result')
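For reference, a sketch of how the same steps might look against the newer pybind11-based bindings (the example above targets the older Cython wrapper, hence the noiseModel_Isotropic and print_ names); this is an illustration, not part of the original file:

import numpy as np
import gtsam

# Step 1: prior factor on a single rotation
prior = gtsam.Rot2.fromAngle(np.deg2rad(30))
model = gtsam.noiseModel.Isotropic.Sigma(1, np.deg2rad(1))
key = gtsam.symbol('x', 1)

# Step 2: graph container
graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.PriorFactorRot2(key, prior, model))

# Step 3: initial estimate
initial = gtsam.Values()
initial.insert(key, gtsam.Rot2.fromAngle(np.deg2rad(20)))

# Step 4: optimize and inspect the result
result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()
print(result.atRot2(key))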