Example #1
    def __init__(self,
                 p,
                 q,
                 r,
                 alphas=np.array([0.001, 0.0001]),
                 sensor_offset=np.zeros(2)):

        # Add noise models
        self.odometry_noise = gtsam.noiseModel.Diagonal.Sigmas(q)
        self.measurement_noise = gtsam.noiseModel.Diagonal.Sigmas(r)

        self.alphas = alphas
        self.sensor_offset = sensor_offset

        # Create graph and initialize the newest pose
        self.graph = gtsam.NonlinearFactorGraph()
        self.poses = gtsam.Values()

        # To enumerate all poses and landmarks
        self.kx = 1
        self.kl = 1

        self.landmarks = np.empty(0)

        # Initialize graph with prior
        prior_noise = gtsam.noiseModel.Diagonal.Sigmas(p)
        self.graph.add(
            gtsam.PriorFactorPose2(X(self.kx), gtsam.Pose2(0.0, 0.0, 0.0),
                                   prior_noise))
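For orientation, a hedged sketch of how an odometry update might be folded into this class, reusing the noise model and `X(...)` key convention from the constructor above (the `add_odometry` method name and its argument are assumptions, not part of the original class):

    def add_odometry(self, odometry):
        # Hypothetical helper: connect the newest pose key to a fresh one with
        # a BetweenFactorPose2 built from the odometry noise model above.
        self.graph.add(
            gtsam.BetweenFactorPose2(X(self.kx), X(self.kx + 1), odometry,
                                     self.odometry_noise))
        self.kx += 1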
Example #2
    def __init__(self, initial_state, covariance, alphas, beta):
        self._initial_state = gtsam.Pose2(initial_state[0], initial_state[1], initial_state[2])
        self._prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([covariance, covariance, covariance]))
        # self.observation_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([beta[0] ** 2, np.deg2rad(beta[1]) ** 2]))
        self.observation_noise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array(([np.deg2rad(beta[1]) ** 2, (beta[0] / 100) ** 2])))
        self.beta = beta
        self.alphas = alphas ** 2
        self.pose_num = 0
        self.observation_num = 1000
        self.landmark_indexes = list()
        self.states_new = np.array([[]])
        self.observation_new = np.array([[]])

        self.graph = gtsam.NonlinearFactorGraph()
        self.estimations = gtsam.Values()
        self.result = gtsam.Values()
        self.parameters = gtsam.ISAM2Params()
        self.parameters.setRelinearizeThreshold(1e-4)
        self.parameters.setRelinearizeSkip(1)
        self.slam = gtsam.ISAM2(self.parameters)

        self.graph.add(gtsam.PriorFactorPose2(self.pose_num, self._initial_state, self._prior_noise))
        self.estimations.insert(self.pose_num, self._initial_state)
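A hedged sketch of the update step such a wrapper usually exposes (the `update_isam` name is an assumption; `self.slam`, `self.graph`, and `self.estimations` come from the constructor above):

    def update_isam(self):
        # Hypothetical helper: push the staged factors/values into iSAM2,
        # cache the latest estimate, then clear the staging containers.
        self.slam.update(self.graph, self.estimations)
        self.result = self.slam.calculateEstimate()
        self.graph.resize(0)
        self.estimations.clear()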
Example #3
def init_smoother(request):
    """
    Runs a batch fixed-lag smoother on an agent with two odometry
    sensors that is simply moving along the x axis at constant
    speed.
    """
    global SMOOTHER_BATCH

    # Define a batch fixed lag smoother, which uses
    # Levenberg-Marquardt to perform the nonlinear optimization
    lag = request.lag
    smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag)

    new_factors = gtsam.NonlinearFactorGraph()
    new_values = gtsam.Values()
    new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()

    prior_mean = request.prior_mean
    prior_noise = request.prior_noise
    X1 = 0
    new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise))
    new_values.insert(X1, prior_mean)
    new_timestamps.insert(_timestamp_key_value(X1, 0.0))

    SMOOTHER_BATCH = smoother_batch
    SMOOTHER_BATCH.update(new_factors, new_values, new_timestamps)

    return X1
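A hedged sketch of how a subsequent time step might be fed to the same smoother, reusing the module's `_timestamp_key_value` helper and the global `SMOOTHER_BATCH` (the `step_smoother` name and its arguments are illustrative assumptions, not part of the original):

def step_smoother(key, timestamp, odometry, odometry_noise):
    """Hypothetical follow-up step: one odometry factor, a crude initial
    guess for the new pose, and the key->timestamp entry for the lag window."""
    new_factors = gtsam.NonlinearFactorGraph()
    new_values = gtsam.Values()
    new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()

    new_factors.push_back(
        gtsam.BetweenFactorPose2(key - 1, key, odometry, odometry_noise))
    new_values.insert(key, odometry)  # crude guess; refine with the last estimate if available
    new_timestamps.insert(_timestamp_key_value(key, timestamp))

    SMOOTHER_BATCH.update(new_factors, new_values, new_timestamps)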
Example #4
    def test_Pose2SLAMExample(self):
        # Assumptions
        #  - All values are axis aligned
        #  - Robot poses are facing along the X axis (horizontal, to the right in images)
        #  - We have full odometry for measurements
        #  - The robot is on a grid, moving 2 meters each step

        # Create graph container and add factors to it
        graph = gtsam.NonlinearFactorGraph()

        # Add prior
        # gaussian for prior
        priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
        priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3,
                                                                0.1]))
        # add directly to graph
        graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

        # Add odometry
        # general noisemodel for odometry
        odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))
        graph.add(
            gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2.0, 0.0, 0.0),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))

        # Add pose constraint
        model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
        graph.add(
            gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     model))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, pi / 2))
        initialEstimate.insert(4, gtsam.Pose2(4.0, 2.0, pi))
        initialEstimate.insert(5, gtsam.Pose2(2.1, 2.1, -pi / 2))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()

        # Plot Covariance Ellipses
        marginals = gtsam.Marginals(graph, result)
        P = marginals.marginalCovariance(1)

        pose_1 = result.atPose2(1)
        self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
Example #5
    def estimate_global(self):
        np.random.seed(2)
        n0 = 0.000001
        n03 = np.array([n0, n0, n0])
        nNoiseFactor3 = np.array(
            [0.2, 0.2,
             0.2])  # TODO: check float dtype and row-major layout in the Cython wrapper

        # Create an empty nonlinear factor graph
        graph = gtsam.NonlinearFactorGraph()

        # Add a prior on the first pose, setting it to the origin
        priorMean = self.odom_global[0]
        priorNoise = gtsam.noiseModel_Diagonal.Sigmas(n03)
        graph.add(gtsam.PriorFactorPose2(self.X(1), priorMean, priorNoise))

        # Add odometry factors
        odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(nNoiseFactor3)
        for i, pose in enumerate(self.odom_relative):
            graph.add(
                gtsam.BetweenFactorPose2(self.X(i + 1), self.X(i + 2), pose,
                                         odometryNoise))

        # Add visual factors
        visualNoise = gtsam.noiseModel_Diagonal.Sigmas(nNoiseFactor3)
        for i, pose in enumerate(self.visual_relative):
            graph.add(
                gtsam.BetweenFactorPose2(self.X(i + 1), self.X(i + 2), pose,
                                         visualNoise))

        # set initial guess to odometry estimates
        initialEstimate = gtsam.Values()
        for i, pose in enumerate(self.odom_global):
            initialEstimate.insert(self.X(i + 1), pose)

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        # gtsam_quadrics.setMaxIterations(params, iter)
        # gtsam_quadrics.setRelativeErrorTol(params, 1e-8)
        # gtsam_quadrics.setAbsoluteErrorTol(params, 1e-8)

        # graph.print_('graph')
        # initialEstimate.print_('initialEstimate ')
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate,
                                                      params)

        # parameters = gtsam.GaussNewtonParams()
        # parameters.relativeErrorTol = 1e-8
        # parameters.maxIterations = 300
        # optimizer = gtsam.GaussNewtonOptimizer(graph, initialEstimate, parameters)

        result = optimizer.optimize()

        # result.print_('result ')
        # self.draw_trajectories([self.odom_global, self.visual_global], ['b', 'r'], 2)

        return self.unwrap_results(result)
Example #6
def add_unary_factor(graph, keys, factor_cov, factor_meas):

    factor_noise_model = get_noise_model(factor_cov)
    factor_meas_pose = tf_utils.vec3_to_pose2(factor_meas)
    factor = gtsam.PriorFactorPose2(
        keys[0], factor_meas_pose, factor_noise_model)

    graph.push_back(factor)

    return graph
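The snippet relies on two project helpers that are not shown; a hedged, minimal reconstruction of what they plausibly do (the bodies below are guesses based only on how they are called above):

def get_noise_model(factor_cov):
    # Assumption: factor_cov holds per-axis variances for (x, y, theta).
    return gtsam.noiseModel.Diagonal.Variances(np.asarray(factor_cov))

def vec3_to_pose2(v):
    # Assumption: tf_utils.vec3_to_pose2 repackages (x, y, theta) as a Pose2.
    return gtsam.Pose2(v[0], v[1], v[2])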
Example #7
def build_graph():

    print("build_graph !!!")
    # Create noise models
    ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2,
                                                                0.1]))
    PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))

    # Create an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Add a prior on the first pose, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
    graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))

    # Add odometry factors
    odometry = gtsam.Pose2(2.0, 0.0, 0.0)
    odometry1 = gtsam.Pose2(edge_list[0].position.x, edge_list[0].position.y,
                            edge_list[0].position.z)
    odometry2 = gtsam.Pose2(edge_list[1].position.x, edge_list[1].position.y,
                            edge_list[1].position.z)

    # For simplicity, we will use the same noise model for each odometry factor
    # Create odometry (Between) factors between consecutive poses
    graph.add(gtsam.BetweenFactorPose2(1, 2, odometry1, ODOMETRY_NOISE))
    graph.add(gtsam.BetweenFactorPose2(2, 3, odometry2, ODOMETRY_NOISE))
    print("\nFactor Graph:\n{}".format(graph))

    # Create the data structure to hold the initial estimate of the solution
    # For illustrative purposes, these have been deliberately set to incorrect values
    initial = gtsam.Values()
    initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
    initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
    initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
    print("\nInitial Estimate:\n{}".format(initial))

    # optimize using Levenberg-Marquardt optimization
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    # 5. Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    for i in range(1, 4):
        print("X{} covariance:\n{}\n".format(i,
                                             marginals.marginalCovariance(i)))

    fig = plt.figure(0)
    for i in range(1, 4):
        gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
                              marginals.marginalCovariance(i))
    plt.axis('equal')
    plt.show()
Example #8
    def initialize(self, priorMean=[0, 0, 0], priorCov=[0, 0, 0]):
        # init the prior
        priorMean = gtsam.Pose2(priorMean[0], priorMean[1], priorMean[2])

        priorCov = gtsam.noiseModel_Diagonal.Sigmas(np.array(priorCov))

        self.graph.add(
            gtsam.PriorFactorPose2(X(self.currentKey), priorMean, priorCov))
        self.initialValues.insert(X(self.currentKey), priorMean)

        return
Example #9
    def addObs(self, measurement, measurementNoise):

        measurement = gtsam.Pose2(float(measurement[0]), float(measurement[1]),
                                  float(measurement[2]))
        measurementNoise = gtsam.noiseModel_Diagonal.Variances(
            np.array(measurementNoise))

        self.graph.add(
            gtsam.PriorFactorPose2(X(self.currentKey), measurement,
                                   measurementNoise))

        return
Example #10
def main():
    graph = gtsam.NonlinearFactorGraph()
    initialEstimate = gtsam.Values()

    priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.10]))
    graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(0, 0, 0), priorNoise))

    # read data from .g2o file
    # and initialize nodes/edges
    data_file = 'input_INTEL_g2o.g2o'
    with open(data_file, 'r') as f:
        for line in f:
            line_split = line.split()
            if line_split[0] == 'VERTEX_SE2':
                node = int(line_split[1])
                x, y, th = make_pose(line_split[2:])
                initialEstimate.insert(node, gtsam.Pose2(x, y, th))
            elif line_split[0] == 'EDGE_SE2':
                node1 = int(line_split[1])
                node2 = int(line_split[2])
                dx, dy, dth = make_pose(line_split[3:6])
                noise = gtsam.noiseModel.Gaussian.Information(
                    create_information_matrix(line_split[6:]))
                graph.add(
                    gtsam.BetweenFactorPose2(node1, node2,
                                             gtsam.Pose2(dx, dy, dth), noise))
    f.close()

    # initialEstimate.print("\nInitial Estimate:\n")

    parameters = gtsam.GaussNewtonParams()

    # Stop iterating once the change in error between steps is less than this value
    parameters.relativeErrorTol = 1e-5
    # Do not perform more than N iteration steps
    parameters.maxIterations = 100
    # Create the optimizer ...
    optimizer = gtsam.GaussNewtonOptimizer(graph, initialEstimate, parameters)
    # ... and optimize
    result = optimizer.optimize()
    result.print("Final Result:\n")

    keys = result.keys()
    # x = []
    # y = []
    # theta = []
    for key in keys:
        print(key)
        if result.exists(key):
            print(result.atPose2(key))
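`make_pose` and `create_information_matrix` are not shown in this snippet; a hedged reconstruction based on the standard g2o SE2 line layout (x, y, theta for vertices/edges, followed by the upper-triangular information entries I11 I12 I13 I22 I23 I33):

def make_pose(fields):
    # Parse x, y, theta from the string fields of a VERTEX_SE2 / EDGE_SE2 line.
    x, y, th = (float(v) for v in fields[:3])
    return x, y, th

def create_information_matrix(fields):
    # Rebuild the symmetric 3x3 information matrix from its upper-triangular entries.
    i11, i12, i13, i22, i23, i33 = (float(v) for v in fields[:6])
    return np.array([[i11, i12, i13],
                     [i12, i22, i23],
                     [i13, i23, i33]])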
Example #11
    def test_optimization(self):
        """Tests if a factor graph with a CustomFactor can be properly optimized"""
        gT1 = Pose2(1, 2, np.pi / 2)
        gT2 = Pose2(-1, 4, np.pi)

        expected = Pose2(2, 2, np.pi / 2)

        def error_func(this: CustomFactor, v: gtsam.Values,
                       H: List[np.ndarray]):
            """
            Error function that mimics a BetweenFactor
            :param this: reference to the current CustomFactor being evaluated
            :param v: Values object
            :param H: list of references to the Jacobian arrays
            :return: the non-linear error
            """
            key0 = this.keys()[0]
            key1 = this.keys()[1]
            gT1, gT2 = v.atPose2(key0), v.atPose2(key1)
            error = expected.localCoordinates(gT1.between(gT2))

            if H is not None:
                result = gT1.between(gT2)
                H[0] = -result.inverse().AdjointMap()
                H[1] = np.eye(3)
            return error

        noise_model = gtsam.noiseModel.Unit.Create(3)
        cf = CustomFactor(noise_model, [0, 1], error_func)

        fg = gtsam.NonlinearFactorGraph()
        fg.add(cf)
        fg.add(gtsam.PriorFactorPose2(0, gT1, noise_model))

        v = Values()
        v.insert(0, Pose2(0, 0, 0))
        v.insert(1, Pose2(0, 0, 0))

        params = gtsam.LevenbergMarquardtParams()
        optimizer = gtsam.LevenbergMarquardtOptimizer(fg, v, params)
        result = optimizer.optimize()

        self.gtsamAssertEquals(result.atPose2(0), gT1, tol=1e-5)
        self.gtsamAssertEquals(result.atPose2(1), gT2, tol=1e-5)
Example #12
    def __init__(self, initial_state, variance, alphas, beta):
        self._initial_state = gtsam.Pose2(initial_state[0], initial_state[1],
                                          initial_state[2])
        self._prior_noise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([variance, variance, variance]))
        self.observation_noise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array(([np.deg2rad(beta[1])**2, (beta[0] / 100)**2])))
        self.alphas = alphas**2 / 100
        self.pose_num = 0
        self.landmark_indexes = list()
        self.states_new = np.array([[]])

        self.graph = gtsam.NonlinearFactorGraph()
        self.estimations = gtsam.Values()
        self.result = gtsam.Values()

        self.graph.add(
            gtsam.PriorFactorPose2(self.pose_num, self._initial_state,
                                   self._prior_noise))
        self.estimations.insert(self.pose_num, self._initial_state)
Example #13
    def test_LocalizationExample(self):
        # Create the graph (defined in pose2SLAM.h, derived from
        # NonlinearFactorGraph)
        graph = gtsam.NonlinearFactorGraph()

        # Add two odometry factors
        # create a measurement for both factors (the same in this case)
        odometry = gtsam.Pose2(2.0, 0.0, 0.0)
        odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))  # 20cm std on x,y, 0.1 rad on theta
        graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise))
        graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))

        # Add three "GPS" measurements
        # We use Pose2 Priors here with high variance on theta
        groundTruth = gtsam.Values()
        groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0))
        groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0))
        groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0))
        model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 10.]))
        for i in range(3):
            graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(0, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(1, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(2, gtsam.Pose2(4.1, 0.1, 0.1))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()

        # Plot Covariance Ellipses
        marginals = gtsam.Marginals(graph, result)
        P = [None] * result.size()
        for i in range(0, result.size()):
            pose_i = result.atPose2(i)
            self.gtsamAssertEquals(pose_i, groundTruth.atPose2(i), 1e-4)
            P[i] = marginals.marginalCovariance(i)
Example #14
    def step1_initialize():
        # Create an empty nonlinear factor graph.
        # We will need to do this for every update.
        graph = gtsam.NonlinearFactorGraph()

        # Create a key for the first pose.
        X1 = X(1)

        # Update the list with the new pose variable key.
        pose_variables.append(X1)

        # Add a prior on pose X1 at the origin.
        prior_noise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.1, 0.1, 0.1]))
        graph.add(
            gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0),
                                   prior_noise))

        # Set an initial estimate for the first pose.
        initial_estimate = gtsam.Values()
        initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))

        # Update ISAM2 with the initial factor graph.
        isam.update(graph, initial_estimate)
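A hedged sketch of what the next incremental step might look like in the same style (the `step2_add_odometry` name, the odometry value, and its noise model are assumptions; `isam` and `pose_variables` are the same objects used above):

    def step2_add_odometry():
        # A fresh graph holding only the new factor; iSAM2 keeps the old ones.
        graph = gtsam.NonlinearFactorGraph()

        X2 = X(2)
        pose_variables.append(X2)

        odometry_noise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.1, 0.1, 0.1]))
        graph.add(
            gtsam.BetweenFactorPose2(X(1), X2, gtsam.Pose2(2.0, 0.0, 0.0),
                                     odometry_noise))

        # Provide an initial estimate only for the newly introduced variable.
        initial_estimate = gtsam.Values()
        initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))

        isam.update(graph, initial_estimate)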
Example #15
    def test_OdometryExample(self):
        # Create the graph (defined in pose2SLAM.h, derived from
        # NonlinearFactorGraph)
        graph = gtsam.NonlinearFactorGraph()

        # Add a Gaussian prior on pose x_1
        priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior mean is at origin
        priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array(
            [0.3, 0.3, 0.1]))  # 30cm std on x,y, 0.1 rad on theta
        # add directly to graph
        graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

        # Add two odometry factors
        # create a measurement for both factors (the same in this case)
        odometry = gtsam.Pose2(2.0, 0.0, 0.0)
        odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))  # 20cm std on x,y, 0.1 rad on theta
        graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
        graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()
        marginals = gtsam.Marginals(graph, result)
        marginals.marginalCovariance(1)

        # Check first pose equality
        pose_1 = result.atPose2(1)
        self.assertTrue(pose_1.equals(gtsam.Pose2(), 1e-4))
Example #16
# Helper function to create a pose
def vector3(x, y, z):
    """Create 3d double numpy array."""
    return np.array([x, y, z], dtype=float)


# Create noise model
priorNoise = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1))
model = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))

# Instantiate the factor graph
example_graph = gtsam.NonlinearFactorGraph()

# Adding a prior on the first pose
example_graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), priorNoise))

# Create odometry (Between) factors between consecutive poses
example_graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), model))
example_graph.add(
    gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2), model))
example_graph.add(
    gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2), model))
example_graph.add(
    gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2), model))

# Add the loop closure constraint
example_graph.add(
    gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2), model))

# Create the initial estimate
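# The scraped snippet is cut off here; based on the identical example that
# reappears in Example #26 below, the continuation would be:
example_initial_estimate = gtsam.Values()
example_initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
example_initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
example_initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
example_initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
example_initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))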
Example #17
def vector3(x, y, z):
    """Create 3d double numpy array."""
    return np.array([x, y, z], dtype=float)


# Create noise models
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1))
ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))

# 1. Create a factor graph container and add factors to it
graph = gtsam.NonlinearFactorGraph()

# 2a. Add a prior on the first pose, setting it to the origin
# A prior factor consists of a mean and a noise model (covariance matrix)
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))

# 2b. Add odometry factors
# Create odometry (Between) factors between consecutive poses
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE))
graph.add(
    gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2),
                             ODOMETRY_NOISE))
graph.add(
    gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2),
                             ODOMETRY_NOISE))
graph.add(
    gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2),
                             ODOMETRY_NOISE))

# 2c. Add the loop closure constraint
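# The scraped snippet is truncated after this comment; mirroring the
# loop-closure factor in Example #16 above, the missing factor would plausibly be:
graph.add(
    gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2),
                             ODOMETRY_NOISE))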
Example #18
def batch_factorgraph_example():
    # Create an empty nonlinear factor graph.
    graph = gtsam.NonlinearFactorGraph()

    # Create the keys for the poses.
    X1 = X(1)
    X2 = X(2)
    X3 = X(3)
    pose_variables = [X1, X2, X3]

    # Create keys for the landmarks.
    L1 = L(1)
    L2 = L(2)
    landmark_variables = [L1, L2]

    # Add a prior on pose X1 at the origin.
    prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.1]))
    graph.add(
        gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), prior_noise))

    # Add odometry factors between X1,X2 and X2,X3, respectively
    odometry_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1,
                                                                0.1]))
    graph.add(
        gtsam.BetweenFactorPose2(X1, X2, gtsam.Pose2(2.0, 0.0, 0.0),
                                 odometry_noise))
    graph.add(
        gtsam.BetweenFactorPose2(X2, X3, gtsam.Pose2(2.0, 0.0, 0.0),
                                 odometry_noise))

    # Add Range-Bearing measurements to two different landmarks L1 and L2
    measurement_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.05, 0.1]))
    graph.add(
        gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45),
                                   np.sqrt(4.0 + 4.0), measurement_noise))
    graph.add(
        gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_noise))
    graph.add(
        gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_noise))

    # Create (deliberately inaccurate) initial estimate
    initial_estimate = gtsam.Values()
    initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
    initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
    initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
    initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
    initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))

    # Create an optimizer.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  params)

    # Solve the MAP problem.
    result = optimizer.optimize()

    # Calculate marginal covariances for all variables.
    marginals = gtsam.Marginals(graph, result)

    # Extract marginals
    pose_marginals = []
    for var in pose_variables:
        pose_marginals.append(
            MultivariateNormalParameters(result.atPose2(var),
                                         marginals.marginalCovariance(var)))

    landmark_marginals = []
    for var in landmark_variables:
        landmark_marginals.append(
            MultivariateNormalParameters(result.atPoint2(var),
                                         marginals.marginalCovariance(var)))

    # You can extract the joint marginals like this.
    joint_all = marginals.jointMarginalCovariance(
        gtsam.KeyVector(pose_variables + landmark_variables))
    print("Joint covariance over all variables:")
    print(joint_all.fullMatrix())

    # Plot the marginals.
    plot_result(pose_marginals, landmark_marginals)
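`MultivariateNormalParameters` and `plot_result` are defined elsewhere in the source notebook; a hedged, minimal stand-in for the container so the extraction loop above is self-explanatory (the field names are assumptions):

from collections import namedtuple

# Assumed shape: a mean (Pose2 or Point2) together with its marginal covariance.
MultivariateNormalParameters = namedtuple("MultivariateNormalParameters",
                                          ["mean", "covariance"])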
Example #19
import gtsam

import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot

# Create noise models
ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))

# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()

# Add a prior on the first pose, setting it to the origin
# A prior factor consists of a mean and a noise model (covariance matrix)
priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))

# Add odometry factors
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
# For simplicity, we will use the same noise model for each odometry factor
# Create odometry (Between) factors between consecutive poses
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE))
print("\nFactor Graph:\n{}".format(graph))

# Create the data structure to hold the initial estimate of the solution
# For illustrative purposes, these have been deliberately set to incorrect values
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
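# The snippet stops before the optimization; following the pattern of the other
# Levenberg-Marquardt examples on this page, the continuation would look like:
print("\nInitial Estimate:\n{}".format(initial))

params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))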
Example #20
File: slam.py  Project: programath/mhjcbb
def mhjcbb(sim,
           num_tracks=10,
           prob=0.95,
           posterior_pose_md_threshold=1.5,
           prune2_skip=10,
           max_observed_diff=3):
    slams = [[gtsam.ISAM2(), set()]]

    prune2_count = 1
    observed = set()
    for x, (odom, obs) in enumerate(sim.step()):
        for isam2, observed in slams:
            graph = gtsam.NonlinearFactorGraph()
            values = gtsam.Values()
            if x == 0:
                prior_model = gtsam.noiseModel_Diagonal.Sigmas(
                    np.array([sim.sigma_x, sim.sigma_y, sim.sigma_theta]))
                prior_factor = gtsam.PriorFactorPose2(X(0), odom, prior_model)
                graph.add(prior_factor)
                values.insert(X(0), odom)
            else:
                odom_model = gtsam.noiseModel_Diagonal.Sigmas(
                    np.array([sim.sigma_x, sim.sigma_y, sim.sigma_theta]))
                odom_factor = gtsam.BetweenFactorPose2(X(x - 1), X(x), odom,
                                                       odom_model)
                graph.add(odom_factor)
                pose0 = isam2.calculateEstimatePose2(X(x - 1))
                values.insert(X(x), pose0.compose(odom))

            isam2.update(graph, values)

        ################################################################################
        mhjcbb = gtsam.da_MHJCBB2(num_tracks, prob,
                                  posterior_pose_md_threshold)
        for isam2, observed, in slams:
            mhjcbb.initialize(isam2)

        for l, br in obs.items():
            br_model = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([sim.sigma_bearing, sim.sigma_range]))
            mhjcbb.add(gtsam.Rot2(br[0]), br[1], br_model)

        mhjcbb.match()
        ################################################################################

        new_slams = []
        for i in range(mhjcbb.size()):
            track, keys = mhjcbb.get(i)
            keys = [gtsam.symbolIndex(keys.at(i)) for i in range(keys.size())]

            isam2 = gtsam.ISAM2()
            isam2.update(slams[track][0].getFactorsUnsafe(),
                         slams[track][0].calculateEstimate())
            graph = gtsam.NonlinearFactorGraph()
            values = gtsam.Values()
            observed = set(slams[track][1])
            for (l_true, br), l in zip(obs.items(), keys):
                br_model = gtsam.noiseModel_Diagonal.Sigmas(
                    np.array([sim.sigma_bearing, sim.sigma_range]))
                br_factor = gtsam.BearingRangeFactor2D(X(x), L(l),
                                                       gtsam.Rot2(br[0]),
                                                       br[1], br_model)

                graph.add(br_factor)
                if l not in observed:
                    pose1 = isam2.calculateEstimatePose2(X(x))
                    point = gtsam.Point2(br[1] * np.cos(br[0]),
                                         br[1] * np.sin(br[0]))
                    values.insert(L(l), pose1.transform_from(point))
                    observed.add(l)
            isam2.update(graph, values)
            new_slams.append([isam2, observed])
        slams = new_slams
        slams = prune1(slams, x, posterior_pose_md_threshold)

        if len(slams[0][1]) > prune2_count * prune2_skip:
            slams = prune2(slams, max_observed_diff)
            prune2_count += 1

    result = []
    for isam2, observed in slams:
        traj_est = [
            isam2.calculateEstimatePose2(X(x)) for x in range(len(sim.traj))
        ]
        traj_est = np.array([(p.x(), p.y(), p.theta()) for p in traj_est])
        landmark_est = [isam2.calculateEstimatePoint2(L(l)) for l in observed]
        landmark_est = np.array([(p.x(), p.y()) for p in landmark_est])
        result.append((traj_est, landmark_est))
    return result
Example #21
File: slam.py  Project: programath/mhjcbb
def slam(sim, da='jcbb', prob=0.95):
    isam2 = gtsam.ISAM2()
    graph = gtsam.NonlinearFactorGraph()
    values = gtsam.Values()

    observed = set()
    for x, (odom, obs) in enumerate(sim.step()):
        if x == 0:
            prior_model = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([sim.sigma_x, sim.sigma_y, sim.sigma_theta]))
            prior_factor = gtsam.PriorFactorPose2(X(0), odom, prior_model)
            graph.add(prior_factor)
            values.insert(X(0), odom)
        else:
            odom_model = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([sim.sigma_x, sim.sigma_y, sim.sigma_theta]))
            odom_factor = gtsam.BetweenFactorPose2(X(x - 1), X(x), odom,
                                                   odom_model)
            graph.add(odom_factor)
            pose0 = isam2.calculateEstimatePose2(X(x - 1))
            values.insert(X(x), pose0.compose(odom))

        isam2.update(graph, values)
        graph.resize(0)
        values.clear()
        estimate = isam2.calculateEstimate()

        if da == 'dr':
            for l_true, br in obs.items():
                l = len(observed)
                br_model = gtsam.noiseModel_Diagonal.Sigmas(
                    np.array([sim.sigma_bearing, sim.sigma_range]))
                br_factor = gtsam.BearingRangeFactor2D(X(x), L(l),
                                                       gtsam.Rot2(br[0]),
                                                       br[1], br_model)
                graph.add(br_factor)
                if l not in observed:
                    pose1 = isam2.calculateEstimatePose2(X(x))
                    point = gtsam.Point2(br[1] * np.cos(br[0]),
                                         br[1] * np.sin(br[0]))
                    values.insert(L(l), pose1.transform_from(point))
                    observed.add(l)
        elif da == 'perfect':
            for l_true, br in obs.items():
                br_model = gtsam.noiseModel_Diagonal.Sigmas(
                    np.array([sim.sigma_bearing, sim.sigma_range]))
                br_factor = gtsam.BearingRangeFactor2D(X(x), L(l_true),
                                                       gtsam.Rot2(br[0]),
                                                       br[1], br_model)
                graph.add(br_factor)
                if l_true not in observed:
                    pose1 = isam2.calculateEstimatePose2(X(x))
                    point = gtsam.Point2(br[1] * np.cos(br[0]),
                                         br[1] * np.sin(br[0]))
                    values.insert(L(l_true), pose1.transform_from(point))
                    observed.add(l_true)
        elif da == 'jcbb':
            ################################################################################
            jcbb = gtsam.da_JCBB2(isam2, prob)
            for l, br in obs.items():
                br_model = gtsam.noiseModel_Diagonal.Sigmas(
                    np.array([sim.sigma_bearing, sim.sigma_range]))
                jcbb.add(gtsam.Rot2(br[0]), br[1], br_model)

            keys = jcbb.match()
            ################################################################################

            keys = [gtsam.symbolIndex(keys.at(i)) for i in range(keys.size())]
            for (l_true, br), l in zip(obs.items(), keys):
                br_model = gtsam.noiseModel_Diagonal.Sigmas(
                    np.array([sim.sigma_bearing, sim.sigma_range]))
                br_factor = gtsam.BearingRangeFactor2D(X(x), L(l),
                                                       gtsam.Rot2(br[0]),
                                                       br[1], br_model)
                graph.add(br_factor)
                if l not in observed:
                    pose1 = isam2.calculateEstimatePose2(X(x))
                    point = gtsam.Point2(br[1] * np.cos(br[0]),
                                         br[1] * np.sin(br[0]))
                    values.insert(L(l), pose1.transform_from(point))
                    observed.add(l)

        isam2.update(graph, values)
        graph.resize(0)
        values.clear()

    traj_est = [
        isam2.calculateEstimatePose2(X(x)) for x in range(len(sim.traj))
    ]
    traj_est = np.array([(p.x(), p.y(), p.theta()) for p in traj_est])
    landmark_est = [isam2.calculateEstimatePoint2(L(l)) for l in observed]
    landmark_est = np.array([(p.x(), p.y()) for p in landmark_est])
    return [[traj_est, landmark_est]]
Example #22
args = parser.parse_args()

g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\
    else args.input

maxIterations = 100 if args.maxiter is None else args.maxiter

is3D = False

graph, initial = gtsam.readG2o(g2oFile, is3D)

assert args.kernel == "none", "Supplied kernel type is not yet implemented"

# Add prior on the pose having index (key) = 0
priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8))
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))

params = gtsam.GaussNewtonParams()
params.setVerbosity("Termination")
params.setMaxIterations(maxIterations)
# parameters.setRelativeErrorTol(1e-5)
# Create the optimizer ...
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
# ... and optimize
result = optimizer.optimize()

print("Optimization complete")
print("initial error = ", graph.error(initial))
print("final error = ", graph.error(result))

if args.output is None:
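    # The scraped snippet is cut off here; Example #25 below contains the full
    # version of this script, which continues:
    print("\nFactor Graph:\n{}".format(graph))
    print("\nInitial Estimate:\n{}".format(initial))
    print("Final Result:\n{}".format(result))
else:
    outputFile = args.output
    print("Writing results to file: ", outputFile)
    graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
    gtsam.writeG2o(graphNoKernel, result, outputFile)
    print("Done!")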
Example #23
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2]))

# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()

# Create the keys corresponding to unknown variables in the factor graph
X1 = X(1)
X2 = X(2)
X3 = X(3)
L1 = L(4)
L2 = L(5)

# Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model
graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))

# Add odometry factors between X1,X2 and X2,X3, respectively
graph.add(gtsam.BetweenFactorPose2(
    X1, X2, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(
    X2, X3, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE))

# Add Range-Bearing measurements to two different landmarks L1 and L2
graph.add(gtsam.BearingRangeFactor2D(
    X1, L1, gtsam.Rot2.fromDegrees(45), np.sqrt(4.0+4.0), MEASUREMENT_NOISE))
graph.add(gtsam.BearingRangeFactor2D(
    X2, L1, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))
graph.add(gtsam.BearingRangeFactor2D(
    X3, L2, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))
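# The scraped snippet ends before the initial estimate and optimization; a
# plausible continuation, mirroring the identical graph built in the
# batch_factorgraph_example snippet above (an assumption, not the original code):
initial_estimate = gtsam.Values()
initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))

params = gtsam.LevenbergMarquardtParams()
result = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params).optimize()
print("\nFinal Result:\n{}".format(result))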
Example #24
# Duplicating example from the tutorial
import gtsam
import numpy as np
import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot

# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()

# Add a Gaussian prior on pose x1
priorMean = gtsam.Pose2(0.0, 0.0, 0.0)
priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

# Add two odometry factors
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))

# Create (deliberately inaccurate) initial estimate
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0,  0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1,  0.1))

# Optimize using a Dogleg optimizer
result = gtsam.DoglegOptimizer(graph, initial).optimize()

# Print results
np.set_printoptions(precision=4, suppress=True)
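# The snippet is truncated here; a continuation in the style of the other
# examples on this page (an assumption, not part of the original) would be:
print("Final Result:\n{}".format(result))

# Calculate and print marginal covariances for all variables
marginals = gtsam.Marginals(graph, result)
for i in range(1, 4):
    print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))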
Example #25
def main():
    """Main runner."""

    parser = argparse.ArgumentParser(
        description="A 2D Pose SLAM example that reads input from g2o, "
        "converts it to a factor graph and does the optimization. "
        "Output is written on a file, in g2o format")
    parser.add_argument('-i', '--input', help='input file g2o format')
    parser.add_argument(
        '-o',
        '--output',
        help="the path to the output file with optimized graph")
    parser.add_argument('-m',
                        '--maxiter',
                        type=int,
                        help="maximum number of iterations for optimizer")
    parser.add_argument('-k',
                        '--kernel',
                        choices=['none', 'huber', 'tukey'],
                        default="none",
                        help="Type of kernel used")
    parser.add_argument("-p",
                        "--plot",
                        action="store_true",
                        help="Flag to plot results")
    args = parser.parse_args()

    g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\
        else args.input

    maxIterations = 100 if args.maxiter is None else args.maxiter

    is3D = False

    graph, initial = gtsam.readG2o(g2oFile, is3D)

    assert args.kernel == "none", "Supplied kernel type is not yet implemented"

    # Add prior on the pose having index (key) = 0
    priorModel = gtsam.noiseModel.Diagonal.Variances(
        gtsam.Point3(1e-6, 1e-6, 1e-8))
    graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))

    params = gtsam.GaussNewtonParams()
    params.setVerbosity("Termination")
    params.setMaxIterations(maxIterations)
    # parameters.setRelativeErrorTol(1e-5)
    # Create the optimizer ...
    optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
    # ... and optimize
    result = optimizer.optimize()

    print("Optimization complete")
    print("initial error = ", graph.error(initial))
    print("final error = ", graph.error(result))

    if args.output is None:
        print("\nFactor Graph:\n{}".format(graph))
        print("\nInitial Estimate:\n{}".format(initial))
        print("Final Result:\n{}".format(result))
    else:
        outputFile = args.output
        print("Writing results to file: ", outputFile)
        graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
        gtsam.writeG2o(graphNoKernel, result, outputFile)
        print("Done!")

    if args.plot:
        resultPoses = gtsam.utilities.extractPose2(result)
        for i in range(resultPoses.shape[0]):
            plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
        plt.show()
Example #26
def icp(clouda, cloudb, initial_transform=gtsam.Pose3(), max_iterations=25):
    """Runs ICP on two clouds by calling
    all five steps implemented above.
    Iterates until close enough or max
    iterations.

    Returns a series of intermediate clouds
    for visualization purposes.

    Args:
        clouda (ndarray):                point cloud A
        cloudb (ndarray):                point cloud B
        initial_transform (gtsam.Pose3): the initial estimate of transform between clouda and cloudb (step 1 of icp)
        max_iterations (int):            maximum iters of ICP to run before breaking

    Ret:
        bTa (gtsam.Pose3): the final transform
        icp_series (list): visualized icp for debugging
    """
    
    
    icp_series = []
    bTa = initial_transform
    i = 0
    converged = False
    while i < max_iterations and not converged:
        # Transform cloud A by the current estimate, pair points with cloud B,
        # and estimate the remaining incremental transform.
        newClTr = transform_cloud(bTa, clouda)
        newCloudb = assign_closest_pairs(newClTr, cloudb)
        transform = estimate_transform(newClTr, newCloudb)

        # Stop once the incremental transform is numerically the identity.
        if transform.equals(gtsam.Pose3.identity(), 1e-2):
            converged = True
        else:
            bTa = gtsam.Pose3(np.matmul(bTa.matrix(), transform.matrix()))
            i += 1
            icp_series.append([newClTr, cloudb])

    return bTa, icp_series

"""The animation shows how clouda has moved after each iteration of ICP. You should see stationary landmarks, like walls and parked cars, converge onto each other."""

aTb, icp_series = icp(clouda, cloudb)
visualize_clouds_animation(icp_series, speed=400, show_grid_lines=True)

"""ICP is a computationally intense algorithm and we plan to run it between each cloud pair in our 180 clouds dataset. Use the python profiler to identify the computationally expensive subroutines in your algorithm and use numpy to reduce your runtime. The TAs get ~6.5 seconds."""

import cProfile
cProfile.run('icp(clouda, cloudb)')

"""These unit tests will verify the basic functionality of the functions you've implemented in this section. Keep in mind that these are not exhaustive."""

import unittest

class TestICP(unittest.TestCase):

    def setUp(self):
        self.testclouda = np.array([[1], [1], [1]])
        self.testcloudb = np.array([[2, 10], [1, 1], [1, 1]])
        self.testcloudc = np.array([[2], [1], [1]])
        self.testbTa = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1, 0, 0))
        self.testcloudd = np.array([[0, 20, 10], [0, 10, 20], [0, 0, 0]])
        self.testcloude = np.array([[10, 30, 20], [10, 20, 30], [0, 0, 0]])

    def test_assign_closest_pairs1(self):
        expected = (3, 1)
        actual = assign_closest_pairs(self.testclouda, self.testcloudb).shape
        self.assertEqual(expected, actual)

    def test_assign_closest_pairs2(self):
        expected = 2
        actual = assign_closest_pairs(self.testclouda, self.testcloudb)[0][0]
        self.assertEqual(expected, actual)

    def test_estimate_transform1(self):
        expected = 1
        actual = estimate_transform(self.testclouda, self.testcloudc).x()
        self.assertEqual(expected, actual)

    def test_estimate_transform2(self):
        expected = 10
        actual = estimate_transform(self.testcloudd, self.testcloude).x()
        self.assertAlmostEqual(expected, actual, places=7)
        actual2 = estimate_transform(self.testcloudd, self.testcloude).y()
        self.assertAlmostEqual(expected, actual2, places=7)

    def test_transform_cloud1(self):
        expected = 2
        actual = transform_cloud(self.testbTa, self.testclouda)[0][0]
        self.assertEqual(expected, actual)

    def test_icp1(self):
        ret = icp(self.testclouda, self.testcloudb)
        expected1 = type(gtsam.Pose3())
        actual1 = type(ret[0])
        self.assertEqual(expected1, actual1)
        expected2 = type([])
        actual2 = type(ret[1])
        self.assertEqual(expected2, actual2)
        expected3 = type([])
        actual3 = type(ret[1][0])
        self.assertEqual(expected3, actual3)

    def test_icp2(self):
        expected = 1
        actual = icp(self.testclouda, self.testcloudb)[0].x()
        self.assertEqual(expected, actual)

    def test_icp3(self):
        expected = 1
        actual = icp(self.testclouda, self.testcloudc)[0].x()
        self.assertEqual(expected, actual)

if __name__ == "__main__":
    unittest.main(argv=['first-arg-is-ignored'], exit=False)

"""# Factor Graph

In this section, we'll build a factor graph to estimate the pose of our vehicle using the transforms our ICP algorithm gives us between frames. These ICP transforms are the factors that tie the pose variables together.

We will be using GTSAM to construct the factor graph as well as perform an optimization for the pose of the car as it travels down the street. Let's start with a simple example. Recall from the PoseSLAM example described in the LIDAR slides how we could add a factor (a constraint) between two state variables, and how revisiting a state let us add a loop closure. Since the car in our dataset never revisits a previous pose, there is no loop closure. Here is that example from the slides. Note how the graph is initialized and how factors are added.
"""

# # Factor graph example 

# Helper function to create a pose
def vector3(x, y, z):
    """Create 3d double numpy array."""
    return np.array([x, y, z], dtype=float)

# Create noise model
priorNoise = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1))
model = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))

# Instantiate the factor graph
example_graph = gtsam.NonlinearFactorGraph()

# Adding a prior on the first pose
example_graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), priorNoise))

# Create odometry (Between) factors between consecutive poses
example_graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2, 0, 0), model)) 
example_graph.add(gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2), model)) 
example_graph.add(gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2), model)) 
example_graph.add(gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2), model)) 

# Add the loop closure constraint
example_graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2), model)) 

# Create the initial estimate
example_initial_estimate = gtsam.Values()
example_initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
example_initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
example_initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
example_initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
example_initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))

# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
ex_parameters = gtsam.GaussNewtonParams()
ex_parameters.setRelativeErrorTol(1e-5)
ex_parameters.setMaxIterations(100)
ex_optimizer = gtsam.GaussNewtonOptimizer(example_graph, example_initial_estimate, ex_parameters)
ex_result = ex_optimizer.optimize()
print("Final Result:\n{}".format(ex_result))

# Plot your graph
marginals = gtsam.Marginals(example_graph, ex_result)
fig = plt.figure(0)
for i in range(1, 6):
    gtsam_plot.plot_pose2(0, ex_result.atPose2(i), 0.5, marginals.marginalCovariance(i))

plt.axis('equal')
plt.show()

"""**TODO** [25 points]

You will be using your ICP implementation here to find the transform between consecutive clouds. These transforms become the factors between pose variables in the graph. So, you will need to go through all the point clouds and run ICP pair-wise to find the relative movement of the car. With these transformations, create a factor representing the transform between the pose variables.

We talked about how loop closure helps us consolidate conflicting data into a better global estimate. Unfortunately, our car does not perform a loop closure, so our graph would just be a long series of poses connected by ICP-returned transforms. However, our lidar scans are noisy, which means the ICP-returned transforms are not perfect either. This ultimately results in incorrect vehicle poses and an incorrect overall map. One way to augment the graph is through "skipping": run ICP between every other cloud as well, and add these skip connections into the graph. In other words, perform ICP between two non-consecutive point clouds and add that transform as an additional factor in the factor graph.
"""

def populate_factor_graph(graph, initial_estimates, initial_pose, clouds):
    """Populates a gtsam.NonlinearFactorGraph with
    factors between state variables. Populates
    initial_estimates for state variables as well.

    Args:
        graph (gtsam.NonlinearFactorGraph): the factor graph populated with ICP constraints
        initial_estimates (gtsam.Values):   the populated estimates for vehicle poses
        initial_pose (gtsam.Pose3):         the starting pose for the estimates in world coordinates
        clouds (np.ndarray):                the numpy array with all our point clouds
    """
    ICP_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
    factor_pose = initial_pose

    # Add ICP Factors between each pair of clouds
    prev_T = gtsam.Pose3()
    for i in range(len(clouds) - 1):
        # TODO: Run ICP between clouds (hint: use the initial_transform argument)
        bta, icp_series = icp(clouds[i], clouds[i+1], initial_transform=prev_T)
        #T = initial_transform(initial_pose, clouds)

        # TODO: Set T to its inverse: use `gtsam.Pose3.inverse()`
        T = bta.inverse()
        # TODO: Add a `gtsam.BetweenFactorPose3()` to the graph
        graph.add(gtsam.BetweenFactorPose3(i, i+1, T, ICP_NOISE))



        factor_pose = factor_pose.compose(T)
        initial_estimates.insert(i+1, factor_pose)
        print(".", end="")

    # Add skip connections between every other frame
    prev_T = gtsam.Pose3()
    for i in range(0, len(clouds) - 2, 2):
        # TODO: Run ICP between clouds (hint: use the initial_transform argument)
        bta, icp_series = icp(clouds[i], clouds[i+2],initial_transform=prev_T)
        # TODO: Set T to its inverse: use `gtsam.Pose3.inverse()`
        T = bta.inverse()
        # TODO: Add a `gtsam.BetweenFactorPose3()` to the graph
        graph.add(gtsam.BetweenFactorPose3(i, i+2, T, ICP_NOISE))

        print(".", end="")

    return graph, initial_estimates

"""The real power of GTSAM will show here. In five lines, we'll setup a Gauss Newton nonlinear optimizer and optimize for the vehicle's poses in world coordinates.

Note: This cell runs your ICP implementation 180 times. If you've implemented your ICP similarly to the TAs, expect this cell to take 2 minutes. If you're missing the `initial_transform` argument for icp, it may take ~1 hour.
"""

# load in all clouds in our dataset
clouds = read_ply(*scans_fnames)

# Setting up our factor graph
graph = gtsam.NonlinearFactorGraph()
initial_estimates = gtsam.Values()

# We get the initial pose of the car from Argo AI's dataset, and we add it to the graph as such
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
initial_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                           gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
graph.add(gtsam.PriorFactorPose3(0, initial_pose, PRIOR_NOISE))
initial_estimates.insert(0, initial_pose)

# We'll use your function to populate the factor graph
graph, initial_estimates = populate_factor_graph(graph, initial_estimates, initial_pose, clouds)

# Now optimize for the states
parameters = gtsam.GaussNewtonParams()
parameters.setRelativeErrorTol(1e-5)
parameters.setMaxIterations(100)
optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimates, parameters)
result = optimizer.optimize()

"""Let's plot these poses to see how our vechicle moves.

Screenshot this for your reflection.
"""

poses_cloud = np.array([[], [], []])
for i in range(len(clouds)):
    poses_cloud = np.hstack([poses_cloud, np.array([[result.atPose3(i).x()], [result.atPose3(i).y()], [result.atPose3(i).z()]])])

init_car_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                            gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
visualize_clouds([poses_cloud, transform_cloud(init_car_pose, clouds[0])], show_grid_lines=True)

"""These unit tests will verify the basic functionality of the function you've implemented in this section. Keep in mind that these are not exhaustive."""

import unittest

class TestFactorGraph(unittest.TestCase):

    def setUp(self):
        test_clouds = read_ply(*scans_fnames)[:6]
        PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
        ICP_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
        test_graph = gtsam.NonlinearFactorGraph()
        test_initial_estimates = gtsam.Values()
        initial_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                                   gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
        test_graph.add(gtsam.PriorFactorPose3(0, initial_pose, PRIOR_NOISE))
        test_initial_estimates.insert(0, initial_pose)
        test_graph, test_initial_estimates = populate_factor_graph(test_graph, test_initial_estimates, initial_pose, test_clouds)
        self.graph = test_graph
        self.initial_estimates = test_initial_estimates

    def test_graph_params(self):
        self.assertTrue(type(self.graph) == gtsam.NonlinearFactorGraph)

    def test_initial_estimates_params(self):
        self.assertTrue(type(self.initial_estimates) == gtsam.Values)

def suite():
    functions = ['test_graph_params', 'test_initial_estimates_params']
    suite = unittest.TestSuite()
    for func in functions:
        suite.addTest(TestFactorGraph(func))
    return suite
    
if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    runner.run(suite())

"""# Mapping

In this section, we'll tackle the mapping component of SLAM (Simultaneous Localization and Mapping). The previous section used a factor graph to localize our vehicle's poses in world coordinates. We'll now use those poses to form a map of the street from the point clouds.

Given the poses and the clouds, this task is easy. We'll use your `transform_cloud` method from the ICP section to transform every other cloud in our dataset to be centered at the corresponding pose where the cloud was captured. Visualizing all of these clouds yields the complete map. We don't use every cloud in our dataset to reduce the amount of noise in our map while retaining plenty of detail.

Screenshot this for your reflection.
"""

# Transform every other cloud into world coordinates using its optimized pose
cloud_map = []
for i in range(0, len(clouds), 2):
    cloud_map.append(transform_cloud(result.atPose3(i), clouds[i]))

visualize_clouds(cloud_map, show_grid_lines=True, single_color="#C6C6C6")

"""# Reflection
示例#27
def Pose2SLAM_ISAM2_example():
    """Perform 2D SLAM given the ground truth changes in pose as well as
    simple loop closure detection."""
    plt.ion()

    # Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
    prior_xy_sigma = 0.3

    # Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
    prior_theta_sigma = 5

    # Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
    odometry_xy_sigma = 0.2

    # Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
    odometry_theta_sigma = 5

    # Although this example only uses linear measurements and Gaussian noise models, it is important
    # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
    # simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
                                                            prior_xy_sigma,
                                                            prior_theta_sigma*np.pi/180]))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
                                                                odometry_xy_sigma,
                                                                odometry_theta_sigma*np.pi/180]))

    # Create a Nonlinear factor graph as well as the data structure to hold state estimates.
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
    # update calls are required to perform the relinearization.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.1)
    parameters.relinearizeSkip = 1
    isam = gtsam.ISAM2(parameters)

    # Create the ground truth odometry measurements of the robot during the trajectory.
    true_odometry = [(2, 0, 0),
                    (2, 0, math.pi/2),
                    (2, 0, math.pi/2),
                    (2, 0, math.pi/2),
                    (2, 0, math.pi/2)]

    # Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
    odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
                                for true_odom in true_odometry]

    # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
    # iSAM2 incremental optimization.
    graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
    initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))

    # Initialize the current estimate which is used during the incremental inference loop.
    current_estimate = initial_estimate

    for i in range(len(true_odometry)):

        # Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
        noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]

        # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
        loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)

        # Add a binary factor in between two existing states if loop closure is detected.
        # Otherwise, add a binary factor between a newly observed state and the previous state.
        if loop:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop, 
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
        else:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2, 
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))

            # Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
            computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
                                                                                    noisy_odom_y,
                                                                                    noisy_odom_theta))
            initial_estimate.insert(i + 2, computed_estimate)

        # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
        isam.update(graph, initial_estimate)
        current_estimate = isam.calculateEstimate()

        # Report all current state estimates from the iSAM2 optimization.
        report_on_progress(graph, current_estimate, i)
        initial_estimate.clear()

    # Print the final covariance matrix for each pose after completing inference on the trajectory.
    marginals = gtsam.Marginals(graph, current_estimate)
    for i in range(1, len(true_odometry)+1):
        print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
    
    plt.ioff()
    plt.show()
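
# Note: `determine_loop_closure` and `report_on_progress` are helper functions from the
# original iSAM2 example that are not reproduced in this snippet. A rough, hypothetical
# sketch of the loop-closure check used above (predict the new pose by composing the noisy
# odometry with the latest estimate, then look for an existing pose within tolerance):
def determine_loop_closure_sketch(odom, current_estimate, key, xy_tol, theta_tol):
    """Return the key of a previously estimated pose near the predicted pose, else None."""
    prev_est = current_estimate.atPose2(key + 1)
    curr_est = prev_est.compose(gtsam.Pose2(odom[0], odom[1], odom[2]))
    for k in range(1, key + 1):
        pose = current_estimate.atPose2(k)
        if (abs(pose.x() - curr_est.x()) <= xy_tol and
                abs(pose.y() - curr_est.y()) <= xy_tol and
                abs(pose.theta() - curr_est.theta()) <= theta_tol * np.pi / 180):
            return k
    return None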
    def test_FixedLagSmootherExample(self):
        '''
        Simple test that checks for equality between C++ example
        file and the Python implementation. See
        gtsam_unstable/examples/FixedLagSmootherExample.cpp
        '''
        # Define a batch fixed lag smoother, which uses
        # Levenberg-Marquardt to perform the nonlinear optimization
        lag = 2.0
        smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag)

        # Create containers to store the factors and linearization points
        # that will be sent to the smoothers
        new_factors = gtsam.NonlinearFactorGraph()
        new_values = gtsam.Values()
        new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()

        # Create a prior on the first pose, placing it at the origin
        prior_mean = gtsam.Pose2(0, 0, 0)
        prior_noise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.3, 0.3, 0.1]))
        X1 = 0
        new_factors.push_back(
            gtsam.PriorFactorPose2(X1, prior_mean, prior_noise))
        new_values.insert(X1, prior_mean)
        new_timestamps.insert(_timestamp_key_value(X1, 0.0))

        delta_time = 0.25
        time = 0.25

        i = 0

        ground_truth = [
            gtsam.Pose2(0.995821, 0.0231012, 0.0300001),
            gtsam.Pose2(1.49284, 0.0457247, 0.045),
            gtsam.Pose2(1.98981, 0.0758879, 0.06),
            gtsam.Pose2(2.48627, 0.113502, 0.075),
            gtsam.Pose2(2.98211, 0.158558, 0.09),
            gtsam.Pose2(3.47722, 0.211047, 0.105),
            gtsam.Pose2(3.97149, 0.270956, 0.12),
            gtsam.Pose2(4.4648, 0.338272, 0.135),
            gtsam.Pose2(4.95705, 0.41298, 0.15),
            gtsam.Pose2(5.44812, 0.495063, 0.165),
            gtsam.Pose2(5.9379, 0.584503, 0.18),
        ]

        # Iterates from 0.25s to 3.0s, adding 0.25s each loop
        # In each iteration, the agent moves at a constant speed
        # and its two odometers measure the change. The smoothed
        # result is then compared to the ground truth
        while time <= 3.0:
            previous_key = 1000 * (time - delta_time)
            current_key = 1000 * time

            # assign current key to the current timestamp
            new_timestamps.insert(_timestamp_key_value(current_key, time))

            # Add a guess for this pose to the new values
            # Assume that the robot moves at 2 m/s. Position is time[s] *
            # 2[m/s]
            current_pose = gtsam.Pose2(time * 2, 0, 0)
            new_values.insert(current_key, current_pose)

            # Add odometry factors from two different sources with different
            # error stats
            odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02)
            odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([0.1, 0.1, 0.05]))
            new_factors.push_back(
                gtsam.BetweenFactorPose2(previous_key, current_key,
                                         odometry_measurement_1,
                                         odometry_noise_1))

            odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01)
            odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([0.05, 0.05, 0.05]))
            new_factors.push_back(
                gtsam.BetweenFactorPose2(previous_key, current_key,
                                         odometry_measurement_2,
                                         odometry_noise_2))

            # Update the smoothers with the new factors. In this case,
            # one iteration must pass for Levenberg-Marquardt to accurately
            # estimate
            if time >= 0.50:
                smoother_batch.update(new_factors, new_values, new_timestamps)

                estimate = smoother_batch.calculateEstimatePose2(current_key)
                self.assertTrue(estimate.equals(ground_truth[i], 1e-4))
                i += 1

                new_timestamps.clear()
                new_values.clear()
                new_factors.resize(0)

            time += delta_time
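
# `_timestamp_key_value` is a small helper from the original test module that is not shown
# in this snippet. In recent GTSAM Python wrappers, FixedLagSmootherKeyTimestampMap.insert()
# accepts a plain (key, timestamp) pair, so a minimal stand-in could be:
def _timestamp_key_value(key, timestamp):
    # Hypothetical stand-in; older wrappers may instead require constructing a dedicated
    # key/timestamp value object.
    return (key, timestamp)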
Example #29
def main():
    """Main runner."""
    # Create noise models
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(gtsam.Point3(0.3, 0.3, 0.1))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(
        gtsam.Point3(0.2, 0.2, 0.1))

    # 1. Create a factor graph container and add factors to it
    graph = gtsam.NonlinearFactorGraph()

    # 2a. Add a prior on the first pose, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))

    # 2b. Add odometry factors
    # Create odometry (Between) factors between consecutive poses
    graph.add(
        gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE))
    graph.add(
        gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))
    graph.add(
        gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))
    graph.add(
        gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))

    # 2c. Add the loop closure constraint
    # This factor encodes the fact that we have returned to the same pose. In real
    # systems, these constraints may be identified in many ways, such as appearance-based
    # techniques with camera images. We will use another Between Factor to enforce this constraint:
    graph.add(
        gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))
    print("\nFactor Graph:\n{}".format(graph))  # print

    # 3. Create the data structure to hold the initial_estimate estimate to the
    # solution. For illustrative purposes, these have been deliberately set to incorrect values
    initial_estimate = gtsam.Values()
    initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
    initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
    initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
    initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
    initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))
    print("\nInitial Estimate:\n{}".format(initial_estimate))  # print

    # 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
    # The optimizer accepts an optional set of configuration parameters,
    # controlling things like convergence criteria, the type of linear
    # system solver to use, and the amount of information displayed during
    # optimization. We will set a few parameters as a demonstration.
    parameters = gtsam.GaussNewtonParams()

    # Stop iterating once the change in error between steps is less than this value
    parameters.setRelativeErrorTol(1e-5)
    # Do not perform more than N iteration steps
    parameters.setMaxIterations(100)
    # Create the optimizer ...
    optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters)
    # ... and optimize
    result = optimizer.optimize()
    print("Final Result:\n{}".format(result))

    # 5. Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    for i in range(1, 6):
        print("X{} covariance:\n{}\n".format(i,
                                             marginals.marginalCovariance(i)))

    for i in range(1, 6):
        gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
                              marginals.marginalCovariance(i))

    plt.axis('equal')
    plt.show()
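
# A guess at the module-level imports this example relies on (they are not part of the
# snippet above), plus the usual entry point:
import math

import matplotlib.pyplot as plt

import gtsam
import gtsam.utils.plot as gtsam_plot

if __name__ == "__main__":
    main()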
def run(args):
    """Test Dogleg vs LM, inspired by issue #452."""

    # print parameters
    print("num samples = {}, deltaInitial = {}".format(args.num_samples,
                                                       args.delta))

    # Ground truth solution
    T11 = gtsam.Pose2(0, 0, 0)
    T12 = gtsam.Pose2(1, 0, 0)
    T21 = gtsam.Pose2(0, 1, 0)
    T22 = gtsam.Pose2(1, 1, 0)

    # Factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Priors
    prior = gtsam.noiseModel.Isotropic.Sigma(3, 1)
    graph.add(gtsam.PriorFactorPose2(11, T11, prior))
    graph.add(gtsam.PriorFactorPose2(21, T21, prior))

    # Odometry
    model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.01, 0.01, 0.3]))
    graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model))
    graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model))

    # Range
    model_rho = gtsam.noiseModel.Isotropic.Sigma(1, 0.01)
    graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho))

    params = gtsam.DoglegParams()
    params.setDeltaInitial(args.delta)  # default is 10

    # Add progressively more noise to ground truth
    sigmas = [0.01, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20]
    n = len(sigmas)
    p_dl, s_dl, p_lm, s_lm = [0] * n, [0] * n, [0] * n, [0] * n
    for i, sigma in enumerate(sigmas):
        dl_fails, lm_fails = 0, 0
        # Attempt num_samples optimizations for both DL and LM
        for _attempt in range(args.num_samples):
            initial = gtsam.Values()
            initial.insert(11, T11.retract(np.random.normal(0, sigma, 3)))
            initial.insert(12, T12.retract(np.random.normal(0, sigma, 3)))
            initial.insert(21, T21.retract(np.random.normal(0, sigma, 3)))
            initial.insert(22, T22.retract(np.random.normal(0, sigma, 3)))

            # Run dogleg optimizer
            dl = gtsam.DoglegOptimizer(graph, initial, params)
            result = dl.optimize()
            dl_fails += graph.error(result) > 1e-9

            # Run Levenberg-Marquardt optimizer
            lm = gtsam.LevenbergMarquardtOptimizer(graph, initial)
            result = lm.optimize()
            lm_fails += graph.error(result) > 1e-9

        # Calculate Bayes estimate of success probability
        # using a beta prior of alpha=0.5, beta=0.5
        alpha, beta = 0.5, 0.5
        v = args.num_samples + alpha + beta
        p_dl[i] = (args.num_samples - dl_fails + alpha) / v
        p_lm[i] = (args.num_samples - lm_fails + alpha) / v

        def stddev(p):
            """Calculate standard deviation."""
            return math.sqrt(p * (1 - p) / (1 + v))

        s_dl[i] = stddev(p_dl[i])
        s_lm[i] = stddev(p_lm[i])

        fmt = "sigma= {}:\tDL success {:.2f}% +/- {:.2f}%, LM success {:.2f}% +/- {:.2f}%"
        print(
            fmt.format(sigma, 100 * p_dl[i], 100 * s_dl[i], 100 * p_lm[i],
                       100 * s_lm[i]))

    if args.plot:
        fig, ax = plt.subplots()
        dl_plot = plt.errorbar(sigmas, p_dl, yerr=s_dl, label="Dogleg")
        lm_plot = plt.errorbar(sigmas, p_lm, yerr=s_lm, label="LM")
        plt.title("Dogleg emprical success vs. LM")
        plt.legend(handles=[dl_plot, lm_plot])
        ax.set_xlim(0, sigmas[-1] + 1)
        ax.set_ylim(0, 1)
        plt.show()
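
# `run` expects an argparse-style namespace with `num_samples`, `delta`, and `plot`
# attributes (gtsam, numpy as np, math, and matplotlib.pyplot as plt are assumed to be
# imported at module level). A plausible entry point (argument names inferred from their
# use above; defaults purely illustrative):
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Compare Dogleg and Levenberg-Marquardt success rates.")
    parser.add_argument("--num_samples", type=int, default=100,
                        help="number of random initializations per noise level")
    parser.add_argument("--delta", type=float, default=10.0,
                        help="initial trust-region radius for Dogleg")
    parser.add_argument("--plot", action="store_true",
                        help="plot success probabilities with error bars")
    run(parser.parse_args())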