Example 1
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
                        key: int):
    """Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""

    # Print the current estimates computed using iSAM2.
    print("*"*50 + f"\nInference after State {key+1}:\n")
    print(current_estimate)

    # Compute the marginals for all states in the graph.
    marginals = gtsam.Marginals(graph, current_estimate)

    # Plot the newly updated iSAM2 inference.
    fig = plt.figure(0)
    axes = fig.gca()
    plt.cla()

    i = 1
    while current_estimate.exists(i):
        gtsam_plot.plot_pose2(0, current_estimate.atPose2(i), 0.5, marginals.marginalCovariance(i))
        i += 1

    plt.axis('equal')
    axes.set_xlim(-1, 5)
    axes.set_ylim(-1, 3)
    plt.pause(1)
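# For context, a typical call site for report_on_progress inside an iSAM2
# update loop might look like the sketch below (illustrative; `isam`, `graph`,
# and `initial_estimate` are assumed to exist, as in the full example later on):
def _report_usage_sketch(isam, graph, initial_estimate, key):
    isam.update(graph, initial_estimate)
    current_estimate = isam.calculateEstimate()
    report_on_progress(graph, current_estimate, key)
    initial_estimate.clear()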
Example 2
    def test_Pose2SLAMExample(self):
        # Assumptions
        #  - All values are axis aligned
        #  - Robot poses are facing along the X axis (horizontal, to the right in images)
        #  - We have full odometry for measurements
        #  - The robot is on a grid, moving 2 meters each step

        # Create graph container and add factors to it
        graph = gtsam.NonlinearFactorGraph()

        # Add prior
        # gaussian for prior
        priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
        priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3,
                                                                0.1]))
        # add directly to graph
        graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

        # Add odometry
        # general noisemodel for odometry
        odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))
        graph.add(
            gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2.0, 0.0, 0.0),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))
        graph.add(
            gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     odometryNoise))

        # Add loop closure constraint
        model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
        graph.add(
            gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2),
                                     model))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, pi / 2))
        initialEstimate.insert(4, gtsam.Pose2(4.0, 2.0, pi))
        initialEstimate.insert(5, gtsam.Pose2(2.1, 2.1, -pi / 2))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()

        # Plot Covariance Ellipses
        marginals = gtsam.Marginals(graph, result)
        P = marginals.marginalCovariance(1)

        pose_1 = result.atPose2(1)
        self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
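Example 3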
    def action_step(self, action, action_noise):

        # Setting path and initiating DA realization.
        path_length = len(self.daRealization)
        self.daRealization.append(0)

        # Advancing the belief by camera
        self.graph.add(
            gtsam.BetweenFactorPose3(self.X(path_length),
                                     self.X(path_length + 1), action,
                                     action_noise))

        # Setting initial values, save an initial value vector for the propagated pose, need it later
        if self.initial.exists(self.X(path_length + 1)) is False:
            self.initial.insert(
                self.X(path_length + 1),
                self.initial.atPose3(self.X(path_length)).compose(action))

        # Setting propagated belief object for weight computation
        self.prop_belief = self.graph.clone()
        self.prop_initial = gtsam.Values()
        for key in list(self.initial.keys()):
            self.prop_initial.insert(key, self.initial.atPose3(key))

        # self.optimize() #TODO: SEE IF WORKS
        self.optimize(graph_to_optimize=self.prop_belief)

        # Compute marginals of the propagated belief for the weight update.
        self.marginals = gtsam.Marginals(self.prop_belief, self.result)

        # Setting resultsFlag to false to require optimization
        self.resultsFlag = False

        # Multi robot flag for twin measurements
        self.sim_measurement_flag = list()
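Example 4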
    def lambda_covariance_numeric(self, key, number_of_samples=20):

        marginals = gtsam.Marginals(self.graph, self.result)
        covariance = marginals.marginalCovariance(self.Lam(key))
        exp = self.result.atVector(self.Lam(key))

        exp_lambda = np.zeros(len(exp) + 1)
        cov_lambda = np.zeros((len(exp) + 1, len(exp) + 1))

        for idx in range(number_of_samples):

            sample = np.random.multivariate_normal(exp, covariance)
            sample_aug = np.concatenate((sample, 0), axis=None)
            sample_cpv = np.exp(sample_aug) / (np.sum(np.exp(sample_aug)))
            exp_lambda += sample_cpv / number_of_samples

        for idx in range(number_of_samples):

            sample = np.random.multivariate_normal(exp, covariance)
            sample_aug = np.concatenate((sample, 0), axis=None)
            sample_cpv = np.exp(sample_aug) / (np.sum(np.exp(sample_aug)))
            #print(sample_cpv)
            cov_lambda += np.outer(sample_cpv - exp_lambda,
                                   sample_cpv - exp_lambda) / number_of_samples

        return cov_lambda
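Example 5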
    def lambda_entropy_individual_numeric(self,
                                          key,
                                          number_of_samples=20,
                                          entropy_lower_limit=None):

        marginals = gtsam.Marginals(self.graph, self.result)
        covariance = marginals.marginalCovariance(self.Lam(key))
        exp = self.result.atVector(self.Lam(key))

        # Clamp values to avoid numerical overflow in np.exp below
        for idx in range(len(exp)):
            if exp[idx] > 250:
                exp[idx] = 250
            if exp[idx] < -250:
                exp[idx] = -250

        entropy = 0
        for idx in range(number_of_samples):

            sample = np.random.multivariate_normal(exp, covariance)
            sample_aug = np.concatenate((sample, 0), axis=None)
            sample_cpv = np.exp(sample_aug) / (np.sum(np.exp(sample_aug)))
            log_pdf_value = np.log(stats.multivariate_normal.pdf(sample, exp, covariance)) - \
                        np.sum(np.log(sample_cpv))
            entropy -= log_pdf_value / number_of_samples

        if entropy_lower_limit is not None and entropy < entropy_lower_limit:
            entropy = entropy_lower_limit

        return entropy
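# The Monte-Carlo estimator above can be reproduced standalone. Below is a
# minimal sketch of entropy estimation for a softmax-of-Gaussian
# (logistic-normal) variable; all names here are illustrative, not part of
# the original class:
import numpy as np
from scipy import stats

def softmax_gaussian_entropy(mean, cov, n_samples=1000):
    """Monte-Carlo entropy of softmax([g, 0]) where g ~ N(mean, cov)."""
    entropy = 0.0
    for _ in range(n_samples):
        g = np.random.multivariate_normal(mean, cov)
        aug = np.concatenate((g, [0.0]))        # append the fixed last logit
        p = np.exp(aug) / np.sum(np.exp(aug))   # softmax sample
        # Gaussian log-density minus the softmax change-of-variables term
        log_pdf = np.log(stats.multivariate_normal.pdf(g, mean, cov)) \
                  - np.sum(np.log(p))
        entropy -= log_pdf / n_samples
    return entropy

# e.g. softmax_gaussian_entropy(np.zeros(2), np.eye(2))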
Example 6
    def bundle_adjustment(self):
        """
        Parameters:
            calibration - gtsam.Cal3_S2, camera calibration
            landmark_map - list, A map of landmarks and their correspondence
        """
        # Initialize factor Graph
        graph = gtsam.NonlinearFactorGraph()

        initial_estimate = self.create_initial_estimate()

        # Create Projection Factors
        # """
        #   Measurement noise for bundle adjustment:
        #   sigma = 1.0
        #   measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, sigma)
        # """
        for landmark_idx, observation_list in enumerate(self._landmark_map):
            for observation in observation_list:
                pose_idx = observation[0]
                key_point = observation[1]
                # To test indeterminate system
                # if(landmark_idx == 2 or landmark_idx == 480):
                #     continue
                # if(pose_idx == 6):
                #     print("yes2")
                #     continue
                graph.add(
                    gtsam.GenericProjectionFactorCal3_S2(
                        key_point, self._measurement_noise, X(pose_idx),
                        P(landmark_idx), self._calibration))

        # Create priors for first two poses
        # """
        #   Pose prior noise:
        #   rotation_sigma = np.radians(60)
        #   translation_sigma = 1
        #   pose_noise_sigmas = np.array([rotation_sigma, rotation_sigma, rotation_sigma,
        #                             translation_sigma, translation_sigma, translation_sigma])
        # """
        for idx in (0, 1):
            pose_i = initial_estimate.atPose3(X(idx))
            graph.add(
                gtsam.PriorFactorPose3(X(idx), pose_i, self._pose_prior_noise))

        # Optimization
        # Using QR rather than Cholesky decomposition
        # params = gtsam.LevenbergMarquardtParams()
        # params.setLinearSolverType("MULTIFRONTAL_QR")
        # optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params)
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate)

        sfm_result = optimizer.optimize()
        # Computing marginals checks that the system is not under-constrained
        marginals = gtsam.Marginals(  # pylint: disable=unused-variable
            graph, sfm_result)
        return sfm_result
Example 7
    def test_SFMExample(self):
        options = generator.Options()
        options.triangle = False
        options.nrCameras = 10

        [data, truth] = generator.generate_data(options)

        measurementNoiseSigma = 1.0
        pointNoiseSigma = 0.1
        poseNoiseSigmas = np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])

        graph = gtsam.NonlinearFactorGraph()

        # Add factors for all measurements
        measurementNoise = Isotropic.Sigma(2, measurementNoiseSigma)
        for i in range(len(data.Z)):
            for k in range(len(data.Z[i])):
                j = data.J[i][k]
                graph.add(gtsam.GenericProjectionFactorCal3_S2(
                    data.Z[i][k], measurementNoise,
                    X(i), P(j), data.K))

        posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas)
        graph.add(gtsam.PriorFactorPose3(X(0),
                                   truth.cameras[0].pose(), posePriorNoise))
        pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma)
        graph.add(gtsam.PriorFactorPoint3(P(0),
                                    truth.points[0], pointPriorNoise))

        # Initial estimate
        initialEstimate = gtsam.Values()
        for i in range(len(truth.cameras)):
            pose_i = truth.cameras[i].pose()
            initialEstimate.insert(X(i), pose_i)
        for j in range(len(truth.points)):
            point_j = truth.points[j]
            initialEstimate.insert(P(j), point_j)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        for i in range(5):
            optimizer.iterate()
        result = optimizer.values()

        # Marginalization
        marginals = gtsam.Marginals(graph, result)
        marginals.marginalCovariance(P(0))
        marginals.marginalCovariance(X(0))

        # Check optimized results, should be equal to ground truth
        for i in range(len(truth.cameras)):
            pose_i = result.atPose3(X(i))
            self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)

        for j in range(len(truth.points)):
            point_j = result.atPoint3(P(j))
            self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
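Example 8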
def build_graph():

    print("build_graph !!!")
    # Create noise models
    ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2,
                                                                0.1]))
    PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))

    # Create an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Add a prior on the first pose, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior at origin
    graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))

    # Add odometry factors built from the edge list
    odometry1 = gtsam.Pose2(edge_list[0].position.x, edge_list[0].position.y,
                            edge_list[0].position.z)
    odometry2 = gtsam.Pose2(edge_list[1].position.x, edge_list[1].position.y,
                            edge_list[1].position.z)

    # For simplicity, we will use the same noise model for each odometry factor
    # Create odometry (Between) factors between consecutive poses
    graph.add(gtsam.BetweenFactorPose2(1, 2, odometry1, ODOMETRY_NOISE))
    graph.add(gtsam.BetweenFactorPose2(2, 3, odometry2, ODOMETRY_NOISE))
    print("\nFactor Graph:\n{}".format(graph))

    # Create the data structure to hold the initialEstimate estimate to the solution
    # For illustrative purposes, these have been deliberately set to incorrect values
    initial = gtsam.Values()
    initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
    initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
    initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
    print("\nInitial Estimate:\n{}".format(initial))

    # optimize using Levenberg-Marquardt optimization
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    # 5. Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    for i in range(1, 4):
        print("X{} covariance:\n{}\n".format(i,
                                             marginals.marginalCovariance(i)))

    fig = plt.figure(0)
    for i in range(1, 4):
        gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
                              marginals.marginalCovariance(i))
    plt.axis('equal')
    plt.show()
Example 9
    def test_linearMarginalization(self):
        """Marginalize a linear factor graph"""
        graph, X = create_graph()
        result = graph.optimize()

        EXPECTEDM = [1, 2, 3]

        # linear factor graph marginalize
        marginals = gtsam.Marginals(graph, result)
        m = [marginals.marginalCovariance(x) for x in X]

        # check linear marginalizations
        self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8)
        self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8)
        self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8)
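Example 10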
    def lambda_entropy_individual_bounds(self, key, entropy_lower_limit=None):

        marginals = gtsam.Marginals(self.graph, self.result)
        covariance = marginals.marginalCovariance(self.Lam(key))
        exp = self.result.atVector(self.Lam(key))
        num_of_classes = self.lambda_prior_mean.size + 1
        entropy_upper = 0.5 * np.log(np.linalg.det(2 * np.exp(1) * np.pi * covariance)) \
                        + np.sum(exp) - num_of_classes * np.max(np.abs(exp))
        entropy_lower = entropy_upper - num_of_classes * np.log(num_of_classes)

        if entropy_lower_limit is not None and entropy_upper < entropy_lower_limit:
            entropy_upper = entropy_lower_limit
        if entropy_lower_limit is not None and entropy_lower < entropy_lower_limit:
            entropy_lower = entropy_lower_limit

        return entropy_upper, entropy_lower
Example 11
    def test_convertNonlinear(self):
        """Test converting a linear factor graph to a nonlinear one"""
        graph, X = create_graph()

        EXPECTEDM = [1, 2, 3]

        # create nonlinear factor graph for marginalization
        nfg = gtsam.LinearContainerFactor.ConvertLinearGraph(graph)
        optimizer = gtsam.LevenbergMarquardtOptimizer(nfg, gtsam.Values())
        nlresult = optimizer.optimizeSafely()

        # marginalize
        marginals = gtsam.Marginals(nfg, nlresult)
        m = [marginals.marginalCovariance(x) for x in X]

        # check linear marginalizations
        self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8)
        self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8)
        self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8)
Example 12
    def test_LocalizationExample(self):
        # Create the graph (defined in pose2SLAM.h, derived from
        # NonlinearFactorGraph)
        graph = gtsam.NonlinearFactorGraph()

        # Add two odometry factors
        # create a measurement for both factors (the same in this case)
        odometry = gtsam.Pose2(2.0, 0.0, 0.0)
        odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))  # 20cm std on x,y, 0.1 rad on theta
        graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise))
        graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))

        # Add three "GPS" measurements
        # We use Pose2 Priors here with high variance on theta
        groundTruth = gtsam.Values()
        groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0))
        groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0))
        groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0))
        model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 10.]))
        for i in range(3):
            graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(0, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(1, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(2, gtsam.Pose2(4.1, 0.1, 0.1))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()

        # Plot Covariance Ellipses
        marginals = gtsam.Marginals(graph, result)
        P = [None] * result.size()
        for i in range(0, result.size()):
            pose_i = result.atPose2(i)
            self.gtsamAssertEquals(pose_i, groundTruth.atPose2(i), 1e-4)
            P[i] = marginals.marginalCovariance(i)
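Example 13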
    def print_results(self, show_entropy=False):

        # Optimizing if not done before
        if self.resultsFlag is False:
            self.optimize()

        # Printing results
        print("\nFactor Graph:\n-----------------\n\n")
        print(self.graph)
        print("\nInitial Estimate:\n----------------\n\n")
        print(self.initial)
        print("\nFinal Result:\n----------------\n\n")
        print(self.result)

        marginals = gtsam.Marginals(self.graph, self.result)
        if self.cls_enable is True:
            for obj in self.object_lambda_dict:
                print("\nLambda Expectation of object " + str(obj) + ":")
                print(
                    self.result.atVector(self.Lam(
                        self.object_lambda_dict[obj])))
                print("\nLambda Covariance of object " + str(obj) + ":")
                print(
                    marginals.marginalCovariance(
                        self.Lam(self.object_lambda_dict[obj])))
                print("\n")

                if show_entropy is True:
                    entropy_upper, entropy_lower = self.lambda_entropy_individual_bounds(
                        self.object_lambda_dict[obj])
                    entropy_num = self.lambda_entropy_individual_numeric(
                        self.object_lambda_dict[obj])
                    print("Entropy upper limit: " + str(entropy_upper))
                    print("Entropy numerical value: " + str(entropy_num))
                    print("Entropy lower limit: " + str(entropy_lower))

        print("\n**-----------------------------------------")
Example 14
    def test_OdometryExample(self):
        # Create the graph (defined in pose2SLAM.h, derived from
        # NonlinearFactorGraph)
        graph = gtsam.NonlinearFactorGraph()

        # Add a Gaussian prior on pose x_1
        priorMean = gtsam.Pose2(0.0, 0.0, 0.0)  # prior mean is at origin
        priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array(
            [0.3, 0.3, 0.1]))  # 30cm std on x,y, 0.1 rad on theta
        # add directly to graph
        graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))

        # Add two odometry factors
        # create a measurement for both factors (the same in this case)
        odometry = gtsam.Pose2(2.0, 0.0, 0.0)
        odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))  # 20cm std on x,y, 0.1 rad on theta
        graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
        graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))

        # Initialize to noisy points
        initialEstimate = gtsam.Values()
        initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))

        # Optimize using Levenberg-Marquardt optimization with an ordering from
        # colamd
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()
        marginals = gtsam.Marginals(graph, result)
        marginals.marginalCovariance(1)

        # Check first pose equality
        pose_1 = result.atPose2(1)
        self.assertTrue(pose_1.equals(gtsam.Pose2(), 1e-4))
Example 15
example_initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
example_initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
example_initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))

# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
ex_parameters = gtsam.GaussNewtonParams()
ex_parameters.setRelativeErrorTol(1e-5)
ex_parameters.setMaxIterations(100)
ex_optimizer = gtsam.GaussNewtonOptimizer(example_graph,
                                          example_initial_estimate,
                                          ex_parameters)
ex_result = ex_optimizer.optimize()
print("Final Result:\n{}".format(ex_result))

# Plot your graph
marginals = gtsam.Marginals(example_graph, ex_result)
fig = plt.figure(0)
for i in range(1, 6):
    gtsam_plot.plot_pose2(0, ex_result.atPose2(i), 0.5,
                          marginals.marginalCovariance(i))

plt.axis('equal')
plt.show()
"""**TODO** [25 points]

You will be using your ICP implementation here to find the transform between two subsequent clouds. These transforms become the factors between pose variables in the graph. So, you will need to go through all the point clouds and run icp pair-wise to find the relative movement of the car. With these transformation, create a factor representing the transform between the pose variables.

We talked about how loop closure helps us consolidate conflicting data into a better global estimate. Unfortunately, our car does not perform a loop closure. So, our graph would just be a long series of poses connected by icp-returned transforms. However, our lidar scans are noisy, which means that our icp-returned transforms are not perfect either. This ultimately results in incorrect vehicle poses and overall map. One way that we can augment our graph is through "skipping". We simply run ICP between every other cloud, and add these skip connections into the graph. You can basically perform ICP between two non-consecutive point clouds and add that transform as a factor in the factor graph.
"""

Example 16
    def weight_update_ML(self, da_current_step, geo_measurements,
                         sem_measurements):  #[WIP]

        self.weight_memory.append(np.exp(self.logWeight))

        # Computing marginal distributions for the required variables
        path_length = len(self.daRealization)
        #self.optimize(graph_to_optimize=self.prop_belief)
        self.optimize()
        self.marginals = gtsam.Marginals(self.prop_belief, self.result)

        # Add initial values if objects are not present in the current initial values (by adding multi-robot measurements)

        # Extracting expectation and covariance from last camera pose
        camera_pose_marginal_covariance = self.marginals.marginalCovariance(
            self.X(path_length))

        camera_pose = self.result.atPose3(self.X(path_length)).translation()
        camera_pose_rotation = self.result.atPose3(
            self.X(path_length)).rotation().rpy()
        #camera_pose_rotation = np.flip(camera_pose_rotation, 0)
        camera_pose = np.concatenate((camera_pose_rotation, camera_pose))

        # Rotate covariance matrix
        camera_pose_marginal_covariance_rotated = self.rotate_cov_6x6(
            camera_pose_marginal_covariance,
            self.result.atPose3(self.X(path_length)).rotation().matrix())

        # Extracting expectation and covariance from object poses + sampling
        object_marginal_covariance = dict()
        object_pose = dict()
        object_pose_rotation = dict()
        object_samples = dict()
        object_marginal_covariance_rotated = dict()
        for selected_object in da_current_step:
            selected_object = int(selected_object)
            #self.result.print()
            #print(selected_object)
            object_marginal_covariance[self.XO(selected_object)] = \
                self.marginals.marginalCovariance(self.XO(selected_object))
            object_pose[self.XO(selected_object)] = \
                self.result.atPose3(self.XO(selected_object)).translation()
            object_pose_rotation[self.XO(selected_object)] = \
                self.result.atPose3(self.XO(selected_object)).rotation().rpy()
            #object_pose_rotation[self.XO(selected_object)] = \
            #    np.flip(object_pose_rotation[self.XO(selected_object)], 0)
            object_pose[self.XO(selected_object)] = np.concatenate(
                (object_pose_rotation[self.XO(selected_object)],
                 object_pose[self.XO(selected_object)]))

            # Rotate covariance matrix
            object_marginal_covariance_rotated[self.XO(
                selected_object)] = self.rotate_cov_6x6(
                    object_marginal_covariance[self.XO(selected_object)],
                    self.result.atPose3(
                        self.XO(selected_object)).rotation().matrix())

        # Initialize the weight update; the loop below sums all possible updates
        weight_update = 0
        # running over all samples

        x_pose = self.vec_to_pose3(camera_pose)
        da_probability = 1
        log_likelihood_geo = 0
        log_likelihood_sem = 0
        # running index for the next loop
        i = 0

        for selected_object in da_current_step:

            selected_object = int(selected_object)
            # compute P(C,beta|H) TODO: Add semantic measurement likelihood
            xo_pose = self.vec_to_pose3(object_pose[self.XO(selected_object)])

            log_likelihood_geo -= self.geoModel.log_pdf_value(
                geo_measurements[i], x_pose, xo_pose)
            log_likelihood_sem -= self.clsModel.log_pdf_value_new(
                sem_measurements[i], self.classRealization[selected_object],
                x_pose, xo_pose)

            # print(self.classRealization[selected_object])
            # print(log_likelihood_sem)

            i += 1

        if self.cls_enable is True:
            weight_update += da_probability * math.exp(0.5 * log_likelihood_geo) \
                             * math.exp(0.5 * log_likelihood_sem)
        else:
            weight_update += da_probability * math.exp(
                0.5 * log_likelihood_geo)

        if weight_update > 10**-322:
            self.logWeight += math.log(weight_update)  #+ 150
        else:
            self.logWeight = -math.inf
Example 17
    def add_measurements(self,
                         geo_measurements,
                         sem_measurements,
                         da_current_step,
                         class_realization_current_step,
                         number_of_samples=10,
                         weight_update_flag=True,
                         new_input_object_prior=None,
                         new_input_object_covariance=None,
                         ML_update=False):

        graph_add = gtsam.NonlinearFactorGraph()
        initial_add = gtsam.Values()

        # Setting up complete class realization
        running_index = 0
        for obj in da_current_step:
            if obj not in self.classRealization:

                obj = int(obj)

                # Create a class realization dict from the realization key
                for obj_index in class_realization_current_step:
                    if obj == obj_index[0]:
                        self.classRealization[obj] = obj_index[1]

                self.logWeight += np.log(
                    self.classProbabilityPrior[self.classRealization[obj] - 1])

                # The system is ill-defined without a prior for objects, so we
                # define one; the default is a very uninformative prior.
                new_object_prior = gtsam.Pose3(gtsam.Rot3.Ypr(0, 0, 0),
                                               np.array([0, 0, 0]))
                if new_input_object_prior is not None:
                    new_object_prior = new_input_object_prior[obj]
                # A noise model is required to add the prior factor, so the
                # factor is added to all graph containers only when a
                # covariance is supplied
                if new_input_object_covariance is not None:
                    new_object_covariance = new_input_object_covariance[obj]
                    self.graph.add(
                        gtsam.PriorFactorPose3(self.XO(obj), new_object_prior,
                                               new_object_covariance))
                    self.prop_belief.add(
                        gtsam.PriorFactorPose3(self.XO(obj), new_object_prior,
                                               new_object_covariance))
                    graph_add.add(
                        gtsam.PriorFactorPose3(self.XO(obj), new_object_prior,
                                               new_object_covariance))

                if self.initial.exists(self.XO(obj)) is False:
                    self.initial.insert(self.XO(obj), new_object_prior)
                    if self.prop_initial.exists(self.XO(obj)) is False:
                        self.prop_initial.insert(self.XO(obj),
                                                 new_object_prior)
                    if initial_add.exists(self.XO(obj)) is False:
                        initial_add.insert(self.XO(obj), new_object_prior)

            running_index += 1

        for obj in self.classRealization:
            if self.initial.exists(self.XO(obj)) is False:
                self.initial.insert(self.XO(obj),
                                    gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))
                if initial_add.exists(self.XO(obj)) is False:
                    initial_add.insert(self.XO(obj),
                                       gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))

        # Geometric measurement model
        self.geoModel.add_measurement(geo_measurements, da_current_step,
                                      self.graph, self.initial,
                                      self.daRealization)
        self.geoModel.add_measurement(geo_measurements, da_current_step,
                                      self.prop_belief, self.initial,
                                      self.daRealization)
        self.geoModel.add_measurement(geo_measurements, da_current_step,
                                      graph_add, self.initial,
                                      self.daRealization)

        self.isam.update(graph_add, initial_add)
        #self.isam_copy.update(graph_add, initial_add)
        graph_add = gtsam.NonlinearFactorGraph()
        initial_add = gtsam.Values()

        # Semantic measurement model
        if self.cls_enable is True:
            self.clsModel.add_measurement(sem_measurements, da_current_step,
                                          self.graph, self.initial,
                                          self.daRealization,
                                          class_realization_current_step)
            #self.clsModel.add_measurement(sem_measurements, da_current_step, graph_add,
            #                              self.initial, self.daRealization, class_realization_current_step)

        # Updating weights
        if weight_update_flag is True:
            if ML_update is False:
                self.weight_update(da_current_step, geo_measurements,
                                   sem_measurements, number_of_samples)
            else:
                self.weight_update_ML(da_current_step, geo_measurements,
                                      sem_measurements)

        # Semantic measurement model isam2
        # if self.cls_enable is True:
        #     self.clsModel.add_measurement(sem_measurements, da_current_step, graph_add,
        #                                   self.initial, self.daRealization, class_realization_current_step)

        self.isam.update(graph_add, initial_add)
        self.optimize()
        self.marginals_after = gtsam.Marginals(self.graph, self.result)

        # Setting resultsFlag to false to require optimization
        self.resultsFlag = False
Example 18
    def run(self):
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
        pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)

        H9 = gtsam.OptionalJacobian9()

        T = 12
        num_poses = T + 1  # assumes 1 factor per second
        initial = gtsam.Values()
        initial.insert(BIAS_KEY, self.actualBias)
        for i in range(num_poses):
            state_i = self.scenario.navState(float(i))
            initial.insert(X(i), state_i.pose())
            initial.insert(V(i), state_i.velocity())

        # simulate the loop
        i = 0  # state index
        actual_state_i = self.scenario.navState(0)
        for k, t in enumerate(np.arange(0, T, self.dt)):
            # get measurements and add them to PIM
            measuredOmega = self.runner.measuredAngularVelocity(t)
            measuredAcc = self.runner.measuredSpecificForce(t)
            pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)

            # Plot IMU many times
            if k % 10 == 0:
                self.plotImu(t, measuredOmega, measuredAcc)

            # Plot every second
            if k % int(1 / self.dt) == 0:
                self.plotGroundTruthPose(t)

            # create IMU factor every second
            if (k + 1) % int(1 / self.dt) == 0:
                factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
                                         BIAS_KEY, pim)
                graph.push_back(factor)
                if True:
                    print(factor)
                    H2 = gtsam.OptionalJacobian96()
                    print(pim.predict(actual_state_i, self.actualBias, H9, H2))
                pim.resetIntegration()
                actual_state_i = self.scenario.navState(t + self.dt)
                i += 1

        # add priors on beginning and end
        self.addPrior(0, graph)
        self.addPrior(num_poses - 1, graph)

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()

        # Calculate and print marginal covariances
        marginals = gtsam.Marginals(graph, result)
        print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY))
        for i in range(num_poses):
            print("Covariance on pose {}:\n{}\n".format(
                i, marginals.marginalCovariance(X(i))))
            print("Covariance on vel {}:\n{}\n".format(
                i, marginals.marginalCovariance(V(i))))

        # Plot resulting poses
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            plotPose3(POSES_FIG, pose_i, 0.1)
            i += 1
        print(result.atConstantBias(BIAS_KEY))

        plt.ioff()
        plt.show()
Example 19
    def run(self, T=12, compute_covariances=False, verbose=True):
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
        pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)

        # T = 12
        num_poses = T  # assumes 1 factor per second
        initial = gtsam.Values()
        initial.insert(BIAS_KEY, self.actualBias)

        # simulate the loop
        i = 0  # state index
        initial_state_i = self.scenario.navState(0)
        initial.insert(X(i), initial_state_i.pose())
        initial.insert(V(i), initial_state_i.velocity())

        # add prior on beginning
        self.addPrior(0, graph)

        gtNavStates = []
        predictedNavStates = []
        integrationTime = []
        for k, t in enumerate(np.arange(0, T, self.dt)):
            # get measurements and add them to PIM
            measuredOmega = self.runner.measuredAngularVelocity(t)
            measuredAcc = self.runner.measuredSpecificForce(t)
            start = time()
            pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)
            integrationTime.append(time() - start)
            # Plot IMU many times
            if k % 10 == 0 and not DISABLE_VISUAL:
                self.plotImu(t, measuredOmega, measuredAcc)

            if (k + 1) % int(1 / self.dt) == 0:
                # Plot every second
                gtNavState = self.plotGroundTruthPose(t, scale=0.3)
                gtNavStates.append(gtNavState)
                plt.title("Ground Truth + Estimated Trajectory")

                # create IMU factor every second
                factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
                                         BIAS_KEY, pim)
                graph.push_back(factor)

                if verbose:
                    print(factor)
                    print(pim.predict(initial_state_i, self.actualBias))

                pim.resetIntegration()

                rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3) * 0.1)
                translationNoise = gtsam.Point3(*np.random.randn(3))
                poseNoise = gtsam.Pose3(rotationNoise, translationNoise)

                actual_state_i = self.scenario.navState(t + self.dt)
                if not DISABLE_VISUAL:
                    print("Actual state at {0}:\n{1}".format(
                        t + self.dt, actual_state_i))

                noisy_state_i = gtsam.NavState(
                    actual_state_i.pose().compose(poseNoise),
                    actual_state_i.velocity() + np.random.randn(3) * 0.1)

                initial.insert(X(i + 1), noisy_state_i.pose())
                initial.insert(V(i + 1), noisy_state_i.velocity())
                i += 1

        # add priors on end
        self.addPrior(num_poses - 1, graph)

        initial.print_("Initial values:")

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()

        result.print_("Optimized values:")

        if compute_covariances:
            # Calculate and print marginal covariances
            marginals = gtsam.Marginals(graph, result)
            print("Covariance on bias:\n",
                  marginals.marginalCovariance(BIAS_KEY))
            for i in range(num_poses):
                print("Covariance on pose {}:\n{}\n".format(
                    i, marginals.marginalCovariance(X(i))))
                print("Covariance on vel {}:\n{}\n".format(
                    i, marginals.marginalCovariance(V(i))))

        # Plot resulting poses
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            predictedNavStates.append(pose_i)
            if not DISABLE_VISUAL:
                plot_pose3(POSES_FIG, pose_i, 1)
            i += 1
        # plt.title("Estimated Trajectory")

        if not DISABLE_VISUAL:
            gtsam.utils.plot.set_axes_equal(POSES_FIG)

        print("Bias Values", result.atConstantBias(BIAS_KEY))

        ATE = []
        # import ipdb; ipdb.set_trace()
        for gt, pred in zip(gtNavStates, predictedNavStates[1:]):
            delta = gt.inverse().compose(pred)
            ATE.append(np.linalg.norm(delta.Logmap(delta))**2)
        print("ATE={}".format(np.sqrt(np.mean(ATE))))

        print("Run time={}".format(np.median(integrationTime)))
        plt.ioff()
        plt.show()
Example 20
def Pose2SLAM_ISAM2_example():
    """Perform 2D SLAM given the ground truth changes in pose as well as
    simple loop closure detection."""
    plt.ion()

    # Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
    prior_xy_sigma = 0.3

    # Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
    prior_theta_sigma = 5

    # Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
    odometry_xy_sigma = 0.2

    # Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
    odometry_theta_sigma = 5

    # Although this example only uses linear measurements and Gaussian noise models, it is important
    # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
    # simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
                                                            prior_xy_sigma,
                                                            prior_theta_sigma*np.pi/180]))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
                                                                odometry_xy_sigma,
                                                                odometry_theta_sigma*np.pi/180]))

    # Create a Nonlinear factor graph as well as the data structure to hold state estimates.
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
    # update calls are required to perform the relinearization.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.1)
    parameters.relinearizeSkip = 1
    isam = gtsam.ISAM2(parameters)

    # Create the ground truth odometry measurements of the robot during the trajectory.
    true_odometry = [(2, 0, 0),
                    (2, 0, math.pi/2),
                    (2, 0, math.pi/2),
                    (2, 0, math.pi/2),
                    (2, 0, math.pi/2)]

    # Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
    odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
                                for true_odom in true_odometry]

    # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
    # iSAM2 incremental optimization.
    graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
    initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))

    # Initialize the current estimate which is used during the incremental inference loop.
    current_estimate = initial_estimate

    for i in range(len(true_odometry)):

        # Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
        noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]

        # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
        loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)

        # Add a binary factor in between two existing states if loop closure is detected.
        # Otherwise, add a binary factor between a newly observed state and the previous state.
        if loop:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop, 
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
        else:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2, 
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))

            # Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
            computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
                                                                                    noisy_odom_y,
                                                                                    noisy_odom_theta))
            initial_estimate.insert(i + 2, computed_estimate)

        # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
        isam.update(graph, initial_estimate)
        current_estimate = isam.calculateEstimate()

        # Report all current state estimates from the iSAM2 optimization.
        report_on_progress(graph, current_estimate, i)
        initial_estimate.clear()

    # Print the final covariance matrix for each pose after completing inference on the trajectory.
    marginals = gtsam.Marginals(graph, current_estimate)
    for i in range(1, len(true_odometry)+1):
        print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
    
    plt.ioff()
    plt.show()
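# determine_loop_closure is used above but not defined in this excerpt. A
# plausible sketch (illustrative, not necessarily the original implementation):
# brute-force search of the previous estimates for a pose within the given
# tolerances of the pose predicted from the new odometry.
def determine_loop_closure_sketch(odom, current_estimate, key, xy_tol=0.8, theta_tol=25):
    """Return the key of a previous pose close to the predicted one, else None."""
    prev_est = current_estimate.atPose2(key + 1)
    curr_est = prev_est.compose(gtsam.Pose2(odom[0], odom[1], odom[2]))
    for k in range(1, key + 1):
        pose = current_estimate.atPose2(k)
        if (abs(pose.x() - curr_est.x()) <= xy_tol
                and abs(pose.y() - curr_est.y()) <= xy_tol
                and abs(pose.theta() - curr_est.theta()) <= theta_tol * np.pi / 180):
            return k
    return None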
Example 21
def run(slam_request=DEFAULT_REQUEST):
    graph = gtsam.NonlinearFactorGraph()

    # Create the keys corresponding to unknown variables in the factor graph
    unknowns = slam_request.symbols

    # Add a prior on pose X1 at the origin. A prior factor consists of a mean
    # and a noise model
    for prior in slam_request.priors:
        graph.add(prior)

    # Add odometry factors between X1,X2 and X2,X3, respectively
    for factor in slam_request.between_pose_factors:
        graph.add(factor)

    for factor in slam_request.bearing_range_factors:
        graph.add(factor)

    # Print graph
    print("Factor Graph:\n{}".format(graph))

    # Create (deliberately inaccurate) initial estimate
    initial_estimate = gtsam.Values()
    for variable, estimate in slam_request.initial_estimates.items():
        initial_estimate.insert(variable, estimate)

    # Print
    print("Initial Estimate:\n{}".format(initial_estimate))

    # Optimize using Levenberg-Marquardt optimization. The optimizer
    # accepts an optional set of configuration parameters, controlling
    # things like convergence criteria, the type of linear system solver
    # to use, and the amount of information displayed during optimization.
    # Here we will use the default set of parameters.  See the
    # documentation for the full set of parameters.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    output_estimations = {}
    reverse_lookup = dict(
        (value, key) for (key, value) in unknowns.items())

    for variable, initial_estimate in slam_request.initial_estimates.items():
        if isinstance(initial_estimate, gtsam.Pose2):
            pose = result.atPose2(variable)
            estimation = [pose.x(), pose.y(), pose.theta()]

        elif isinstance(initial_estimate, gtsam.Point2):
            point = result.atPoint2(variable)
            estimation = [point.x(), point.y()]
        else:
            raise Exception("Unsupported type: " + type(variable))

        output_estimations[reverse_lookup[variable]] = estimation

    # Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    covariance_dict = {}

    for (string, variable) in unknowns.items():
        covariance = marginals.marginalCovariance(variable)
        print("{} covariance:\n{}\n".format(string, covariance))
        covariance_dict[string] = covariance

    return PlanarSlamOutput(
        result=output_estimations,
        covariance=covariance_dict,
    )
Example 22
def icp(clouda, cloudb, initial_transform=gtsam.Pose3(), max_iterations=25):
    """Runs ICP on two clouds by calling
    all five steps implemented above.
    Iterates until close enough or max
    iterations.

    Returns a series of intermediate clouds
    for visualization purposes.

    Args:
        clouda (ndarray):                point cloud A
        cloudb (ndarray):                point cloud B
        initial_transform (gtsam.Pose3): the initial estimate of transform between clouda and cloudb (step 1 of icp)
        max_iterations (int):            maximum iters of ICP to run before breaking

    Ret:
        bTa (gtsam.Pose3): the final transform
        icp_series (list): visualized icp for debugging
    """
    
    
    icp_series = []
    bTa = initial_transform
    i = 0
    converged = False
    while i < max_iterations and not converged:
        # Transform cloud A by the current estimate and pair each point with
        # its closest point in cloud B
        newClTr = transform_cloud(bTa, clouda)
        newCloudb = assign_closest_pairs(newClTr, cloudb)
        transform = estimate_transform(newClTr, newCloudb)

        # Stop once the incremental transform is (numerically) the identity
        if transform.equals(gtsam.Pose3.identity(), tol=1e-2):
            converged = True
        else:
            bTa = gtsam.Pose3(np.matmul(bTa.matrix(), transform.matrix()))
            i += 1
            icp_series.append([newClTr, cloudb])

    return bTa, icp_series

"""The animation shows how clouda has moved after each iteration of ICP. You should see stationary landmarks, like walls and parked cars, converge onto each other."""

aTb, icp_series = icp(clouda, cloudb)
visualize_clouds_animation(icp_series, speed=400, show_grid_lines=True)

"""ICP is a computationally intense algorithm and we plan to run it between each cloud pair in our 180 clouds dataset. Use the python profiler to identify the computationally expensive subroutines in your algorithm and use numpy to reduce your runtime. The TAs get ~6.5 seconds."""

import cProfile
cProfile.run('icp(clouda, cloudb)')
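# To drill further into the profile, the standard-library pstats module can
# sort and truncate the report (the sort key and line count here are just
# illustrative choices):
import pstats

profiler = cProfile.Profile()
profiler.enable()
icp(clouda, cloudb)
profiler.disable()
pstats.Stats(profiler).sort_stats("cumulative").print_stats(10)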

"""These unit tests will verify the basic functionality of the functions you've implemented in this section. Keep in mind that these are not exhaustive."""

import unittest

class TestICP(unittest.TestCase):

    def setUp(self):
        self.testclouda = np.array([[1], [1], [1]])
        self.testcloudb = np.array([[2, 10], [1, 1], [1, 1]])
        self.testcloudc = np.array([[2], [1], [1]])
        self.testbTa = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1, 0, 0))
        self.testcloudd = np.array([[0, 20, 10], [0, 10, 20], [0, 0, 0]])
        self.testcloude = np.array([[10, 30, 20], [10, 20, 30], [0, 0, 0]])

    def test_assign_closest_pairs1(self):
        expected = (3, 1)
        actual = assign_closest_pairs(self.testclouda, self.testcloudb).shape
        self.assertEqual(expected, actual)

    def test_assign_closest_pairs2(self):
        expected = 2
        actual = assign_closest_pairs(self.testclouda, self.testcloudb)[0][0]
        self.assertEqual(expected, actual)

    def test_estimate_transform1(self):
        expected = 1
        actual = estimate_transform(self.testclouda, self.testcloudc).x()
        self.assertEqual(expected, actual)

    def test_estimate_transform2(self):
        expected = 10
        actual = estimate_transform(self.testcloudd, self.testcloude).x()
        self.assertAlmostEqual(expected, actual, places=7)
        actua2 = estimate_transform(self.testcloudd, self.testcloude).y()
        self.assertAlmostEqual(expected, actua2, places=7)

    def test_transform_cloud1(self):
        expected = 2
        actual = transform_cloud(self.testbTa, self.testclouda)[0][0]
        self.assertEqual(expected, actual)

    def test_icp1(self):
        ret = icp(self.testclouda, self.testcloudb)
        expected1 = type(gtsam.Pose3())
        actual1 = type(ret[0])
        self.assertEqual(expected1, actual1)
        expected2 = type([])
        actual2 = type(ret[1])
        self.assertEqual(expected2, actual2)
        expected3 = type([])
        actual3 = type(ret[1][0])
        self.assertEqual(expected3, actual3)

    def test_icp2(self):
        expected = 1
        actual = icp(self.testclouda, self.testcloudb)[0].x()
        self.assertEqual(expected, actual)

    def test_icp3(self):
        expected = 1
        actual = icp(self.testclouda, self.testcloudc)[0].x()
        self.assertEqual(expected, actual)

if __name__ == "__main__":
    unittest.main(argv=['first-arg-is-ignored'], exit=False)

"""# Factor Graph

In this section, we'll build a factor graph to estimate the pose of our vehicle using the transforms our ICP algorithm gives us between frames. These ICP transforms are the factors that tie the pose variables together.

We will be using GTSAM to construct the factor graph as well as perform an optimization for the pose of the car as it travels down the street. Let's start with a simple example first. Recall from the PoseSLAM description in the LIDAR slides how we could add a factor (aka constraint) between two state variables. When we revisited a state, we could add a loop closure. Since the car in our dataset never revisits a previous pose, there is no loop closure. Here is that example from the slides, copied here. Note how the graph is initialized and how factors are added.
"""

# # Factor graph example 

# Helper function to create a pose
def vector3(x, y, z):
    """Create 3d double numpy array."""
    return np.array([x, y, z], dtype=float)

# Create noise model
priorNoise = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1))
model = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))

# Instantiate the factor graph
example_graph = gtsam.NonlinearFactorGraph()

# Adding a prior on the first pose
example_graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), priorNoise))

# Create odometry (Between) factors between consecutive poses
example_graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2, 0, 0), model)) 
example_graph.add(gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2), model)) 
example_graph.add(gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2), model)) 
example_graph.add(gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2), model)) 

# Add the loop closure constraint
example_graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2), model)) 

# Create the initial estimate
example_initial_estimate = gtsam.Values()
example_initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
example_initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
example_initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
example_initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
example_initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))

# Optimize the initial values using a Gauss-Newton nonlinear optimizer
ex_parameters = gtsam.GaussNewtonParams()
ex_parameters.setRelativeErrorTol(1e-5)
ex_parameters.setMaxIterations(100)
ex_optimizer = gtsam.GaussNewtonOptimizer(example_graph, example_initial_estimate, ex_parameters)
ex_result = ex_optimizer.optimize()
print("Final Result:\n{}".format(ex_result))

# Plot your graph
marginals = gtsam.Marginals(example_graph, ex_result)
fig = plt.figure(0)
for i in range(1, 6):
    gtsam_plot.plot_pose2(0, ex_result.atPose2(i), 0.5, marginals.marginalCovariance(i))

plt.axis('equal')
plt.show()
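
"""Once the optimizer converges, `ex_result` holds the refined poses and `marginals` their uncertainties. As a quick usage sketch (nothing new here, just the accessors already used above), we can query a single optimized pose and its covariance:"""

# Query the optimized estimate for pose 5 and its 3x3 marginal covariance
final_pose = ex_result.atPose2(5)
print("Optimized pose 5: x={:.2f}, y={:.2f}, theta={:.2f}".format(
    final_pose.x(), final_pose.y(), final_pose.theta()))
print("Marginal covariance of pose 5:\n{}".format(marginals.marginalCovariance(5)))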

"""**TODO** [25 points]

You will be using your ICP implementation here to find the transform between two subsequent clouds. These transforms become the factors between pose variables in the graph. So, you will need to go through all the point clouds and run icp pair-wise to find the relative movement of the car. With these transformation, create a factor representing the transform between the pose variables.

We talked about how loop closure helps us consolidate conflicting data into a better global estimate. Unfortunately, our car does not perform a loop closure. So, our graph would just be a long series of poses connected by icp-returned transforms. However, our lidar scans are noisy, which means that our icp-returned transforms are not perfect either. This ultimately results in incorrect vehicle poses and overall map. One way that we can augment our graph is through "skipping". We simply run ICP between every other cloud, and add these skip connections into the graph. You can basically perform ICP between two non-consecutive point clouds and add that transform as a factor in the factor graph.
"""

def populate_factor_graph(graph, initial_estimates, initial_pose, clouds):
    """Populates a gtsam.NonlinearFactorGraph with ICP factors between
    consecutive state variables, and populates initial_estimates with
    an estimate for each state variable.

    Args:
        graph (gtsam.NonlinearFactorGraph): the factor graph to populate with ICP constraints
        initial_estimates (gtsam.Values):   the container to populate with vehicle pose estimates
        initial_pose (gtsam.Pose3):         the starting pose for the estimates, in world coordinates
        clouds (np.ndarray):                the numpy array with all our point clouds

    Returns:
        The populated graph and initial_estimates.
    """
    ICP_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
    factor_pose = initial_pose

    # Add ICP factors between each pair of consecutive clouds
    prev_T = gtsam.Pose3()
    for i in range(len(clouds) - 1):
        # Run ICP between consecutive clouds, warm-started with the previous transform
        bTa, icp_series = icp(clouds[i], clouds[i+1], initial_transform=prev_T)
        prev_T = bTa  # consecutive motions are similar, so reuse as the next initial guess
        T = bTa.inverse()
        graph.add(gtsam.BetweenFactorPose3(i, i+1, T, ICP_NOISE))

        # Compose the transform onto the running pose to seed the next initial estimate
        factor_pose = factor_pose.compose(T)
        initial_estimates.insert(i+1, factor_pose)
        print(".", end="")

    # Add skip connections between every other frame
    prev_T = gtsam.Pose3()
    for i in range(0, len(clouds) - 2, 2):
        # Run ICP between every other cloud, again warm-started with the previous transform
        bTa, icp_series = icp(clouds[i], clouds[i+2], initial_transform=prev_T)
        prev_T = bTa
        T = bTa.inverse()
        graph.add(gtsam.BetweenFactorPose3(i, i+2, T, ICP_NOISE))
        print(".", end="")

    return graph, initial_estimates

"""The real power of GTSAM will show here. In five lines, we'll setup a Gauss Newton nonlinear optimizer and optimize for the vehicle's poses in world coordinates.

Note: This cell runs your ICP implementation 180 times. If you've implemented your ICP similarly to the TAs, expect this cell to take 2 minutes. If you're missing the `initial_transform` argument for icp, it may take ~1 hour.
"""

# load in all clouds in our dataset
clouds = read_ply(*scans_fnames)

# Setting up our factor graph
graph = gtsam.NonlinearFactorGraph()
initial_estimates = gtsam.Values()

# We get the initial pose of the car from Argo AI's dataset, and add it to the graph as a prior
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
initial_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                           gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
graph.add(gtsam.PriorFactorPose3(0, initial_pose, PRIOR_NOISE))
initial_estimates.insert(0, initial_pose)

# We'll use your function to populate the factor graph
graph, initial_estimates = populate_factor_graph(graph, initial_estimates, initial_pose, clouds)

# Now optimize for the states
parameters = gtsam.GaussNewtonParams()
parameters.setRelativeErrorTol(1e-5)
parameters.setMaxIterations(100)
optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimates, parameters)
result = optimizer.optimize()

"""Let's plot these poses to see how our vechicle moves.

Screenshot this for your reflection.
"""

poses_cloud = np.array([[], [], []])
for i in range(len(clouds)):
    poses_cloud = np.hstack([poses_cloud, np.array([[result.atPose3(i).x()], [result.atPose3(i).y()], [result.atPose3(i).z()]])])

init_car_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                            gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
visualize_clouds([poses_cloud, transform_cloud(init_car_pose, clouds[0])], show_grid_lines=True)

"""These unit tests will verify the basic functionality of the function you've implemented in this section. Keep in mind that these are not exhaustive."""

import unittest

class TestFactorGraph(unittest.TestCase):

    def setUp(self):
        test_clouds = read_ply(*scans_fnames)[:6]
        PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
        ICP_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
        test_graph = gtsam.NonlinearFactorGraph()
        test_initial_estimates = gtsam.Values()
        initial_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                                   gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
        test_graph.add(gtsam.PriorFactorPose3(0, initial_pose, PRIOR_NOISE))
        test_initial_estimates.insert(0, initial_pose)
        test_graph, test_initial_estimates = populate_factor_graph(test_graph, test_initial_estimates, initial_pose, test_clouds)
        self.graph = test_graph
        self.initial_estimates = test_initial_estimates

    def test_graph_params(self):
        self.assertIsInstance(self.graph, gtsam.NonlinearFactorGraph)

    def test_initial_estimates_params(self):
        self.assertIsInstance(self.initial_estimates, gtsam.Values)

def suite():
    functions = ['test_graph_params', 'test_initial_estimates_params']
    suite = unittest.TestSuite()
    for func in functions:
        suite.addTest(TestFactorGraph(func))
    return suite

if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    runner.run(suite())

"""# Mapping

In this section, we'll tackle the mapping component of SLAM (Simultaneous Localization and Mapping). The previous section used a factor graph to localize our vehicle's poses in world coordinates. We'll now use those poses to form a map of the street from the point clouds.

Given the poses and the clouds, this task is straightforward. We'll use your `transform_cloud` method from the ICP section to transform every other cloud in our dataset so it is centered at the pose where it was captured. Visualizing all of these clouds yields the complete map. We skip every other cloud to reduce the amount of noise in our map while retaining plenty of detail.

Screenshot this for your reflection.
"""

cloud_map = []
for i in range(0, len(clouds), 2):
    cloud_map.append(transform_cloud(result.atPose3(i), clouds[i]))

visualize_clouds(cloud_map, show_grid_lines=True, single_color="#C6C6C6")

"""# Reflection
Esempio n. 23
0
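# Note: this example assumes GTSAM's symbol shorthand and module-level noise
# models defined elsewhere in the original file. A plausible reconstruction
# (an assumption, not part of the original example) would be:
#   from gtsam.symbol_shorthand import X, L
#   PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
#   ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
#   MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2]))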
def main():
    """Main runner"""

    # Create an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Create the keys corresponding to unknown variables in the factor graph
    X1 = X(1)
    X2 = X(2)
    X3 = X(3)
    L1 = L(4)
    L2 = L(5)

    # Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model
    graph.add(
        gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))

    # Add odometry factors between X1,X2 and X2,X3, respectively
    graph.add(
        gtsam.BetweenFactorPose2(X1, X2, gtsam.Pose2(2.0, 0.0, 0.0),
                                 ODOMETRY_NOISE))
    graph.add(
        gtsam.BetweenFactorPose2(X2, X3, gtsam.Pose2(2.0, 0.0, 0.0),
                                 ODOMETRY_NOISE))

    # Add Range-Bearing measurements to two different landmarks L1 and L2
    graph.add(
        gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45),
                                   np.sqrt(4.0 + 4.0), MEASUREMENT_NOISE))
    graph.add(
        gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0,
                                   MEASUREMENT_NOISE))
    graph.add(
        gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0,
                                   MEASUREMENT_NOISE))

    # Print graph
    print("Factor Graph:\n{}".format(graph))

    # Create (deliberately inaccurate) initial estimate
    initial_estimate = gtsam.Values()
    initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
    initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
    initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
    initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
    initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))

    # Print
    print("Initial Estimate:\n{}".format(initial_estimate))

    # Optimize using Levenberg-Marquardt optimization. The optimizer
    # accepts an optional set of configuration parameters, controlling
    # things like convergence criteria, the type of linear system solver
    # to use, and the amount of information displayed during optimization.
    # Here we will use the default set of parameters.  See the
    # documentation for the full set of parameters.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))

    # Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    for (key, s) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"),
                     (L2, "L2")]:
        print("{} covariance:\n{}\n".format(s,
                                            marginals.marginalCovariance(key)))


def batch_factorgraph_example():
    # Create an empty nonlinear factor graph.
    graph = gtsam.NonlinearFactorGraph()

    # Create the keys for the poses.
    X1 = X(1)
    X2 = X(2)
    X3 = X(3)
    pose_variables = [X1, X2, X3]

    # Create keys for the landmarks.
    L1 = L(1)
    L2 = L(2)
    landmark_variables = [L1, L2]

    # Add a prior on pose X1 at the origin.
    prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.1]))
    graph.add(
        gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), prior_noise))

    # Add odometry factors between X1,X2 and X2,X3, respectively
    odometry_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1,
                                                                0.1]))
    graph.add(
        gtsam.BetweenFactorPose2(X1, X2, gtsam.Pose2(2.0, 0.0, 0.0),
                                 odometry_noise))
    graph.add(
        gtsam.BetweenFactorPose2(X2, X3, gtsam.Pose2(2.0, 0.0, 0.0),
                                 odometry_noise))

    # Add Range-Bearing measurements to two different landmarks L1 and L2
    measurement_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.05, 0.1]))
    graph.add(
        gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45),
                                   np.sqrt(4.0 + 4.0), measurement_noise))
    graph.add(
        gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_noise))
    graph.add(
        gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_noise))

    # Create (deliberately inaccurate) initial estimate
    initial_estimate = gtsam.Values()
    initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
    initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
    initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
    initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
    initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))

    # Create an optimizer.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  params)

    # Solve the MAP problem.
    result = optimizer.optimize()

    # Calculate marginal covariances for all variables.
    marginals = gtsam.Marginals(graph, result)

    # Extract marginals
    pose_marginals = []
    for var in pose_variables:
        pose_marginals.append(
            MultivariateNormalParameters(result.atPose2(var),
                                         marginals.marginalCovariance(var)))

    landmark_marginals = []
    for var in landmark_variables:
        landmark_marginals.append(
            MultivariateNormalParameters(result.atPoint2(var),
                                         marginals.marginalCovariance(var)))

    # You can extract the joint marginals like this.
    joint_all = marginals.jointMarginalCovariance(
        gtsam.KeyVector(pose_variables + landmark_variables))
    print("Joint covariance over all variables:")
    print(joint_all.fullMatrix())

    # Plot the marginals.
    plot_result(pose_marginals, landmark_marginals)
Esempio n. 25
0
    def bundle_adjustment(self):
        MIN_LANDMARK_SEEN = 3

        depth = 15

        # Initialize factor Graph
        graph = gtsam.NonlinearFactorGraph()
        initialEstimate = gtsam.Values()

        # Add factors for all measurements
        measurementNoiseSigma = 1.0
        measurementNoise = gtsam.noiseModel_Isotropic.Sigma(
            2, measurementNoiseSigma)

        landmark_id_list = {}
        img_pose_id_list = {}
        for i in range(self.row*self.col*self.angles):
            ir, i_mod = divmod(i, self.angles*self.col)
            ic, ia = divmod(i_mod, self.angles)
            img_pose = self.img_pose[ir][ic][ia]
            for k in range(len(img_pose.kp)):
                if img_pose.kp_3d_exist(k):
                    landmark_id = img_pose.kp_3d(k)
                    if self.landmark[landmark_id].seen >= MIN_LANDMARK_SEEN:
                        key_point = Point2(img_pose.kp[k][0], img_pose.kp[k][1])
                        if landmark_id_list.get(landmark_id) is None:
                            pose = self.pose_initial[ir][ic][ia]
                            landmark_point = self.back_projection(key_point, pose, depth)
                            landmark_id_list[landmark_id] = landmark_point
                        if img_pose_id_list.get(i) is None:
                            img_pose_id_list[i] = True
                        graph.add(gtsam.GenericProjectionFactorCal3_S2(
                            key_point, measurementNoise,
                            X(i), P(landmark_id), self.cal))

        s = np.radians(60)
        poseNoiseSigmas = np.array([s, s, s, 10, 10, 10])
        posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas)
        print(img_pose_id_list)
        for i, idx in enumerate(img_pose_id_list):
            ir, i_mod = divmod(idx, self.angles*self.col)
            ic, ia = divmod(i_mod, self.angles)
            # Initialize the estimate for each pose
            pose_i = self.pose_initial[ir][ic][ia]
            initialEstimate.insert(X(idx), pose_i)
            # Add a prior on every pose to anchor the solution
            graph.add(gtsam.PriorFactorPose3(X(idx), pose_i, posePriorNoise))

        # Initialize estimate for landmarks
        print(landmark_id_list)
        for idx in landmark_id_list:
            point_j = landmark_id_list.get(idx)
            initialEstimate.insert(P(idx), point_j)
            self.landmark_descriptor.append(self.landmark[idx].desc)

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        sfm_result = optimizer.optimize()
        # Marginalization
        marginals = gtsam.Marginals(graph, sfm_result)
        for idx in img_pose_id_list: 
            marginals.marginalCovariance(X(idx))
        for idx in landmark_id_list: 
            marginals.marginalCovariance(P(idx))

        return sfm_result,img_pose_id_list,landmark_id_list
Esempio n. 26
0
    def optimize(self):
        optimizer = gt.LevenbergMarquardtOptimizer(self.graph,
                                                   self.initialEstimate)
        result = optimizer.optimize()
        marginals = gt.Marginals(self.graph, result)
        return result, marginals
Esempio n. 27
0
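# This example is truncated at the top. A plausible setup (an assumption,
# following GTSAM's standard OdometryExample, not part of the original) is:
#   import gtsam
#   import numpy as np
#   import matplotlib.pyplot as plt
#   import gtsam.utils.plot as gtsam_plot
#   graph = gtsam.NonlinearFactorGraph()
#   priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
#   graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0.0, 0.0, 0.0), priorNoise))
#   odometry = gtsam.Pose2(2.0, 0.0, 0.0)
#   odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))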
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))

# Create (deliberately inaccurate) initial estimate
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0,  0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1,  0.1))

# Optimize using Dogleg optimization
result = gtsam.DoglegOptimizer(graph, initial).optimize()

# Print results
np.set_printoptions(precision=4, suppress=True)
print(result)
marginals = gtsam.Marginals(graph, result)
print('x1 covariance:')
print(marginals.marginalCovariance(1))
print('x2 covariance:')
print(marginals.marginalCovariance(2))
print('x3 covariance:')
print(marginals.marginalCovariance(3))

# Plot results
plt.figure(0)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Odometry Estimates')
plt.grid(True)
plt.axis('equal')
for i in range(1,4):
    gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
plt.show()
Esempio n. 28
0
    def run(self,
            T: int = 12,
            compute_covariances: bool = False,
            verbose: bool = True):
        """
        Main runner.

        Args:
            T: Total trajectory time.
            compute_covariances: Flag indicating whether to compute marginal covariances.
            verbose: Flag indicating if printing should be verbose.
        """
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
        pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)

        num_poses = T  # assumes 1 factor per second
        initial = gtsam.Values()
        initial.insert(BIAS_KEY, self.actualBias)

        # simulate the loop
        i = 0  # state index
        initial_state_i = self.scenario.navState(0)
        initial.insert(X(i), initial_state_i.pose())
        initial.insert(V(i), initial_state_i.velocity())

        # add prior on beginning
        self.addPrior(0, graph)

        for k, t in enumerate(np.arange(0, T, self.dt)):
            # get measurements and add them to PIM
            measuredOmega = self.runner.measuredAngularVelocity(t)
            measuredAcc = self.runner.measuredSpecificForce(t)
            pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)

            # Plot IMU many times
            if k % 10 == 0:
                self.plotImu(t, measuredOmega, measuredAcc)

            if (k + 1) % int(1 / self.dt) == 0:
                # Plot every second
                self.plotGroundTruthPose(t, scale=1)
                plt.title("Ground Truth Trajectory")

                # create IMU factor every second
                factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
                                         BIAS_KEY, pim)
                graph.push_back(factor)

                if verbose:
                    print(factor)
                    print(pim.predict(initial_state_i, self.actualBias))

                pim.resetIntegration()

                rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3) * 0.1)
                translationNoise = gtsam.Point3(*np.random.randn(3) * 1)
                poseNoise = gtsam.Pose3(rotationNoise, translationNoise)

                actual_state_i = self.scenario.navState(t + self.dt)
                print("Actual state at {0}:\n{1}".format(
                    t + self.dt, actual_state_i))

                noisy_state_i = gtsam.NavState(
                    actual_state_i.pose().compose(poseNoise),
                    actual_state_i.velocity() + np.random.randn(3) * 0.1)

                initial.insert(X(i + 1), noisy_state_i.pose())
                initial.insert(V(i + 1), noisy_state_i.velocity())
                i += 1

        # add priors on end
        self.addPrior(num_poses - 1, graph)

        initial.print("Initial values:")

        result = self.optimize(graph, initial)

        result.print("Optimized values:")
        print("------------------")
        print(graph.error(initial))
        print(graph.error(result))
        print("------------------")

        if compute_covariances:
            # Calculate and print marginal covariances
            marginals = gtsam.Marginals(graph, result)
            print("Covariance on bias:\n",
                  marginals.marginalCovariance(BIAS_KEY))
            for i in range(num_poses):
                print("Covariance on pose {}:\n{}\n".format(
                    i, marginals.marginalCovariance(X(i))))
                print("Covariance on vel {}:\n{}\n".format(
                    i, marginals.marginalCovariance(V(i))))

        self.plot(result, show=True)
Esempio n. 29
0
def main():
    """Main runner."""
    # Create noise models
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(gtsam.Point3(0.3, 0.3, 0.1))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(
        gtsam.Point3(0.2, 0.2, 0.1))

    # 1. Create a factor graph container and add factors to it
    graph = gtsam.NonlinearFactorGraph()

    # 2a. Add a prior on the first pose, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))

    # 2b. Add odometry factors
    # Create odometry (Between) factors between consecutive poses
    graph.add(
        gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE))
    graph.add(
        gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))
    graph.add(
        gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))
    graph.add(
        gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))

    # 2c. Add the loop closure constraint
    # This factor encodes the fact that we have returned to the same pose. In real
    # systems, these constraints may be identified in many ways, such as appearance-based
    # techniques with camera images. We will use another Between Factor to enforce this constraint:
    graph.add(
        gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2),
                                 ODOMETRY_NOISE))
    print("\nFactor Graph:\n{}".format(graph))  # print

    # 3. Create the data structure to hold the initial_estimate estimate to the
    # solution. For illustrative purposes, these have been deliberately set to incorrect values
    initial_estimate = gtsam.Values()
    initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
    initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
    initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
    initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
    initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))
    print("\nInitial Estimate:\n{}".format(initial_estimate))  # print

    # 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
    # The optimizer accepts an optional set of configuration parameters,
    # controlling things like convergence criteria, the type of linear
    # system solver to use, and the amount of information displayed during
    # optimization. We will set a few parameters as a demonstration.
    parameters = gtsam.GaussNewtonParams()

    # Stop iterating once the change in error between steps is less than this value
    parameters.setRelativeErrorTol(1e-5)
    # Do not perform more than N iteration steps
    parameters.setMaxIterations(100)
    # Create the optimizer ...
    optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters)
    # ... and optimize
    result = optimizer.optimize()
    print("Final Result:\n{}".format(result))

    # 5. Calculate and print marginal covariances for all variables
    marginals = gtsam.Marginals(graph, result)
    for i in range(1, 6):
        print("X{} covariance:\n{}\n".format(i,
                                             marginals.marginalCovariance(i)))

    for i in range(1, 6):
        gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
                              marginals.marginalCovariance(i))

    plt.axis('equal')
    plt.show()
Esempio n. 30
0
    def bundle_adjustment(self):
        MIN_LANDMARK_SEEN = 3

        # Initialize factor Graph
        graph = gtsam.NonlinearFactorGraph()
        initialEstimate = gtsam.Values()

        # Add factors for all measurements
        measurementNoiseSigma = 1.0
        measurementNoise = gtsam.noiseModel_Isotropic.Sigma(
            2, measurementNoiseSigma)

        landmark_id_list = {}
        img_pose_id_list = {}
        for i, img_pose in enumerate(self.img_pose):
            for k in range(len(img_pose.kp)):
                if img_pose.kp_3d_exist(k):
                    landmark_id = img_pose.kp_3d(k)
                    if (self.landmark[landmark_id].seen >= MIN_LANDMARK_SEEN):
                        landmark_id_list[landmark_id] = True
                        img_pose_id_list[i] = True
                        graph.add(
                            gtsam.GenericProjectionFactorCal3_S2(
                                Point2(img_pose.kp[k][0],
                                       img_pose.kp[k][1]), measurementNoise,
                                X(i), P(landmark_id), self.cal))

        # Initialize estimates for poses and create priors
        s = np.radians(60)
        poseNoiseSigmas = np.array([s, s, s, 10, 10, 10])
        posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas)

        wRc = gtsam.Rot3(np.identity(3))

        for i, idx in enumerate(img_pose_id_list):
            initialEstimate.insert(X(idx), self.img_pose[idx].T)
            # Anchor the first two poses with priors to fix the gauge freedom
            if i < 2:
                graph.add(
                    gtsam.PriorFactorPose3(
                        X(idx),
                        gtsam.Pose3(wRc,
                                    gtsam.Point3(1.38 * idx, 0, -0.29 * idx)),
                        posePriorNoise))

        # Initialize estimate for landmarks
        for idx in landmark_id_list:
            p = self.landmark[idx].point

            initialEstimate.insert(P(idx), Point3(p[0], p[1], p[2]))

        # Optimization: run a fixed number of Levenberg-Marquardt iterations
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        for i in range(1000):
            optimizer.iterate()
        sfm_result = optimizer.values()
        # Marginalization
        marginals = gtsam.Marginals(graph, sfm_result)
        for idx in img_pose_id_list:
            marginals.marginalCovariance(X(idx))
        for idx in landmark_id_list:
            marginals.marginalCovariance(P(idx))


        return sfm_result, img_pose_id_list, landmark_id_list