Example no. 1
    def action_step(self, action, action_noise):

        graph_add = gtsam.NonlinearFactorGraph()
        initial_add = gtsam.Values()

        # Setting path and initiating DA realization.
        path_length = len(self.daRealization)
        self.daRealization.append(0)

        # Advancing the belief with the action (motion) factor
        self.graph.add(
            gtsam.BetweenFactorPose3(self.X(path_length),
                                     self.X(path_length + 1), action,
                                     action_noise))
        graph_add.add(
            gtsam.BetweenFactorPose3(self.X(path_length),
                                     self.X(path_length + 1), action,
                                     action_noise))

        # Setting initial values; save an initial value for the propagated pose, needed later
        if not self.initial.exists(self.X(path_length + 1)):
            self.initial.insert(
                self.X(path_length + 1),
                self.initial.atPose3(self.X(path_length)).compose(action))
            # self.initial.insert(self.X(path_length + 1), gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))
        if not initial_add.exists(self.X(path_length + 1)):
            initial_add.insert(
                self.X(path_length + 1),
                self.initial.atPose3(self.X(path_length)).compose(action))
            # initial_add.insert(self.X(path_length + 1), gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))

        # Setting propagated belief object for weight computation
        self.prop_belief = self.graph.clone()
        self.prop_initial = gtsam.Values()
        for key in list(self.initial.keys()):
            self.prop_initial.insert(key, self.initial.atPose3(key))

        # self.optimize() #TODO: SEE IF WORKS
        #print(initial_add.exists(self.X(2)))
        self.isam.update(graph_add, initial_add)
        self.optimize()

        # Computing marginals of the propagated belief (non-existent object issue is resolved).
        self.marginals = gtsam.Marginals(self.prop_belief, self.result)

        # Setting resultsFlag to false to require optimization
        self.resultsFlag = False

        # Multi robot flag for twin measurements
        self.sim_measurement_flag = list()
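
A standalone sketch of the core of action_step above: it adds a BetweenFactorPose3 for the action and predicts the new pose by composing the previous estimate with that action. The integer keys and noise sigmas below are illustrative assumptions, not the class's X() symbols.

import gtsam
import numpy as np

# Minimal stand-in for what action_step adds to the belief (keys 0 and 1 are
# hypothetical; for Pose3 the sigma ordering is rotation first, then translation).
action = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1.0, 0.0, 0.0))  # move 1 m along x
action_noise = gtsam.noiseModel.Diagonal.Sigmas(
    np.array([0.05, 0.05, 0.05, 0.1, 0.1, 0.1]))

graph = gtsam.NonlinearFactorGraph()
initial = gtsam.Values()
initial.insert(0, gtsam.Pose3())
graph.add(gtsam.BetweenFactorPose3(0, 1, action, action_noise))
initial.insert(1, initial.atPose3(0).compose(action))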
Example no. 2
    def add_measurement(self, measurement, new_da_realization, graph, initial,
                        da_realization):
        """

        :type graph: gtsam.NonlinearFactorGraph
        :type initial: gtsam.Values
        """

        # Adding a factor for each measurement with its corresponding data association realization
        measurement_number = 0
        path_length = len(da_realization)
        for realization in new_da_realization:
            realization = int(realization)
            graph.add(
                gtsam.BetweenFactorPose3(self.X(path_length),
                                         self.XO(realization),
                                         measurement[measurement_number],
                                         self.model_noise))
            measurement_number += 1

            # Initialization of object of the realization
            # TODO: CHANGE TO SOMETHING MORE ACCURATE / LESS BASIC
            if not initial.exists(self.XO(realization)):
                initial.insert(self.XO(realization),
                               gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))

        # Saving the data association realization in a list
        da_realization[path_length - 1] = new_da_realization
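
The snippet relies on self.X and self.XO key helpers that are not shown. A hedged sketch of what such helpers typically look like, mirroring the symbol('x', i) usage in a later example; the 'o' character for object keys is an assumption.

import gtsam

# Hypothetical key helpers matching the self.X / self.XO calls above:
# robot poses are keyed with 'x', object (landmark) poses with 'o'.
def X(i):
    return gtsam.symbol('x', i)

def XO(j):
    return gtsam.symbol('o', j)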
Example no. 3
    def log_pdf_value(self, measurement, point_x, point_xo):
        """

        :type point_x: gtsam.Pose3
        :type point_xo: gtsam.Pose3
        :type measurement: gtsam.Pose3
        """

        meas_x = measurement[2] * np.cos(measurement[0] / 180 * np.pi) \
                 * np.cos(measurement[1] / 180 * np.pi)
        meas_y = measurement[2] * np.sin(measurement[0] / 180 * np.pi) \
                 * np.cos(measurement[1] / 180 * np.pi)
        meas_z = measurement[2] * np.sin(measurement[1] / 180 * np.pi)

        pose_meas = gtsam.Pose3(gtsam.Rot3.Ypr(0.0, 0.0, 0.0),
                                np.array([meas_x, meas_y, meas_z]))

        # Create an auxiliary factor and compute its log-pdf value (without the normalizing constant)
        aux_factor = gtsam.BetweenFactorPose3(1, 2, pose_meas,
                                              self.model_noise)
        poses = gtsam.Values()
        poses.insert(1, point_x)
        poses.insert(2, point_xo)

        return aux_factor.error(poses)
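
For reference, a gtsam factor's error(values) is half the squared Mahalanobis norm of the residual under its noise model, so the value returned above is the negative log-pdf up to the normalizing constant. A self-contained numeric check of the conversion and the auxiliary-factor trick (the noise model below is an assumption):

import numpy as np
import gtsam

# Standalone check of the auxiliary-factor idea above: a measurement of
# [azimuth_deg, elevation_deg, range] = [0, 0, 2] converts to a (2, 0, 0)
# translation; with the object actually 2 m straight ahead the residual is
# zero, so the factor error (the value log_pdf_value returns) is ~0.
model_noise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)          # assumed noise model
pose_meas = gtsam.Pose3(gtsam.Rot3.Ypr(0.0, 0.0, 0.0), np.array([2.0, 0.0, 0.0]))
aux_factor = gtsam.BetweenFactorPose3(1, 2, pose_meas, model_noise)
poses = gtsam.Values()
poses.insert(1, gtsam.Pose3())                                   # robot at the origin
poses.insert(2, gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(2.0, 0.0, 0.0)))
print(aux_factor.error(poses))                                   # ~0.0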
Example no. 4
 def add_node(self, kf):
     self.initialEstimate.insert(X(kf.kfID), gt.Pose3(kf.pose_matrix()))
     for kf_n, rel_pose, _ in kf.neighbors:
         if kf_n.kfID > kf.kfID:
             continue
         self.graph.add(
             gt.BetweenFactorPose3(X(kf.kfID), X(kf_n.kfID),
                                   gt.Pose3(rel_pose), self.covariance))
Example no. 5
 def add_skip_odom(self, delta_odom, prev_node_idx, cur_node_idx):
     '''
     params:
     delta_odom = 1*7 np.array(tx,ty,tz,w,x,y,z)
     '''
     delta = gen_pose(delta_odom)
     self.graph.add(
         gtsam.BetweenFactorPose3(X(prev_node_idx), X(cur_node_idx), delta,
                                  self.skip_noise))
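
gen_pose is not shown in this snippet; a minimal sketch of such a helper under the (tx, ty, tz, w, x, y, z) layout stated in the docstring (the body below is an assumption, not the original implementation):

import gtsam
import numpy as np

def gen_pose(vec):
    # Hypothetical helper: build a gtsam.Pose3 from a 1x7 array laid out as
    # (tx, ty, tz, w, x, y, z); gtsam.Rot3.Quaternion expects (w, x, y, z).
    tx, ty, tz, w, x, y, z = np.asarray(vec, dtype=float).ravel()
    return gtsam.Pose3(gtsam.Rot3.Quaternion(w, x, y, z), gtsam.Point3(tx, ty, tz))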
Example no. 6
def populate_factor_graph(graph, initial_estimates, initial_pose, clouds):
    """Populates a gtsam.NonlinearFactorGraph with
    factors between state variables. Populates
    initial_estimates for state variables as well.

    Args:
        graph (gtsam.NonlinearFactorGraph): the factor graph populated with ICP constraints
        initial_estimates (gtsam.Values):   the populated estimates for vehicle poses
        initial_pose (gtsam.Pose3):         the starting pose for the estimates in world coordinates
        clouds (np.ndarray):                the numpy array with all our point clouds
    """
    ICP_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
    factor_pose = initial_pose

    # Add ICP Factors between each pair of clouds
    prev_T = gtsam.Pose3()
    for i in range(len(clouds) - 1):
        # TODO: Run ICP between clouds (hint: use initial_transform argument)
        T = icp(clouds[i], clouds[i+1], prev_T)[0]
        prev_T = T
        # TODO: Set T to its inverse: use `gtsam.Pose3.inverse()`
        T = T.inverse()
        # TODO: Add a `gtsam.BetweenFactorPose3()` to the graph
        graph.add(gtsam.BetweenFactorPose3(i, i+1, T, ICP_NOISE))
        factor_pose = factor_pose.compose(T)
        initial_estimates.insert(i+1, factor_pose)
        print(".", end="")

    # Add skip connections between every other frame
    prev_T = gtsam.Pose3()
    for i in range(0, len(clouds) - 2, 2):
        # TODO: Run ICP between clouds (hint: use initial_transform argument)
        T= icp(clouds[i], clouds[i+2], prev_T)[0]
        prev_T = T  
        # TODO: Set T to its inverse: use `gtsam.Pose3.inverse()`
        T = T.inverse()
        # TODO: Add a `gtsam.BetweenFactorPose3()` to the graph
        graph.add(gtsam.BetweenFactorPose3(i, i+2, T, ICP_NOISE))
        
        print(".", end="")

    return graph, initial_estimates
Example no. 7
 def add_odom(self, delta_odom, cur_pose_estimate):
     '''
     params:
     delta_odom = 1*7 np.array(tx,ty,tz,w,x,y,z)
     '''
     delta = gen_pose(delta_odom)
     cur_pose = gen_pose(cur_pose_estimate)
     self.graph.add(
         gtsam.BetweenFactorPose3(X(self.node_idx), X(self.node_idx + 1),
                                  delta, self.odom_noise))
     self.initial_estimate.insert(X(self.node_idx + 1), cur_pose)
Example no. 8
def step(data, isam, result, truth, currPoseIndex):
    '''
    Do one iSAM update step
    @param[in] data: measurement data (odometry and visual measurements and their noiseModels)
    @param[in/out] isam: current isam object, will be updated
    @param[in/out] result: current result object, will be updated
    @param[in] truth: ground truth data, used to initialize new variables
    @param[in] currPoseIndex: index of the current pose
    '''
    # iSAM expects us to give it a new set of factors
    # along with initial estimates for any new variables introduced.
    newFactors = gtsam.NonlinearFactorGraph()
    initialEstimates = gtsam.Values()

    # Add odometry
    prevPoseIndex = currPoseIndex - 1
    odometry = data.odometry[prevPoseIndex]
    newFactors.add(
        gtsam.BetweenFactorPose3(symbol('x', prevPoseIndex),
                                 symbol('x', currPoseIndex), odometry,
                                 data.noiseModels.odometry))

    # Add visual measurement factors and initializations as necessary
    for k in range(len(data.Z[currPoseIndex])):
        zij = data.Z[currPoseIndex][k]
        j = data.J[currPoseIndex][k]
        jj = symbol('l', j)
        newFactors.add(
            gtsam.GenericProjectionFactorCal3_S2(zij,
                                                 data.noiseModels.measurement,
                                                 symbol('x', currPoseIndex),
                                                 jj, data.K))
        # TODO: initialize with something other than truth
        if not result.exists(jj) and not initialEstimates.exists(jj):
            lmInit = truth.points[j]
            initialEstimates.insert(jj, lmInit)

    # Initial estimates for the new pose.
    prevPose = result.atPose3(symbol('x', prevPoseIndex))
    initialEstimates.insert(symbol('x', currPoseIndex),
                            prevPose.compose(odometry))

    # Update ISAM
    # figure(1)tic
    isam.update(newFactors, initialEstimates)
    # t=toc plot(frame_i,t,'r.') tic
    newResult = isam.calculateEstimate()
    # t=toc plot(frame_i,t,'g.')

    return isam, newResult
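
A hedged sketch of how step might be driven incrementally; initialize() appears in a later example in this collection, and numPoses is an assumed total pose count:

# Hypothetical driver loop for step(); data, truth, options and numPoses are
# assumed to come from the surrounding example setup.
isam, result, nextPoseIndex = initialize(data, truth, options)
for currPoseIndex in range(nextPoseIndex, numPoses):
    isam, result = step(data, isam, result, truth, currPoseIndex)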
Example no. 9
    def add_observation(self, var1_name, var2_name, transform, noise=None):
        """ Between factor """
        noise_gt = self._process_noise(noise)
        transform_gt = sophus2gtsam(transform)

        var1 = self.vars[var1_name]
        var2 = self.vars[var2_name]

        factor = gtsam.BetweenFactorPose3(var1, var2, transform_gt, noise_gt)
        self.gtsam_graph.push_back(factor)

        # Add edge information
        self.factor_edges[var1].append((var2, transform_gt))
        self.factor_edges[var2].append((var1, transform_gt.inverse()))
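
A hedged sketch of one way the factor_edges adjacency built above could be consumed: chaining the stored relative transforms along a path of variable keys. compose_along_path and its path argument are hypothetical, not part of the original class.

import gtsam

def compose_along_path(factor_edges, path):
    # Hypothetical helper: chain the relative transforms stored by
    # add_observation along a key path [v0, v1, ..., vn] to get the v0 -> vn pose.
    total = gtsam.Pose3()
    for a, b in zip(path[:-1], path[1:]):
        transform = next(t for (nbr, t) in factor_edges[a] if nbr == b)
        total = total.compose(transform)
    return total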
Example no. 10
    def setUp(self):

        model = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)

        # We consider a small graph:
        #                            symbolic FG
        #               x2               0  1
        #             / | \              1  2
        #            /  |  \             2  3
        #          x3   |   x1           2  0
        #           \   |   /            0  3
        #            \  |  /
        #               x0
        #
        p0 = Point3(0, 0, 0)
        self.R0 = Rot3.Expmap(np.array([0.0, 0.0, 0.0]))
        p1 = Point3(1, 2, 0)
        self.R1 = Rot3.Expmap(np.array([0.0, 0.0, 1.570796]))
        p2 = Point3(0, 2, 0)
        self.R2 = Rot3.Expmap(np.array([0.0, 0.0, 3.141593]))
        p3 = Point3(-1, 1, 0)
        self.R3 = Rot3.Expmap(np.array([0.0, 0.0, 4.712389]))

        pose0 = Pose3(self.R0, p0)
        pose1 = Pose3(self.R1, p1)
        pose2 = Pose3(self.R2, p2)
        pose3 = Pose3(self.R3, p3)

        g = NonlinearFactorGraph()
        g.add(gtsam.BetweenFactorPose3(x0, x1, pose0.between(pose1), model))
        g.add(gtsam.BetweenFactorPose3(x1, x2, pose1.between(pose2), model))
        g.add(gtsam.BetweenFactorPose3(x2, x3, pose2.between(pose3), model))
        g.add(gtsam.BetweenFactorPose3(x2, x0, pose2.between(pose0), model))
        g.add(gtsam.BetweenFactorPose3(x0, x3, pose0.between(pose3), model))
        g.add(gtsam.PriorFactorPose3(x0, pose0, model))
        self.graph = g
Example no. 11
    def action_step(self, action, action_noise):

        graph_add = gtsam.NonlinearFactorGraph()
        initial_add = gtsam.Values()

        # Setting path and initiating DA realization.
        path_length = len(self.daRealization)
        self.daRealization.append(0)

        # Advancing the belief with the action (motion) factor
        self.graph.add(
            gtsam.BetweenFactorPose3(self.X(path_length),
                                     self.X(path_length + 1), action,
                                     action_noise))
        graph_add.add(
            gtsam.BetweenFactorPose3(self.X(path_length),
                                     self.X(path_length + 1), action,
                                     action_noise))

        # Setting initial values; save an initial value for the propagated pose, needed later
        if not self.initial.exists(self.X(path_length + 1)):
            self.initial.insert(
                self.X(path_length + 1),
                self.initial.atPose3(self.X(path_length)).compose(action))
            # self.initial.insert(self.X(path_length + 1), gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))
        if not initial_add.exists(self.X(path_length + 1)):
            initial_add.insert(
                self.X(path_length + 1),
                self.initial.atPose3(self.X(path_length)).compose(action))
            # initial_add.insert(self.X(path_length + 1), gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))

        self.isam.update(graph_add, initial_add)
        self.optimize()

        # Setting resultsFlag to false to require optimization
        self.resultsFlag = False
Example no. 12
    def log_pdf_value(self, measurement, point_x, point_xo):
        """

        :type point_x: gtsam.Pose3
        :type point_xo: gtsam.Pose3
        :type measurement: gtsam.Pose3
        """

        # Create an auxiliary factor and compute its log-pdf value (without the normalizing constant)
        aux_factor = gtsam.BetweenFactorPose3(1, 2, measurement,
                                              self.model_noise)
        poses = gtsam.Values()
        poses.insert(1, point_x)
        poses.insert(2, point_xo)

        return aux_factor.error(poses)
Example no. 13
def add_pose_factors(graph, init_est, dataset_path):
    f = open(dataset_path + '/obj_det_poses.pkl', 'rb')
    data = pickle.load(f)
    cnt = 0
    for pose in data.items():
        #print('Adding pose det factor for ' + pose[0])
        num = int(pose[0].split('.')[0])

        pos = gtsam.Point3(pose[1][:3])
        quat = pose[1][3:]

        #wxyz to xyzw
        quat[0], quat[1], quat[2], quat[3] = quat[1], quat[2], quat[3], quat[0]
        rot = gtsam.Rot3((Rotation.from_quat(quat)).as_dcm())
        pose = gtsam.Pose3(rot, pos)
        factor = gtsam.BetweenFactorPose3(C(num), O(0), pose, pose_noise)
        print('new factor')
        graph.push_back(factor)
        cnt += 1

    init_est.insert(O(0), gtsam.Pose3())  #identity
Example no. 14
    def test_Pose3SLAMExample(self) -> None:
        # Create a hexagon of poses
        hexagon = circlePose3(numPoses=6, radius=1.0)
        p0 = hexagon.atPose3(0)
        p1 = hexagon.atPose3(1)

        # create a Pose graph with one equality constraint and six between-factor measurements
        fg = gtsam.NonlinearFactorGraph()
        fg.add(gtsam.NonlinearEqualityPose3(0, p0))
        delta = p0.between(p1)
        covariance = gtsam.noiseModel.Diagonal.Sigmas(
            np.array([
                0.05, 0.05, 0.05,
                np.deg2rad(5.),
                np.deg2rad(5.),
                np.deg2rad(5.)
            ]))
        fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(2, 3, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(3, 4, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(4, 5, delta, covariance))
        fg.add(gtsam.BetweenFactorPose3(5, 0, delta, covariance))

        # Create initial config
        initial = gtsam.Values()
        s = 0.10
        initial.insert(0, p0)
        initial.insert(1,
                       hexagon.atPose3(1).retract(s * np.random.randn(6, 1)))
        initial.insert(2,
                       hexagon.atPose3(2).retract(s * np.random.randn(6, 1)))
        initial.insert(3,
                       hexagon.atPose3(3).retract(s * np.random.randn(6, 1)))
        initial.insert(4,
                       hexagon.atPose3(4).retract(s * np.random.randn(6, 1)))
        initial.insert(5,
                       hexagon.atPose3(5).retract(s * np.random.randn(6, 1)))

        # optimize
        optimizer = gtsam.LevenbergMarquardtOptimizer(fg, initial)
        result = optimizer.optimizeSafely()

        pose_1 = result.atPose3(1)
        self.gtsamAssertEquals(pose_1, p1, 1e-4)
Example no. 15
def icp(clouda, cloudb, initial_transform=gtsam.Pose3(), max_iterations=25):
    """Runs ICP on two clouds by calling
    all five steps implemented above.
    Iterates until close enough or max
    iterations.

    Returns a series of intermediate clouds
    for visualization purposes.

    Args:
        clouda (ndarray):                point cloud A
        cloudb (ndarray):                point cloud B
        initial_transform (gtsam.Pose3): the initial estimate of transform between clouda and cloudb (step 1 of icp)
        max_iterations (int):            maximum iters of ICP to run before breaking

    Ret:
        bTa (gtsam.Pose3): the final transform
        icp_series (list): visualized icp for debugging
    """
    
    
    icp_series = []
    bTa = initial_transform
    i = 0
    temp = True
    while  i < max_iterations and temp:

      newClTr = transform_cloud(bTa,clouda)
      newCloudb = assign_closest_pairs(newClTr, cloudb)
      transform = estimate_transform(newClTr,newCloudb)

      if transform.equals(gtsam.Pose3.identity(), tol=1e-2):
        temp = False
      else:
        bTa = gtsam.Pose3(np.matmul(bTa.matrix(),transform.matrix()))
        i += 1
        icp_series.append([newClTr, cloudb])
    
    return bTa, icp_series

"""The animation shows how clouda has moved after each iteration of ICP. You should see stationary landmarks, like walls and parked cars, converge onto each other."""

aTb, icp_series = icp(clouda, cloudb)
visualize_clouds_animation(icp_series, speed=400, show_grid_lines=True)

"""ICP is a computationally intense algorithm and we plan to run it between each cloud pair in our 180 clouds dataset. Use the python profiler to identify the computationally expensive subroutines in your algorithm and use numpy to reduce your runtime. The TAs get ~6.5 seconds."""

import cProfile
cProfile.run('icp(clouda, cloudb)')
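
One common way to cut the runtime, sketched below, is to vectorize the closest-pair search with a broadcasted distance matrix. This assumes 3xN clouds as in the unit tests that follow; the function name is hypothetical and this is not necessarily how the TAs' version works.

import numpy as np

def assign_closest_pairs_vectorized(clouda, cloudb):
    # For each point in clouda (3xN), pick the nearest point in cloudb (3xM)
    # using a broadcasted squared-distance matrix instead of Python loops.
    diff = clouda.T[:, None, :] - cloudb.T[None, :, :]   # N x M x 3
    sq_dists = np.einsum('nmd,nmd->nm', diff, diff)      # N x M squared distances
    nearest = np.argmin(sq_dists, axis=1)                # closest cloudb index per clouda point
    return cloudb[:, nearest]                            # 3 x N, aligned with clouda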

"""These unit tests will verify the basic functionality of the functions you've implemented in this section. Keep in mind that these are not exhaustive."""

import unittest

class TestICP(unittest.TestCase):

    def setUp(self):
        self.testclouda = np.array([[1], [1], [1]])
        self.testcloudb = np.array([[2, 10], [1, 1], [1, 1]])
        self.testcloudc = np.array([[2], [1], [1]])
        self.testbTa = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1, 0, 0))
        self.testcloudd = np.array([[0, 20, 10], [0, 10, 20], [0, 0, 0]])
        self.testcloude = np.array([[10, 30, 20], [10, 20, 30], [0, 0, 0]])

    def test_assign_closest_pairs1(self):
        expected = (3, 1)
        actual = assign_closest_pairs(self.testclouda, self.testcloudb).shape
        self.assertEqual(expected, actual)

    def test_assign_closest_pairs2(self):
        expected = 2
        actual = assign_closest_pairs(self.testclouda, self.testcloudb)[0][0]
        self.assertEqual(expected, actual)

    def test_estimate_transform1(self):
        expected = 1
        actual = estimate_transform(self.testclouda, self.testcloudc).x()
        self.assertEqual(expected, actual)

    def test_estimate_transform2(self):
        expected = 10
        actual = estimate_transform(self.testcloudd, self.testcloude).x()
        self.assertAlmostEqual(expected, actual, places=7)
        actua2 = estimate_transform(self.testcloudd, self.testcloude).y()
        self.assertAlmostEqual(expected, actua2, places=7)

    def test_transform_cloud1(self):
        expected = 2
        actual = transform_cloud(self.testbTa, self.testclouda)[0][0]
        self.assertEqual(expected, actual)

    def test_icp1(self):
        ret = icp(self.testclouda, self.testcloudb)
        expected1 = type(gtsam.Pose3())
        actual1 = type(ret[0])
        self.assertEqual(expected1, actual1)
        expected2 = type([])
        actual2 = type(ret[1])
        self.assertEqual(expected2, actual2)
        expected3 = type([])
        actual3 = type(ret[1][0])
        self.assertEqual(expected3, actual3)

    def test_icp2(self):
        expected = 1
        actual = icp(self.testclouda, self.testcloudb)[0].x()
        self.assertEqual(expected, actual)

    def test_icp3(self):
        expected = 1
        actual = icp(self.testclouda, self.testcloudc)[0].x()
        self.assertEqual(expected, actual)

if __name__ == "__main__":
    unittest.main(argv=['first-arg-is-ignored'], exit=False)

"""# Factor Graph

In this section, we'll build a factor graph to estimate the pose of our vehicle using the transforms our ICP algorithm gives us between frames. These ICP transforms are the factors that tie the pose variables together.

We will be using GTSAM to construct the factor graph as well as perform an optimization for the pose of the car as it travels down the street. Let's start with a simple example first. Recall from the PoseSLAM example described in the LIDAR slides how we could add a factor (aka constraint) between two state variables. When we revisited a state, we could add a loop closure. Since the car in our dataset never revisits a previous pose, there is no loop closure. Here is that example from the slides. Note how the graph is initialized and how factors are added.
"""

# # Factor graph example 

# Helper function to create a pose
def vector3(x, y, z):
    """Create 3d double numpy array."""
    return np.array([x, y, z], dtype=np.float)

# Create noise model
priorNoise = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1))
model = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))

# Instantiate the factor graph
example_graph = gtsam.NonlinearFactorGraph()

# Adding a prior on the first pose
example_graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), priorNoise))

# Create odometry (Between) factors between consecutive poses
example_graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2, 0, 0), model)) 
example_graph.add(gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2), model)) 
example_graph.add(gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2), model)) 
example_graph.add(gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2), model)) 

# Add the loop closure constraint
example_graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2), model)) 

# Create the initial estimate
example_initial_estimate = gtsam.Values()
example_initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
example_initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
example_initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
example_initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
example_initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))

# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
ex_parameters = gtsam.GaussNewtonParams()
ex_parameters.setRelativeErrorTol(1e-5)
ex_parameters.setMaxIterations(100)
ex_optimizer = gtsam.GaussNewtonOptimizer(example_graph, example_initial_estimate, ex_parameters)
ex_result = ex_optimizer.optimize()
print("Final Result:\n{}".format(ex_result))

# Plot your graph
marginals = gtsam.Marginals(example_graph, ex_result)
fig = plt.figure(0)
for i in range(1, 6):
    gtsam_plot.plot_pose2(0, ex_result.atPose2(i), 0.5, marginals.marginalCovariance(i))

plt.axis('equal')
plt.show()

"""**TODO** [25 points]

You will be using your ICP implementation here to find the transform between two subsequent clouds. These transforms become the factors between pose variables in the graph. So, you will need to go through all the point clouds and run ICP pairwise to find the relative movement of the car. With these transformations, create a factor representing the transform between the pose variables.

We talked about how loop closure helps us consolidate conflicting data into a better global estimate. Unfortunately, our car does not perform a loop closure, so our graph would just be a long series of poses connected by ICP-returned transforms. However, our lidar scans are noisy, which means that the ICP-returned transforms are not perfect either. This ultimately results in incorrect vehicle poses and an inaccurate overall map. One way that we can augment our graph is through "skipping": we simply run ICP between every other cloud and add these skip connections into the graph. In other words, you perform ICP between two non-consecutive point clouds and add that transform as a factor in the factor graph.
"""

def populate_factor_graph(graph, initial_estimates, initial_pose, clouds):
    """Populates a gtsam.NonlinearFactorGraph with
    factors between state variables. Populates
    initial_estimates for state variables as well.

    Args:
        graph (gtsam.NonlinearFactorGraph): the factor graph populated with ICP constraints
        initial_estimates (gtsam.Values):   the populated estimates for vehicle poses
        initial_pose (gtsam.Pose3):         the starting pose for the estimates in world coordinates
        clouds (np.ndarray):                the numpy array with all our point clouds
    """
    ICP_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
    factor_pose = initial_pose

    # Add ICP Factors between each pair of clouds
    prev_T = gtsam.Pose3()
    for i in range(len(clouds) - 1):
        # TODO: Run ICP between clouds (hint: use initial_transform argument)
        bta, icp_series = icp(clouds[i], clouds[i+1], initial_transform=prev_T)
        #T = initial_transform(initial_pose, clouds)

        # TODO: Set T to its inverse: use `gtsam.Pose3.inverse()`
        T = bta.inverse()
        # TODO: Add a `gtsam.BetweenFactorPose3()` to the graph
        graph.add(gtsam.BetweenFactorPose3(i, i+1, T, ICP_NOISE))



        factor_pose = factor_pose.compose(T)
        initial_estimates.insert(i+1, factor_pose)
        print(".", end="")

    # Add skip connections between every other frame
    prev_T = gtsam.Pose3()
    for i in range(0, len(clouds) - 2, 2):
        # TODO: Run ICP between clouds (hint: use initial_transform argument)
        bta, icp_series = icp(clouds[i], clouds[i+2],initial_transform=prev_T)
        # TODO: Set T to its inverse: use `gtsam.Pose3.inverse()`
        T = bta.inverse()
        # TODO: Add a `gtsam.BetweenFactorPose3()` to the graph
        graph.add(gtsam.BetweenFactorPose3(i, i+2, T, ICP_NOISE))

        print(".", end="")

    return graph, initial_estimates

"""The real power of GTSAM will show here. In five lines, we'll setup a Gauss Newton nonlinear optimizer and optimize for the vehicle's poses in world coordinates.

Note: This cell runs your ICP implementation 180 times. If you've implemented your ICP similarly to the TAs, expect this cell to take 2 minutes. If you're missing the `initial_transform` argument for icp, it may take ~1 hour.
"""

# load in all clouds in our dataset
clouds = read_ply(*scans_fnames)

# Setting up our factor graph
graph = gtsam.NonlinearFactorGraph()
initial_estimates = gtsam.Values()

# We get the initial pose of the car from Argo AI's dataset, and we add it to the graph as such
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
initial_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                           gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
graph.add(gtsam.PriorFactorPose3(0, initial_pose, PRIOR_NOISE))
initial_estimates.insert(0, initial_pose)

# We'll use your function to populate the factor graph
graph, initial_estimates = populate_factor_graph(graph, initial_estimates, initial_pose, clouds)

# Now optimize for the states
parameters = gtsam.GaussNewtonParams()
parameters.setRelativeErrorTol(1e-5)
parameters.setMaxIterations(100)
optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimates, parameters)
result = optimizer.optimize()

"""Let's plot these poses to see how our vechicle moves.

Screenshot this for your reflection.
"""

poses_cloud = np.array([[], [], []])
for i in range(len(clouds)):
    poses_cloud = np.hstack([poses_cloud, np.array([[result.atPose3(i).x()], [result.atPose3(i).y()], [result.atPose3(i).z()]])])

init_car_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                            gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
visualize_clouds([poses_cloud, transform_cloud(init_car_pose, clouds[0])], show_grid_lines=True)

"""These unit tests will verify the basic functionality of the function you've implemented in this section. Keep in mind that these are not exhaustive."""

import unittest

class TestFactorGraph(unittest.TestCase):

    def setUp(cls):
      test_clouds = read_ply(*scans_fnames)[:6]
      PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
      ICP_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
      test_graph = gtsam.NonlinearFactorGraph()
      test_initial_estimates = gtsam.Values()
      initial_pose = gtsam.Pose3(gtsam.Rot3(0.9982740, -0.0572837,  0.0129474, 0.0575611,  0.9980955, -0.0221840, -0.0116519,  0.0228910,  0.9996701),
                          gtsam.Point3(-263.9464864482589, 2467.3015467381383, -19.374652610889633))
      test_graph.add(gtsam.PriorFactorPose3(0, initial_pose, PRIOR_NOISE))
      test_initial_estimates.insert(0, initial_pose)
      test_graph, test_initial_estimates = populate_factor_graph(test_graph, test_initial_estimates, initial_pose, test_clouds)
      cls.graph = test_graph
      cls.initial_estimates = test_initial_estimates
    
    def test_graph_params(self):
      self.assertTrue(type(self.graph) == gtsam.NonlinearFactorGraph)
    
    def test_initial_estimates_params(self):
      self.assertTrue(type(self.initial_estimates) == gtsam.Values)

def suite():
  functions = ['test_graph_params', 'test_initial_estimates_params']
  suite = unittest.TestSuite()
  for func in functions:
    suite.addTest(TestFactorGraph(func))
  return suite
    
if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    runner.run(suite())

"""# Mapping

In this section, we'll tackle the mapping component of SLAM (Simultaneous Localization and Mapping). The previous section used a factor graph to localize our vehicle's poses in world coordinates. We'll now use those poses to form a map of the street from the point clouds.

Given the poses and the clouds, this task is easy. We'll use your `transform_cloud` method from the ICP section to transform every other cloud in our dataset to be centered at the corresponding pose where the cloud was captured. Visualizing all of these clouds yields the complete map. We don't use every cloud in our dataset to reduce the amount of noise in our map while retaining plenty of detail.

Screenshot this for your reflection.
"""

cloud_map = []
for i in range(0, len(clouds), 2):
    cloud_map.append(transform_cloud(result.atPose3(i), clouds[i]))

visualize_clouds(cloud_map, show_grid_lines=True, single_color="#C6C6C6")

"""# Reflection
Example no. 16
    return np.array([x, y, z, a, b, c], dtype=np.float)


parser = argparse.ArgumentParser(
    description="A 3D Pose SLAM example that reads input from g2o, and "
                "initializes Pose3")
parser.add_argument('-i', '--input', help='input file g2o format', required=False)
args = parser.parse_args()

graph = gtsam.NonlinearFactorGraph()

# for i in range(graph_in.size()):
measurement = gtsam.Pose3(gtsam.Rot3.RzRyRx(1, 1, 1), gtsam.Point3(1, 1, 1))
model = gtsam.noiseModel_Isotropic.Sigma(6, 1.0)
factor = gtsam.BetweenFactorPose3(0, 1, measurement, model)

graph.add(factor)

# Add Prior on the first key
# priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1, 1, 1,
#                                                          1, 1, 1))
# firstKey = initial.keys().at(0)
# graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))
val = gtsam.Values()
val.insert(0, gtsam.Pose3.identity())
val.insert(1, gtsam.Pose3.identity())

def linearize(graph, val) -> gtsam.GaussianFactorGraph:
    return graph.linearize(val)
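
A usage sketch for the helper above: linearize the two-pose graph at the identity values and inspect the resulting Gaussian factor graph (note that without the commented-out prior the linear system is gauge-free, so solving it directly would be ill-posed):

gfg = linearize(graph, val)    # gtsam.GaussianFactorGraph with one Jacobian factor
print(gfg)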
Example no. 17
    def build_graph(sequence, config):
        """
        Adds noise to sequence variables / measurements. 
        Returns graph, initial_estimate
        """

        # create empty graph / estimate
        graph = gtsam.NonlinearFactorGraph()
        initial_estimate = gtsam.Values()

        # declare noise models
        prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([float(config['base']['PRIOR_SIGMA'])]*6, dtype=np.float))
        odometry_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([float(config['base']['ODOM_SIGMA'])]*6, dtype=np.float))
        bbox_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([float(config['base']['BOX_SIGMA'])]*4, dtype=np.float))

        # get noisy odometry / boxes 
        true_odometry = sequence.true_trajectory.as_odometry()
        noisy_odometry = true_odometry.add_noise(mu=0.0, sd=float(config['base']['ODOM_NOISE']))
        noisy_boxes = sequence.true_boxes.add_noise(mu=0.0, sd=float(config['base']['BOX_NOISE']))

        # initialize trajectory
        # TODO: ensure aligned in same reference frame
        initial_trajectory = noisy_odometry.as_trajectory(sequence.true_trajectory.data()[0])
        # initial_trajectory = noisy_odometry.as_trajectory()

        # initialize quadrics
        # NOTE: careful initializing with true quadrics and noise traj as it may not make sense
        if config['base']['Initialization'] == 'SVD':
            initial_quadrics = System.initialize_quadrics(initial_trajectory, noisy_boxes, sequence.calibration)
        elif config['base']['Initialization'] == 'Dataset':
            initial_quadrics = sequence.true_quadrics

        # add prior pose
        prior_factor = gtsam.PriorFactorPose3(System.X(0), initial_trajectory.at(0), prior_noise)
        graph.add(prior_factor)

        # add odometry measurements
        for (start_key, end_key), rpose in noisy_odometry.items():
            odometry_factor = gtsam.BetweenFactorPose3(System.X(start_key), System.X(end_key), rpose, odometry_noise)
            graph.add(odometry_factor)

        # add initial pose estimates
        for pose_key, pose in initial_trajectory.items():
            initial_estimate.insert(System.X(pose_key), pose)

        # add valid box measurements
        valid_objects = []
        initialized_quadrics = initial_quadrics.keys()
        for object_key in np.unique(noisy_boxes.object_keys()):

            # add if quadric initialized
            if object_key in initialized_quadrics:
                
                # get all views of quadric
                object_boxes = noisy_boxes.at_object(object_key)

                # add if enough views
                if len(object_boxes) > 3:

                    # add measurements
                    valid_objects.append(object_key)
                    for (pose_key, t), box in object_boxes.items():
                        bbf = quadricslam.BoundingBoxFactor(box, sequence.calibration, System.X(pose_key), System.Q(object_key), bbox_noise)
                        bbf.addToGraph(graph)

        # add initial landmark estimates
        for object_key, quadric in initial_quadrics.items():

            # add if seen > 3 times
            if (object_key in valid_objects):
                quadric.addToValues(initial_estimate, System.Q(object_key))

        return graph, initial_estimate
Example no. 18
problem._poses = input_poses
problem._graph = input_graph

from gtsam import writeG2o


def vector6(x, y, z, a, b, c):
    """Create 6d double numpy array."""
    return np.array([x, y, z, a, b, c], dtype=np.float)


poses = gtsam.Values()
graph = gtsam.NonlinearFactorGraph()
keys = problem._poses.keys()
import utils
for i in range(keys.size()):
    rand = utils.random_omega(1)
    randr = utils.random_omega(1)
    original: gtsam.Pose3 = input_poses.atPose3(keys.at(i))
    # poses.insert(keys.at(i), gtsam.Pose3(gtsam.Rot3.RzRyRx(randr[0], randr[1],randr[2]), gtsam.Point3(rand[0], rand[1], rand[2])))
    poses.insert(keys.at(i), original.retract(np.concatenate([rand, randr])))

for i in range(problem._graph.size()):
    factor = gtsam.dynamic_cast_BetweenFactorPose3_NonlinearFactor(
        problem._graph.at(i))
    model = gtsam.noiseModel_Isotropic.Sigma(6, 1.0)
    graph.add(
        gtsam.BetweenFactorPose3(factor.keys().at(0),
                                 factor.keys().at(1), factor.measured(),
                                 model))
writeG2o(graph, poses, g2oFile)
Example no. 19
                x_val.append(0)
                y_val.append(0)
                z_val.append(0)
                FIRST_IMAGE = False

            dt = np.transpose(new_rotate * new_translation) * prev_rot
            prev_rot = np.dot(prev_rot, new_rotate)
            prev_trans = prev_trans - dt
            # print(i, "translation: ", new_translation.T, " | ", prev_trans)
            x_val.append(prev_trans[0, 0])
            y_val.append(prev_trans[0, 1])
            z_val.append(prev_trans[0, 2])

            estimates.insert(frame_count, gtsam.Pose3(gtsam.Rot3(prev_rot), gtsam.Point3(np.array(prev_trans).reshape(3))))
            if frame_count > 0:
                factor = gtsam.BetweenFactorPose3(frame_count-1, frame_count, gtsam.Pose3(gtsam.Rot3(new_rotate), gtsam.Point3(np.array(new_translation).reshape(3))), odom_noise)
            else:
                factor = gtsam.PriorFactorPose3(0, gtsam.Pose3(gtsam.Rot3(prev_rot), gtsam.Point3(np.array(prev_trans).reshape(3))), prior_model)

            graph.add(factor)
            frame_count = frame_count + 1

            pry = calc.rotationMatrixToEulerAngles(new_rotate)
            all_pitch = np.append(all_pitch, pry[0])
            all_roll = np.append(all_roll, pry[1])
            all_yaw = np.append(all_yaw, pry[2])

            all_x = np.append(all_x, new_translation[0])
            all_y = np.append(all_y, new_translation[1])
            all_z = np.append(all_z, new_translation[2])
            if cv2.waitKey(1) & 0xFF == ord('q'):
Example no. 20
        # add a perturbation to initial pose estimates to simulate noise
        perturbed_pose = poses[i].compose(
            gtsam.Pose3(gtsam.Rot3.Rodrigues(0.1, 0.1, 0.1),
                        gtsam.Point3(0.1, 0.2, 0.3)))
        initial_estimate.insert(X(i), perturbed_pose)

    # add quadric estimate
    for i, quadric in enumerate(quadrics):
        quadric.addToValues(initial_estimate, Q(i))

    # add relative poses to graph as odometry
    for i in range(len(poses) - 1):
        relative_pose = poses[i].between(poses[i + 1])
        odometry_factor = gtsam.BetweenFactorPose3(X(i), X(i + 1),
                                                   relative_pose,
                                                   odometry_noise)
        graph.add(odometry_factor)

    # reproject true quadrics into each true pose
    for j, quadric in enumerate(quadrics):
        for i, pose in enumerate(poses):
            conic = quadricslam.QuadricCamera.project(quadric, pose,
                                                      calibration)
            bounds = conic.bounds()
            bbf = quadricslam.BoundingBoxFactor(bounds, calibration, X(i),
                                                Q(j), bbox_noise)
            bbf.addToGraph(graph)

    # define lm parameters
    parameters = gtsam.LevenbergMarquardtParams()
Example no. 21
def initialize(data, truth, options):
    # Initialize iSAM
    params = gtsam.ISAM2Params()
    if options.alwaysRelinearize:
        params.relinearizeSkip = 1
    isam = gtsam.ISAM2(params=params)

    # Add constraints/priors
    # TODO: should not be from ground truth!
    newFactors = gtsam.NonlinearFactorGraph()
    initialEstimates = gtsam.Values()
    for i in range(2):
        ii = symbol('x', i)
        if i == 0:
            if options.hardConstraint:  # add hard constraint
                newFactors.add(
                    gtsam.NonlinearEqualityPose3(ii, truth.cameras[0].pose()))
            else:
                newFactors.add(
                    gtsam.PriorFactorPose3(ii, truth.cameras[i].pose(),
                                           data.noiseModels.posePrior))
        initialEstimates.insert(ii, truth.cameras[i].pose())

    nextPoseIndex = 2

    # Add visual measurement factors from two first poses and initialize
    # observed landmarks
    for i in range(2):
        ii = symbol('x', i)
        for k in range(len(data.Z[i])):
            j = data.J[i][k]
            jj = symbol('l', j)
            newFactors.add(
                gtsam.GenericProjectionFactorCal3_S2(
                    data.Z[i][k], data.noiseModels.measurement, ii, jj,
                    data.K))
            # TODO: initial estimates should not be from ground truth!
            if not initialEstimates.exists(jj):
                initialEstimates.insert(jj, truth.points[j])
            if options.pointPriors:  # add point priors
                newFactors.add(
                    gtsam.PriorFactorPoint3(jj, truth.points[j],
                                            data.noiseModels.pointPrior))

    # Add odometry between frames 0 and 1
    newFactors.add(
        gtsam.BetweenFactorPose3(symbol('x', 0), symbol('x', 1),
                                 data.odometry[1], data.noiseModels.odometry))

    # Update ISAM
    if options.batchInitialization:  # Do a full optimize for first two poses
        batchOptimizer = gtsam.LevenbergMarquardtOptimizer(
            newFactors, initialEstimates)
        fullyOptimized = batchOptimizer.optimize()
        isam.update(newFactors, fullyOptimized)
    else:
        isam.update(newFactors, initialEstimates)

    # figure(1)tic
    # t=toc plot(frame_i,t,'r.') tic
    result = isam.calculateEstimate()
    # t=toc plot(frame_i,t,'g.')

    return isam, result, nextPoseIndex
Example no. 22
prior_noise = gtsam.noiseModel.Diagonal.Covariance(
    np.array([[3.2, 0.0, 0.0, 0.0], [0.0, 3.2, 0.0, 0.0], [0.0, 0.0, 3.2, 0.0],
              [0.0, 0.0, 0.0, 3.2]]))
prior_noise_pose = gtsam.noiseModel.Diagonal.Variances(
    np.array([0.003, 0.003, 0.001, 0.003, 0.003, 0.003]))
geo_noise = gtsam.noiseModel.Diagonal.Variances(
    np.array([0.003, 0.003, 0.001, 0.002, 0.002, 0.003]))
graph_1.add(
    lambda_prior_factor.LambdaPriorFactor(L(0), np.array([0., 0., 0., 0.]),
                                          prior_noise))
graph_1.add(
    gtsam.PriorFactorPose3(X(0), gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)),
                           prior_noise_pose))
graph_1.add(
    gtsam.BetweenFactorPose3(X(0), XO(1),
                             gtsam.Pose3(gtsam.Pose2(1.0, 0.0, -0 * 3.14 / 2)),
                             geo_noise))
graph_1.add(
    joint_lambda_pose_factor_fake_2d.JLPFactor(
        X(0), XO(1), Lambda(0), Lambda(1), [1.0, 0.0, 0.0, 0.0],
        [1.1, 0.0, 0.0, 0.0, 1.1, 0.0, 0.0, 1.1, 0.0, 1.1]))
print(graph_1)

initial = gtsam.Values()
initial.insert(Lambda(0), [0.0, 0.0, 0.0, 0.0])
initial.insert(Lambda(1), [0.0, 0.0, 0.0, 0.0])
initial.insert(
    X(0),
    gtsam.Pose3(gtsam.Rot3.Ypr(0.0, 0.0, 0.0), np.array([0.036, 1.72645,
                                                         0.0])))
initial.insert(