Example #1
    def setUpClass(cls):
        """
        Stereographic fisheye projection
        
        A stereographic fisheye projection with focal length f is defined
        as the relation r/f = 2*tan(theta/2), with r being the radius in the 
        image plane and theta the incident angle of the object point.
        """
        x, y, z = 1.0, 0.0, 1.0
        r = np.linalg.norm([x, y, z])
        u, v = 2*x/(z+r), 0.0
        cls.obj_point = np.array([x, y, z])
        cls.img_point = np.array([u, v])

        fx, fy, s, u0, v0 = 2, 2, 0, 0, 0
        k1, k2, p1, p2 = 0, 0, 0, 0
        xi = 1
        cls.stereographic = gtsam.Cal3Unified(fx, fy, s, u0, v0, k1, k2, p1, p2, xi)

        p1 = [-1.0, 0.0, -1.0]
        p2 = [ 1.0, 0.0, -1.0]
        q1 = gtsam.Rot3(1.0, 0.0, 0.0, 0.0)
        q2 = gtsam.Rot3(1.0, 0.0, 0.0, 0.0)
        pose1 = gtsam.Pose3(q1, p1)
        pose2 = gtsam.Pose3(q2, p2)
        camera1 = gtsam.PinholeCameraCal3Unified(pose1, cls.stereographic)
        camera2 = gtsam.PinholeCameraCal3Unified(pose2, cls.stereographic)
        cls.origin = np.array([0.0, 0.0, 0.0])
        cls.poses = gtsam.Pose3Vector([pose1, pose2])
        cls.cameras = gtsam.CameraSetCal3Unified([camera1, camera2])
        cls.measurements = gtsam.Point2Vector([k.project(cls.origin) for k in cls.cameras])
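A quick numeric check of the stereographic relation above (a sketch, assuming the same GTSAM Python API as the test and reusing its Cal3Unified parameters): with xi = 1 and fx = fy = 2, the object point (1, 0, 1) has incident angle theta = pi/4 and should project to (2*tan(theta/2), 0).

import math
import numpy as np
import gtsam

# Sketch: verify r = 2*tan(theta/2) for the Cal3Unified setup above (fx = fy = 2, xi = 1).
cal = gtsam.Cal3Unified(2, 2, 0, 0, 0, 0, 0, 0, 0, 1)   # fx, fy, s, u0, v0, k1, k2, p1, p2, xi
camera = gtsam.PinholeCameraCal3Unified(gtsam.Pose3(), cal)
x, y, z = 1.0, 0.0, 1.0
theta = math.atan2(math.hypot(x, y), z)                  # incident angle of the object point
img_point = camera.project(np.array([x, y, z]))
np.testing.assert_allclose(img_point, [2 * math.tan(theta / 2), 0.0], atol=1e-9)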
Example #2
def main():
    """Main runner."""
    # Create an empty nonlinear factor graph
    graph = gtsam.NonlinearFactorGraph()

    # Add a prior on the first pose, setting it to the origin
    # A prior factor consists of a mean and a noise model (covariance matrix)
    priorMean = gtsam.Pose3()  # prior at origin
    graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))

    # Add GPS factors
    gps = gtsam.Point3(lat0, lon0, h0)
    graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
    print("\nFactor Graph:\n{}".format(graph))

    # Create the data structure to hold the initial estimate of the solution
    # For illustrative purposes, these have been deliberately set to incorrect values
    initial = gtsam.Values()
    initial.insert(1, gtsam.Pose3())
    print("\nInitial Estimate:\n{}".format(initial))

    # optimize using Levenberg-Marquardt optimization
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
    result = optimizer.optimize()
    print("\nFinal Result:\n{}".format(result))
Example #3
def sim3_transform_map(result, nrCameras, nrPoints):
    # Similarity transform
    s_poses = []
    s_points = []
    _sim3 = sim3.Similarity3()

    for i in range(nrCameras):
        pose_i = result.atPose3(symbol(ord('x'), i))
        s_poses.append(pose_i)

    for j in range(nrPoints):
        point_j = result.atPoint3(symbol(ord('p'), j))
        s_points.append(point_j)

    theta = np.radians(30)
    wRc = gtsam.Rot3(np.array([[-math.sin(theta), 0, math.cos(theta)],
                               [-math.cos(theta), 0, -math.sin(theta)],
                               [0, 1, 0]]))
    d_pose1 = gtsam.Pose3(
        wRc, gtsam.Point3(-math.sin(theta)*1.58, -math.cos(theta)*1.58, 1.2))
    d_pose2 = gtsam.Pose3(
        wRc, gtsam.Point3(-math.sin(theta)*1.58*3, -math.cos(theta)*1.58*3, 1.2))
    pose_pairs = [[s_poses[2], d_pose2], [s_poses[0], d_pose1]]

    _sim3.align_pose(pose_pairs)
    print('R', _sim3._R)
    print('t', _sim3._t)
    print('s', _sim3._s)

    s_map = (s_poses, s_points)
    d_map = _sim3.map_transform(s_map)

    return _sim3, d_map
Example #4
    def setUpClass(cls):
        """
        Equidistant fisheye projection
        
        An equidistant fisheye projection with focal length f is defined
        as the relation r/f = theta, with r being the radius in the
        image plane and theta the incident angle of the object point.
        """
        x, y, z = 1.0, 0.0, 1.0
        u, v = np.arctan2(x, z), 0.0
        cls.obj_point = np.array([x, y, z])
        cls.img_point = np.array([u, v])

        p1 = [-1.0, 0.0, -1.0]
        p2 = [1.0, 0.0, -1.0]
        q1 = gtsam.Rot3(1.0, 0.0, 0.0, 0.0)
        q2 = gtsam.Rot3(1.0, 0.0, 0.0, 0.0)
        pose1 = gtsam.Pose3(q1, p1)
        pose2 = gtsam.Pose3(q2, p2)
        camera1 = gtsam.PinholeCameraCal3Fisheye(pose1)
        camera2 = gtsam.PinholeCameraCal3Fisheye(pose2)
        cls.origin = np.array([0.0, 0.0, 0.0])
        cls.poses = gtsam.Pose3Vector([pose1, pose2])
        cls.cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2])
        cls.measurements = gtsam.Point2Vector(
            [k.project(cls.origin) for k in cls.cameras])
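A minimal check of the equidistant relation (a sketch, assuming the default Cal3Fisheye calibration with f = 1 as in the cameras above): the object point (1, 0, 1) has incident angle theta = pi/4, so it should project to (pi/4, 0).

import numpy as np
import gtsam

# Sketch: verify r = f * theta for a default-calibration (f = 1) fisheye camera at the origin.
camera = gtsam.PinholeCameraCal3Fisheye(gtsam.Pose3())
obj_point = np.array([1.0, 0.0, 1.0])
expected = np.array([np.arctan2(1.0, 1.0), 0.0])   # theta = pi/4
np.testing.assert_allclose(camera.project(obj_point), expected, atol=1e-9)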
Example #5
 def add_node(self, kf):
     self.initialEstimate.insert(X(kf.kfID), gt.Pose3(kf.pose_matrix()))
     for kf_n, rel_pose, _ in kf.neighbors:
         if kf_n.kfID > kf.kfID:
             continue
         self.graph.add(
             gt.BetweenFactorPose3(X(kf.kfID), X(kf_n.kfID),
                                   gt.Pose3(rel_pose), self.covariance))
Example #6
    def add_mr_object_measurement(self,
                                  stack,
                                  class_realization,
                                  new_input_object_prior=None,
                                  new_input_object_covariance=None):
        graph_add = gtsam.NonlinearFactorGraph()
        initial_add = gtsam.Values()

        idx = 0
        for obj in stack:

            cov_noise_model = stack[obj]['covariance']
            exp_gtsam = gtsam.Pose3(
                gtsam.Rot3.Ypr(stack[obj]['expectation'][2],
                               stack[obj]['expectation'][1],
                               stack[obj]['expectation'][0]),
                gtsam.Point3(stack[obj]['expectation'][3],
                             stack[obj]['expectation'][4],
                             stack[obj]['expectation'][5]))

            pose_rotation_matrix = exp_gtsam.rotation().matrix()
            cov_noise_model_rotated = self.rotate_cov_6x6(
                cov_noise_model, np.transpose(pose_rotation_matrix))
            cov_noise_model_rotated = gtsam.noiseModel.Gaussian.Covariance(
                cov_noise_model_rotated)
            #updating the graph
            self.graph.add(
                gtsam.PriorFactorPose3(self.XO(obj), exp_gtsam,
                                       cov_noise_model_rotated))
            graph_add.add(
                gtsam.PriorFactorPose3(self.XO(obj), exp_gtsam,
                                       cov_noise_model_rotated))

            self.prop_belief.add(
                gtsam.PriorFactorPose3(self.XO(obj), exp_gtsam,
                                       cov_noise_model_rotated))

            if self.prop_initial.exists(self.XO(obj)) is False:
                self.prop_initial.insert(
                    self.XO(obj), gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))

            # if obj in new_input_object_prior and obj not in self.classRealization:
            #     self.graph.add(gtsam.PriorFactorPose3(self.XO(obj), new_input_object_prior[obj],
            #                                           new_input_object_covariance[obj]))
            #     self.prop_belief.add(gtsam.PriorFactorPose3(self.XO(obj), new_input_object_prior[obj],
            #                                                 new_input_object_covariance[obj]))
            self.classRealization[obj] = class_realization[obj]

            if self.isam.value_exists(self.XO(obj)) is False:
                initial_add.insert(self.XO(obj),
                                   gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))

            idx += 1

            if stack[obj]['weight'] > -322:
                self.logWeight += stack[obj]['weight']  # + 150
            else:
                self.logWeight = -math.inf
Example #7
 def test_allPose3s(self):
     """Test allPose3s."""
     initial = gtsam.Values()
     initial.insert(0, gtsam.Pose3())
     initial.insert(2, gtsam.Point2(1, 1))
     initial.insert(1, gtsam.Pose3())
     initial.insert(3, gtsam.Point3(1, 2, 3))
     result = gtsam.utilities.allPose3s(initial)
     self.assertEqual(result.size(), 2)
Example #8
 def test_reprojectionErrors(self):
     """Test reprojectionErrors."""
     pixels = np.asarray([[20, 30], [20, 30]])
     I = [1, 2]
     K = gtsam.Cal3_S2()
     graph = gtsam.NonlinearFactorGraph()
     gtsam.utilities.insertProjectionFactors(
         graph, 0, I, pixels, gtsam.noiseModel.Isotropic.Sigma(2, 0.1), K)
     values = gtsam.Values()
     values.insert(0, gtsam.Pose3())
     cam = gtsam.PinholeCameraCal3_S2(gtsam.Pose3(), K)
     gtsam.utilities.insertBackprojections(values, cam, I, pixels, 10)
     errors = gtsam.utilities.reprojectionErrors(graph, values)
     np.testing.assert_allclose(errors, np.zeros((2, 2)))
Example #9
    def setUp(self):
        """Set up two constraints to test."""
        aTb = gtsam.Pose3(gtsam.Rot3.Yaw(np.pi / 2), gtsam.Point3(1, 2, 3))
        cov = gtsam.noiseModel.Isotropic.Variance(6, 1e-3).covariance()
        counts = np.zeros((5, 5))
        counts[0][0] = 200
        self.a_constraint_b = Constraint(1, 2, aTb, cov, counts)

        aTc = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(0.5, 0.5, 0.5))
        variances = np.array([1e-4, 1e-5, 1e-6, 1e-1, 1e-2, 1e-3])
        aCOVc = gtsam.noiseModel.Diagonal.Variances(variances).covariance()
        counts = np.zeros((5, 5))
        counts[1][2] = 100
        self.a_constraint_c = Constraint(1, 3, aTc, aCOVc, counts)
Example #10
def plot_cameras_gtsam(transform_matrix_filename: str):
    set_cams = np.load(
        '/home/sushmitawarrier/clevr-dataset-gen/output/images/CLEVR_new000000/set_cams.npz'
    )
    with open(transform_matrix_filename) as f:
        transforms = json.load(f)
    figure_number = 0
    fig = plt.figure(figure_number)
    axes = fig.gca(projection='3d')
    plt.cla()
    # Plot cameras
    idx = 0
    for i in range(len(set_cams['set_rot'])):
        print(set_cams['set_rot'][i])
        print("i", i)
        rot = set_cams['set_rot'][i]
        t = set_cams['set_loc'][i]
        rot_3 = gtsam.Rot3.RzRyRx(rot)
        pose_i = gtsam.Pose3(rot_3, t)
        if idx % 2 == 0:
            gtsam_plot.plot_pose3(figure_number, pose_i, 1)
        # only the first camera from the .npz file is plotted
        break

    for i in transforms['frames']:
        T = i['transform_matrix']
        pose_i = gtsam.Pose3(T)
        #print("T", T)
        #print("pose_i",pose_i)
        #print(set_cams['set_loc'][0])
        #print(set_cams['set_rot'][0])
        # Reference pose
        # trying to look along x-axis, 0.2m above ground plane
        # x forward, y left, z up (x-y is the ground plane)
        upright = gtsam.Rot3.Ypr(-np.pi / 2, 0., -np.pi / 2)
        pose_j = gtsam.Pose3(upright, gtsam.Point3(0, 0, 0.2))
        axis_length = 1
        # plotting alternate poses
        if idx % 2 == 0:
            # gtsam_plot.plot_pose3(figure_number, pose_i, axis_length)
            gtsam_plot.plot_pose3(figure_number, pose_j, axis_length)
        idx += 1
    x_axe, y_axe, z_axe = 10, 10, 10
    # Draw
    axes.set_xlim3d(-x_axe, x_axe)
    axes.set_ylim3d(-y_axe, y_axe)
    axes.set_zlim3d(0, z_axe)
    plt.legend()
    plt.show()
Example #11
def circlePose3(numPoses=8, radius=1.0, symbolChar='\0'):
    """
    circlePose3 generates a set of poses in a circle. This function
    returns those poses inside a gtsam.Values object, with sequential
    keys starting from 0. An optional character may be provided, which
    will be stored in the msb of each key (i.e. gtsam.Symbol).

    We use aerospace/navlab convention, X forward, Y right, Z down
    First pose will be at (R,0,0)
    ^y   ^ X
    |    |
    z-->xZ--> Y  (z pointing towards viewer, Z pointing away from viewer)
    Vehicle at p0 is looking towards y axis (X-axis points towards world y)
    """

    values = gtsam.Values()
    theta = 0.0
    dtheta = 2 * pi / numPoses
    gRo = gtsam.Rot3(
        np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]], order='F'))
    for i in range(numPoses):
        key = gtsam.symbol(symbolChar, i)
        gti = gtsam.Point3(radius * cos(theta), radius * sin(theta), 0)
        oRi = gtsam.Rot3.Yaw(
            -theta)  # negative yaw goes counterclockwise, with Z down !
        gTi = gtsam.Pose3(gRo.compose(oRi), gti)
        values.insert(key, gTi)
        theta = theta + dtheta
    return values
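A short usage sketch for circlePose3 (the symbol character 'x' is an illustrative choice, not from the original):

import gtsam

# Sketch: eight poses on a circle of radius 4, keyed as x0..x7; the first pose sits at (4, 0, 0).
values = circlePose3(numPoses=8, radius=4.0, symbolChar='x')
pose0 = values.atPose3(gtsam.symbol('x', 0))
print(pose0.translation())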
Example #12
def icp(clouda, cloudb, initial_transform=gtsam.Pose3(), max_iterations=25):
    """Runs ICP on two clouds by calling
    all five steps implemented above.
    Iterates until close enough or max
    iterations.

    Returns a series of intermediate clouds
    for visualization purposes.

    Args:
        clouda (ndarray):                point cloud A
        cloudb (ndarray):                point cloud B
        initial_transform (gtsam.Pose3): the initial estimate of transform between clouda and cloudb (step 1 of icp)
        max_iterations (int):            maximum iters of ICP to run before breaking

    Ret:
        bTa (gtsam.Pose3): the final transform
        icp_series (list): visualized icp for debugging
    """
    bTa = initial_transform
    icp_series = []
    for i in range(max_iterations):
        icp_series.append([clouda, cloudb])
        transA = transform_cloud(bTa, clouda)
        cloudb = assign_closest_pairs(transA, cloudb)
        bTa = estimate_transform(clouda, cloudb)
    return bTa, icp_series
Example #13
def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
                   wRc_values: gtsam.Values) -> gtsam.Values:
    """Estimate poses given rotations and normalized translation directions between cameras.

    Args:
        i_iZj_list: List of normalized translation direction measurements between camera pairs, 
                    Z here refers to measurements. The measurements are of camera j with reference 
                    to camera i (iZj), in camera i's coordinate frame (i_). iZj represents a unit 
                    vector to j in i's frame and is not a transformation. 
        wRc_values: Rotations of the cameras in the world frame.

    Returns:
        gtsam.Values: Estimated poses.
    """

    # Convert the translation direction measurements to world frame using the rotations.
    w_iZj_list = gtsam.BinaryMeasurementsUnit3()
    for i_iZj in i_iZj_list:
        w_iZj = gtsam.Unit3(
            wRc_values.atRot3(i_iZj.key1()).rotate(i_iZj.measured().point3()))
        w_iZj_list.append(
            gtsam.BinaryMeasurementUnit3(i_iZj.key1(), i_iZj.key2(), w_iZj,
                                         i_iZj.noiseModel()))

    # Remove the outliers in the unit translation directions.
    w_iZj_inliers = filter_outliers(w_iZj_list)

    # Run the optimizer to obtain translations for normalized directions.
    wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run()

    wTc_values = gtsam.Values()
    for key in wRc_values.keys():
        wTc_values.insert(
            key, gtsam.Pose3(wRc_values.atRot3(key), wtc_values.atPoint3(key)))
    return wTc_values
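The core step above, rotating a body-frame direction measurement into the world frame, can be shown in isolation (a sketch with made-up values; wRi and the measured direction are assumptions):

import numpy as np
import gtsam

# Sketch: a unit direction to camera j measured in camera i's frame, re-expressed in the world frame.
wRi = gtsam.Rot3.Yaw(np.pi / 2)                  # rotation of camera i in the world frame
i_iZj = gtsam.Unit3(np.array([1.0, 0.0, 0.0]))   # direction to j, in i's frame
w_iZj = gtsam.Unit3(wRi.rotate(i_iZj.point3()))  # same direction in the world frame
print(w_iZj.point3())                            # approximately [0, 1, 0]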
Example #14
    def action_generator(action, action_noise_diag, no_noise=False):
        """

        :type action: gtsam.Pose3
        """
        action_noise = np.diag(action_noise_diag)
        action_rotation = action.rotation().rpy()
        action_vector = np.array([
            action_rotation[2], action_rotation[1], action_rotation[0],
            action.x(),
            action.y(),
            action.z()
        ])
        generated_action = np.random.multivariate_normal(
            action_vector, action_noise)
        generated_action_pose = gtsam.Pose3(
            gtsam.Rot3.Ypr(generated_action[0], generated_action[1],
                           generated_action[2]),
            np.array([
                generated_action[3], generated_action[4], generated_action[5]
            ]))
        if no_noise is False:
            return generated_action_pose
        else:
            return action
Example #15
def n2g(numpy_arr, obj):
    if obj == "Quaternion":
        x, y, z, w = numpy_arr
        return gtsam.Rot3.Quaternion(w, x, y, z)
    elif obj == "Euler":
        roll, pitch, yaw = numpy_arr
        return gtsam.Rot3.Ypr(yaw, pitch, roll)
    elif obj == "Point2":
        x, y = numpy_arr
        return gtsam.Point2(x, y)
    elif obj == "Pose2":
        x, y, yaw = numpy_arr
        return gtsam.Pose2(x, y, yaw)
    elif obj == "Point3":
        x, y, z = numpy_arr
        return gtsam.Point3(x, y, z)
    elif obj == "Pose3":
        x, y, z, roll, pitch, yaw = numpy_arr
        return gtsam.Pose3(gtsam.Rot3.Ypr(yaw, pitch, roll),
                           gtsam.Point3(x, y, z))
    elif obj == "imuBiasConstantBias":
        imu_bias = numpy_arr
        return gtsam.imuBias_ConstantBias(np.array(imu_bias[:3]),
                                          np.array(imu_bias[3:]))
    elif obj == "Vector":
        return np.array(numpy_arr)
    else:
        raise NotImplementedError("Not implemented from numpy to " + obj)
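Usage sketch for the converter above (argument order follows the branches of n2g; the values are illustrative):

import numpy as np

# Sketch: build GTSAM objects from plain arrays via n2g.
rot = n2g([0.0, 0.0, 0.0, 1.0], "Quaternion")               # x, y, z, w
pose = n2g([1.0, 2.0, 0.5, 0.0, 0.0, np.pi / 2], "Pose3")   # x, y, z, roll, pitch, yaw
bias = n2g(np.zeros(6), "imuBiasConstantBias")              # accelerometer bias (3) + gyro bias (3)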
Example #16
 def setUp(self):
     self.testclouda = np.array([[1], [1], [1]])
     self.testcloudb = np.array([[2, 10], [1, 1], [1, 1]])
     self.testcloudc = np.array([[2], [1], [1]])
     self.testbTa = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1, 0, 0))
     self.testcloudd = np.array([[0, 20, 10], [0, 10, 20], [0, 0, 0]])
     self.testcloude = np.array([[10, 30, 20], [10, 20, 30], [0, 0, 0]])
Example #17
    def test_jacobian(self):
        """Evaluate jacobian at optical axis"""
        obj_point_on_axis = np.array([0, 0, 1])
        img_point = np.array([0.0, 0.0])
        pose = gtsam.Pose3()
        camera = gtsam.Cal3Unified()
        state = gtsam.Values()
        camera_key, pose_key, landmark_key = K(0), P(0), L(0)
        state.insert_cal3unified(camera_key, camera)
        state.insert_point3(landmark_key, obj_point_on_axis)
        state.insert_pose3(pose_key, pose)
        g = gtsam.NonlinearFactorGraph()
        noise_model = gtsam.noiseModel.Unit.Create(2)
        factor = gtsam.GeneralSFMFactor2Cal3Unified(img_point, noise_model,
                                                    pose_key, landmark_key,
                                                    camera_key)
        g.add(factor)
        f = g.error(state)
        gaussian_factor_graph = g.linearize(state)
        H, z = gaussian_factor_graph.jacobian()
        self.assertAlmostEqual(f, 0)
        self.gtsamAssertEquals(z, np.zeros(2))
        self.gtsamAssertEquals(H @ H.T, 4 * np.eye(2))

        Dcal = np.zeros((2, 10), order='F')
        Dp = np.zeros((2, 2), order='F')
        camera.calibrate(img_point, Dcal, Dp)

        self.gtsamAssertEquals(
            Dcal,
            np.array([[0., 0., 0., -1., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., 0., -1., 0., 0., 0., 0., 0.]]))
        self.gtsamAssertEquals(Dp, np.array([[1., -0.], [-0., 1.]]))
Example #18
def estimate_transform(clouda, cloudb):
    """Estimate the transform from clouda to
    cloudb. Returns a gtsam.Pose3 object.

    Args:
        clouda (ndarray): point cloud A
        cloudb (ndarray): point cloud B
    """
    if clouda.shape != cloudb.shape and clouda.shape[1] < 3:
        return None

    centroida = np.average(clouda, axis=1)
    centroidb = np.average(cloudb, axis=1)

    clouda_prime = clouda - centroida[:, np.newaxis]
    cloudb_prime = cloudb - centroidb[:, np.newaxis]
    H = np.sum(clouda_prime.T[:, :, None] * cloudb_prime.T[:, None], axis=0)

    aRb = gtsam.Rot3.ClosestTo(H)
    rot_centroidb = aRb.rotate(gtsam.Point3(centroidb))
    atb = gtsam.Point3(
        centroida -
        np.array([rot_centroidb.x(),
                  rot_centroidb.y(),
                  rot_centroidb.z()]))

    return gtsam.Pose3(aRb, atb).inverse()
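A quick sanity check for estimate_transform (a sketch; the clouds, rotation, and tolerance are illustrative): build cloud B by rigidly moving cloud A and confirm the recovered bTa matches the known transform.

import numpy as np
import gtsam

# Sketch: recover a known rigid transform from perfectly corresponding 3xN clouds.
np.random.seed(0)
bTa_true = gtsam.Pose3(gtsam.Rot3.Yaw(0.3), gtsam.Point3(1.0, -2.0, 0.5))
clouda = np.random.rand(3, 10)
T = bTa_true.matrix()                       # 4x4 homogeneous transform
cloudb = T[:3, :3] @ clouda + T[:3, 3:4]    # cloud A moved by bTa_true
bTa_est = estimate_transform(clouda, cloudb)
np.testing.assert_allclose(bTa_est.matrix(), bTa_true.matrix(), atol=1e-6)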
Example #19
def icp(clouda, cloudb, initial_transform=gtsam.Pose3(), max_iterations=25):
    """Runs ICP on two clouds by calling
    all five steps implemented above.
    Iterates until close enough or max
    iterations.

    Returns a series of intermediate clouds
    for visualization purposes.

    Args:
        clouda (ndarray):                point cloud A
        cloudb (ndarray):                point cloud B
        initial_transform (gtsam.Pose3): the initial estimate of transform between clouda and cloudb (step 1 of icp)
        max_iterations (int):            maximum iters of ICP to run before breaking

    Ret:
        bTa (gtsam.Pose3): the final transform
        icp_series (list): visualized icp for debugging
    """
    bTa = initial_transform
    cA = clouda
    cB = cloudb

    icp_series = [[cA, cB]]
    count = 0
    while count < max_iterations:  # optionally also stop once the clouds stop moving
        transA = transform_cloud(bTa, cA)      # move cloud A by the current estimate
        cB = assign_closest_pairs(transA, cB)  # re-associate nearest neighbors in cloud B
        bTa = estimate_transform(cA, cB)       # re-estimate the full transform A -> B
        icp_series.append([transA, cB])
        count += 1

    return bTa, icp_series
Example #20
    def add_measurement(self, measurement, new_da_realization, graph, initial,
                        da_realization, class_realization):
        """

        :type graph: gtsam.NonlinearFactorGraph
        :type initial: gtsam.Values
        """

        # Add a factor for each measurement with the corresponding data association realization
        measurement_number = 0
        path_length = len(da_realization) - 1
        for realization in new_da_realization:
            realization = int(realization)
            graph.add(
                libNewFactorFake_p5.ClsModelFactor3(
                    self.X(path_length), self.XO(realization),
                    self.ClsModelCore[class_realization[
                        new_da_realization[measurement_number] - 1] - 1],
                    measurement[measurement_number]))
            measurement_number += 1

            # Initialization of object of the realization
            # TODO: CHANGE TO SOMETHING MORE ACCURATE / LESS BASIC
            if initial.exists(self.XO(realization)) is False:
                initial.insert(self.XO(realization),
                               gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))
Example #21
    def add_measurement(self, measurement, new_da_realization, graph, initial,
                        da_realization, class_realization):
        """
        :type graph: gtsam.NonlinearFactorGraph
        :type initial: gtsam.Values
        """

        # Add a factor for each measurement with the corresponding data association realization
        measurement_number = 0
        #path_length = len(da_realization) - 1
        path_length = len(da_realization)

        for obj in new_da_realization:

            obj = int(obj)

            for obj_index in class_realization:
                if obj == obj_index[0]:
                    obj_class = obj_index[1]

            logit_measurement = self.prob_vector_2_logit(
                measurement[measurement_number])
            graph.add(
                cls_un_model_fake_1d_conf.ClsUModelFactor(
                    self.X(path_length), self.XO(obj), logit_measurement,
                    obj_class))

            measurement_number += 1

            # Initialization of object of the realization
            # TODO: CHANGE TO SOMETHING MORE ACCURATE / LESS BASIC
            if initial.exists(self.XO(obj)) is False:
                initial.insert(self.XO(obj),
                               gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))
Example #22
    def motion_sample(self, action, action_noise_matrix, ML=False):

        # Taking the path length for using the latest pose
        self.optimize()
        path_length = len(self.daRealization)
        new_pose = self.result.atPose3(self.X(path_length)).compose(action)

        new_pose_vector = [
            new_pose.rotation().rpy()[0],
            new_pose.rotation().rpy()[1],
            new_pose.rotation().rpy()[2],
            new_pose.x(),
            new_pose.y(),
            new_pose.z()
        ]

        # Sample unless ML assumption is true
        if ML is False:

            sampled_new_pose_vector = np.random.multivariate_normal(
                new_pose_vector, action_noise_matrix)
            return gtsam.Pose3(
                gtsam.Rot3.Ypr(sampled_new_pose_vector[2],
                               sampled_new_pose_vector[1],
                               sampled_new_pose_vector[0]),
                np.array([
                    sampled_new_pose_vector[3], sampled_new_pose_vector[4],
                    sampled_new_pose_vector[5]
                ]))
        else:
            return new_pose
Example #23
    def log_pdf_value(self, measurement, point_x, point_xo):
        """

        :type point_x: gtsam.Pose3
        :type point_xo: gtsam.Pose3
        :type measurement: gtsam.Pose3
        """

        meas_x = measurement[2] * np.cos(measurement[0] / 180 * np.pi) \
                 * np.cos(measurement[1] / 180 * np.pi)
        meas_y = measurement[2] * np.sin(measurement[0] / 180 * np.pi) \
                 * np.cos(measurement[1] / 180 * np.pi)
        meas_z = measurement[2] * np.sin(measurement[1] / 180 * np.pi)

        pose_meas = gtsam.Pose3(gtsam.Rot3.Ypr(0.0, 0.0, 0.0),
                                np.array([meas_x, meas_y, meas_z]))

        # Create an auxiliary factor and compute its log-pdf value (without the normalizing constant)
        aux_factor = gtsam.BetweenFactorPose3(1, 2, pose_meas,
                                              self.model_noise)
        poses = gtsam.Values()
        poses.insert(1, point_x)
        poses.insert(2, point_xo)

        return aux_factor.error(poses)
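For reference, the spherical-to-Cartesian conversion above (two angles in degrees, then a range) can be checked on its own; the azimuth/elevation naming is an interpretation of measurement[0] and measurement[1]:

import numpy as np

# Sketch: measurement = [azimuth_deg, elevation_deg, range]; 90 deg azimuth at zero elevation, range 2.
azimuth_deg, elevation_deg, rng = 90.0, 0.0, 2.0
meas_x = rng * np.cos(np.radians(azimuth_deg)) * np.cos(np.radians(elevation_deg))
meas_y = rng * np.sin(np.radians(azimuth_deg)) * np.cos(np.radians(elevation_deg))
meas_z = rng * np.sin(np.radians(elevation_deg))
print(np.round([meas_x, meas_y, meas_z], 6))   # -> [0. 2. 0.]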
Example #24
def gtsamOpt(inputfname, outputfname):

    graph, initial = gtsam.readG2o(inputfname, True)
    # Add Prior on the first key
    priorModel = gtsam.noiseModel_Diagonal.Variances(
        vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4))

    print("Adding prior to g2o file ")
    graphWithPrior = graph
    firstKey = initial.keys().at(0)
    graphWithPrior.add(
        gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))

    params = gtsam.GaussNewtonParams()
    params.setVerbosity(
        "Termination")  # this will show info about stopping conds
    optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params)
    result = optimizer.optimize()
    print("Optimization complete")

    print("initial error = ", graphWithPrior.error(initial))
    print("final error = ", graphWithPrior.error(result))

    print("Writing results to file: ", outputfname)
    graphNoKernel, _ = gtsam.readG2o(inputfname, True)
    gtsam.writeG2o(graphNoKernel, result, outputfname)
    print("Done!")
Example #25
def gtsamOpt2PoseStampedarray(inputfname, pose_array):
    pointID = 0
    graph, initial = gtsam.readG2o(inputfname, True)
    # Add Prior on the first key
    priorModel = gtsam.noiseModel_Diagonal.Variances(
        vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4))

    print("Adding prior to g2o file ")
    graphWithPrior = graph
    firstKey = initial.keys().at(0)
    graphWithPrior.add(
        gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))

    params = gtsam.GaussNewtonParams()
    params.setVerbosity(
        "Termination")  # this will show info about stopping conds
    optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params)
    result = optimizer.optimize()
    print("Optimization complete")

    print("initial error = ", graphWithPrior.error(initial))
    print("final error = ", graphWithPrior.error(result))

    resultPoses = gtsam.allPose3s(result)
    keys = resultPoses.keys()
    for i in range(keys.size()):
        posetmp = gstamPose2Transform(resultPoses.atPose3(keys.at(i)))
        posestampedtmp = PoseStamped()
        posestampedtmp.header.frame_id = str(keys.at(i))
        posestampedtmp.pose = posetmp
        pose_array.poseArray.append(posestampedtmp)
Example #26
    def model_sample(pose_x, pose_xo, noise=np.eye(6), ML_sample=False):
        """

        :type pose_x: gtsam.Pose3
        :type pose_xo: gtsam.Pose3
        :type noise: numpy.ndarray (6x6 covariance)
        """

        rel_pose = pose_x.between(pose_xo)
        rel_pose_vector = [
            rel_pose.rotation().rpy()[0],
            rel_pose.rotation().rpy()[1],
            rel_pose.rotation().rpy()[2],
            rel_pose.x(),
            rel_pose.y(),
            rel_pose.z()
        ]
        if ML_sample is False:

            sampled_rel_pose_vector = np.random.multivariate_normal(
                rel_pose_vector, noise)
            return gtsam.Pose3(
                gtsam.Rot3.Ypr(sampled_rel_pose_vector[2],
                               sampled_rel_pose_vector[1],
                               sampled_rel_pose_vector[0]),
                np.array([
                    sampled_rel_pose_vector[3], sampled_rel_pose_vector[4],
                    sampled_rel_pose_vector[5]
                ]))
        else:
            return rel_pose
Example #27
    def add_measurement(self, measurement, new_da_realization, graph, initial,
                        da_realization):
        """

        :type graph: gtsam.NonlinearFactorGraph
        :type initial: gtsam.Values
        """

        # Add a factor for each measurement with the corresponding data association realization
        measurement_number = 0
        path_length = len(da_realization)
        for realization in new_da_realization:
            realization = int(realization)
            graph.add(
                gtsam.BetweenFactorPose3(self.X(path_length),
                                         self.XO(realization),
                                         measurement[measurement_number],
                                         self.model_noise))
            measurement_number += 1

            # Initialization of object of the realization
            # TODO: CHANGE TO SOMETHING MORE ACCURATE / LESS BASIC
            if initial.exists(self.XO(realization)) is False:
                initial.insert(self.XO(realization),
                               gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)))

        # Saving the data association realization in a list
        da_realization[path_length - 1] = new_da_realization
Example #28
    def __init__(self, goal_pose=gtsam.Pose3(gtsam.Pose2(0.0, 0.0, 0.0)), goal_weight=0,
                 information_theoretic_weight=0, lambda_uncertainty_weight=0):

        self.goal_pose = goal_pose
        self.goal_weight = goal_weight
        self.information_theoretic_weight = information_theoretic_weight
        self.lambda_uncertainty_weight = lambda_uncertainty_weight
Example #29
 def test_perturbPoint2(self):
     """Test perturbPoint2."""
     values = gtsam.Values()
     values.insert(0, gtsam.Pose3())
     values.insert(1, gtsam.Point2(1, 1))
     gtsam.utilities.perturbPoint2(values, 1.0)
     self.assertTrue(
         not np.allclose(values.atPoint2(1), gtsam.Point2(1, 1)))
Example #30
 def test_extractPose3(self):
     """Test extractPose3."""
     initial = gtsam.Values()
     pose3 = np.asarray([1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0.])
     initial.insert(1, gtsam.Pose2(0.0, 0.1, 0.1))
     initial.insert(2, gtsam.Pose3())
     np.testing.assert_allclose(gtsam.utilities.extractPose3(initial),
                                pose3.reshape(-1, 12))