Example #1
    def test_between(self):
        """Test between method."""
        T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2))
        T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
        expected = T2.inverse().compose(T3)
        actual = T2.between(T3)
        self.gtsamAssertEquals(actual, expected, 1e-6)
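The expected value above is just the definition of `between`: the relative pose b2Tb3 satisfying T2 * b2Tb3 = T3. A minimal standalone sketch of that identity (not part of the original test suite; it assumes only the standard gtsam Python API):

from gtsam import Point3, Pose3, Rot3

# Composing T2 with T2.between(T3) must recover T3.
T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2))
T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
assert T2.compose(T2.between(T3)).equals(T3, 1e-9)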
Example #2
    def run(
        self, num_images: int,
        i2Ri1_dict: Dict[Tuple[int, int],
                         Optional[Rot3]]) -> List[Optional[Rot3]]:
        """Run the rotation averaging.

        Args:
            num_images: number of poses.
            i2Ri1_dict: relative rotations as dictionary (i1, i2): i2Ri1.

        Returns:
            Global rotations for each camera pose, i.e. wRi, as a list. The number of entries in the list is
                `num_images`. The list may contain `None` where the global rotation could not be computed
                (e.g. an under-constrained or ill-constrained system).
        """
        if len(i2Ri1_dict) == 0:
            return [None] * num_images

        # Create a deterministic random seed from one of the relative rotations.
        seed_rotation = next(iter(i2Ri1_dict.values()))
        np.random.seed(int(1000 * seed_rotation.xyz()[0]) % (2**32))

        # TODO: do not assign values where we do not have any edge

        # generate dummy rotations
        wRi_list = []
        for _ in range(num_images):
            random_vector = np.random.rand(3) * 2 * np.pi
            wRi_list.append(
                Rot3.Rodrigues(random_vector[0], random_vector[1],
                               random_vector[2]))

        return wRi_list
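A hedged usage sketch for the method above. The enclosing class is not shown in the snippet, so `DummyRotationAveraging` below is a placeholder name; the call pattern (a dict of relative rotations keyed by image-index pairs, plus the number of images) follows the docstring:

from gtsam import Rot3

# Two relative-rotation measurements over three images (indices 0, 1, 2).
# `DummyRotationAveraging` is a placeholder for whichever class defines run() above.
i2Ri1_dict = {
    (0, 1): Rot3.Rodrigues(0.1, 0.0, 0.0),
    (1, 2): Rot3.Rodrigues(0.0, 0.2, 0.0),
}
wRi_list = DummyRotationAveraging().run(num_images=3, i2Ri1_dict=i2Ri1_dict)
assert len(wRi_list) == 3  # one (possibly None) global rotation per image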
Example #3
    def test_transformFrom(self):
        """Test transformFrom method."""
        pose = Pose3(Rot3.Rodrigues(0, 0, -math.pi / 2), Point3(2, 4, 0))
        actual = pose.transformFrom(Point3(2, 1, 10))
        expected = Point3(3, 2, 10)
        self.gtsamAssertEquals(actual, expected, 1e-6)

        # multi-point version
        points = np.stack([Point3(2, 1, 10), Point3(2, 1, 10)]).T
        actual_array = pose.transformFrom(points)
        self.assertEqual(actual_array.shape, (3, 2))
        expected_array = np.stack([expected, expected]).T
        np.testing.assert_allclose(actual_array, expected_array, atol=1e-6)
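transformFrom maps a point from the pose's local frame into the world frame; transformTo (tested further below) is its inverse. A short round-trip sketch, reusing the pose from the test above (standalone, assuming only the standard gtsam Python API):

import math
import numpy as np
from gtsam import Point3, Pose3, Rot3

pose = Pose3(Rot3.Rodrigues(0, 0, -math.pi / 2), Point3(2, 4, 0))
p_local = Point3(2, 1, 10)
p_world = pose.transformFrom(p_local)   # local frame -> world frame
p_back = pose.transformTo(p_world)      # world frame -> local frame
np.testing.assert_allclose(p_back, p_local, atol=1e-6)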
Example #4
def preintegration_parameters():
    # IMU preintegration parameters.
    # Default params for a Z-up navigation frame, such as ENU: gravity points along the negative Z-axis.
    # Note: `g` (gravity magnitude, e.g. 9.81 m/s^2) and the `vector3` helper are assumed to be defined
    # elsewhere in the example.
    PARAMS = gtsam.PreintegrationParams.MakeSharedU(g)
    I = np.eye(3)
    PARAMS.setAccelerometerCovariance(I * 0.1)
    PARAMS.setGyroscopeCovariance(I * 0.1)
    PARAMS.setIntegrationCovariance(I * 0.1)
    PARAMS.setUse2ndOrderCoriolis(False)
    PARAMS.setOmegaCoriolis(vector3(0, 0, 0))

    BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1)
    DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), Point3(0.05, -0.10, 0.20))

    return PARAMS, BIAS_COVARIANCE, DELTA
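A hedged sketch of how these returned objects are typically consumed downstream: PARAMS and an initial bias estimate seed a PreintegratedImuMeasurements accumulator, which integrates raw IMU samples between keyframes. The measurement values and dt below are made up for illustration, and `preintegration_parameters()` is assumed callable (i.e. `g` and `vector3` defined as noted above):

import numpy as np
import gtsam

PARAMS, BIAS_COVARIANCE, DELTA = preintegration_parameters()

# Accumulate a few made-up IMU samples between two keyframes.
pim = gtsam.PreintegratedImuMeasurements(PARAMS, gtsam.imuBias.ConstantBias())
for _ in range(10):
    measured_acc = np.array([0.0, 0.0, 9.81])    # made-up accelerometer sample (m/s^2)
    measured_omega = np.array([0.0, 0.0, 0.1])   # made-up gyroscope sample (rad/s)
    pim.integrateMeasurement(measured_acc, measured_omega, 0.01)  # dt = 10 ms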
Example #5
    def test_align_squares(self):
        """Test if Align method can align 2 squares."""
        square = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]],
                          float).T
        sTt = Pose3(Rot3.Rodrigues(0, 0, -math.pi), Point3(2, 4, 0))
        transformed = sTt.transformTo(square)

        st_pairs = Point3Pairs()
        for j in range(4):
            st_pairs.append((square[:, j], transformed[:, j]))

        # Recover the transformation sTt
        estimated_sTt = Pose3.Align(st_pairs)
        self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10)

        # Matrix version
        estimated_sTt = Pose3.Align(square, transformed)
        self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10)
Example #6
    def test_transform_to(self):
        """Test transformTo method."""
        transform = Pose3(Rot3.Rodrigues(0, 0, -1.570796), Point3(2, 4, 0))
        actual = transform.transformTo(Point3(3, 2, 10))
        expected = Point3(2, 1, 10)
        self.gtsamAssertEquals(actual, expected, 1e-6)
Example #7
    def test_transform_to(self):
        """Test transform_to method (older, deprecated spelling of transformTo)."""
        transform = Pose3(Rot3.Rodrigues(0, 0, -1.570796), Point3(2, 4, 0))
        actual = transform.transform_to(Point3(3, 2, 10))
        expected = Point3(2, 1, 10)
        self.assertTrue(actual.equals(expected, 1e-6))
Example #8
    def test__between(self):
        """Test between method."""
        T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2))
        T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
        expected = T2.inverse().compose(T3)
        actual = T2.between(T3)
        self.assertTrue(actual.equals(expected, 1e-6))
Example #9
def main():
    """
    A structure-from-motion example with landmarks
    - The landmarks form a 10 meter cube
    - The robot rotates around the landmarks, always facing towards the cube
    """

    # Define the camera calibration parameters
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    camera_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()
    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create a NonlinearISAM object which will relinearize and reorder the variables
    # every "reorderInterval" updates
    isam = NonlinearISAM(reorderInterval=3)

    # Create a Factor Graph and Values to hold the new data
    graph = NonlinearFactorGraph()
    initial_estimate = Values()

    # Loop over the different poses, adding the observations to iSAM incrementally
    for i, pose in enumerate(poses):
        camera = PinholeCameraCal3_S2(pose, K)
        # Add factors for each landmark observation
        for j, point in enumerate(points):
            measurement = camera.project(point)
            factor = GenericProjectionFactorCal3_S2(measurement, camera_noise,
                                                    X(i), L(j), K)
            graph.push_back(factor)

        # Intentionally initialize the variables slightly away from the ground truth
        noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25),
                      t=Point3(0.05, -0.10, 0.20))
        initial_xi = pose.compose(noise)

        # Add an initial guess for the current pose
        initial_estimate.insert(X(i), initial_xi)

        # If this is the first iteration, add a prior on the first pose to set the coordinate frame
        # and a prior on the first landmark to set the scale.
        # Also, since iSAM solves incrementally, we must wait until each variable has been observed
        # at least twice before adding it to iSAM.
        if i == 0:
            # Add a prior on pose x0, with 0.3 rad std on roll, pitch, yaw and 0.1 m on x, y, z
            pose_noise = gtsam.noiseModel.Diagonal.Sigmas(
                np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
            factor = PriorFactorPose3(X(0), poses[0], pose_noise)
            graph.push_back(factor)

            # Add a prior on landmark l0
            point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
            factor = PriorFactorPoint3(L(0), points[0], point_noise)
            graph.push_back(factor)

            # Add initial guesses to all observed landmarks
            noise = np.array([-0.25, 0.20, 0.15])
            for j, point in enumerate(points):
                # Intentionally initialize the variables slightly away from the ground truth
                initial_lj = points[j] + noise
                initial_estimate.insert(L(j), initial_lj)
        else:
            # Update iSAM with the new factors
            isam.update(graph, initial_estimate)
            current_estimate = isam.estimate()
            print('*' * 50)
            print('Frame {}:'.format(i))
            current_estimate.print('Current estimate: ')

            # Clear the factor graph and values for the next iteration
            graph.resize(0)
            initial_estimate.clear()