Example #1
 def test_serialization(self):
     """Test if serialization is working normally"""
     expected = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0))
     actual = Pose3()
     serialized = expected.serialize()
     actual.deserialize(serialized)
     self.gtsamAssertEquals(expected, actual, 1e-10)
Example #2
    def setUp(self):
        # Create poses for the source map
        s_pose1 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(0, 0, 0))
        s_pose2 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(4, 0, 0))
        s_poses = [s_pose1, s_pose2]
        # Create points for the source map
        s_point1 = Point3(1, 1, 0)
        s_point2 = Point3(1, 3, 0)
        s_point3 = Point3(3, 3, 0)
        s_point4 = Point3(3, 1, 0)
        s_point5 = Point3(2, 2, 1)
        s_points = [s_point1, s_point2, s_point3, s_point4, s_point5]
        # Create the source map
        self.s_map = (s_poses, s_points)

        # Create poses for the destination map
        d_pose1 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(4, 6, 10))
        d_pose2 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(-4, 6, 10))
        d_poses = [d_pose1, d_pose2]
        # Create points for the destination map
        d_point1 = Point3(2, 8, 10)
        d_point2 = Point3(2, 12, 10)
        d_point3 = Point3(-2, 12, 10)
        d_point4 = Point3(-2, 8, 10)
        d_point5 = Point3(0, 10, 8)
        d_points = [d_point1, d_point2, d_point3, d_point4, d_point5]
        # Create the destination map
        self.d_map = (d_poses, d_points)
Example #3
    def test_nonconsecutive_indices(self):
        """Run rotation averaging on a graph with indices that are not ordered as [0,...,N-1].

        Note: Test would fail if Shonan keys were not temporarily re-ordered inside the implementation.
        See https://github.com/borglab/gtsam/issues/784
        """
        num_images = 4

        # assume pose 0 is orphaned in the visibility graph
        # Let wTi0's (R,t) be parameterized as identity Rot3(), and t = [1,1,0]
        wTi1 = Pose3(Rot3(), np.array([3, 1, 0]))
        wTi2 = Pose3(Rot3(), np.array([3, 3, 0]))
        wTi3 = Pose3(Rot3(), np.array([1, 3, 0]))

        # generate i2Ri1 rotations
        # (i1,i2) -> i2Ri1
        i2Ri1_input = {
            (1, 2): wTi2.between(wTi1).rotation(),
            (2, 3): wTi3.between(wTi2).rotation(),
            (1, 3): wTi3.between(wTi1).rotation(),
        }

        i2Ri1_priors = {edge: None for edge in i2Ri1_input.keys()}
        wRi_computed = self.obj.run(num_images, i2Ri1_input, i2Ri1_priors)
        wRi_expected = [
            None, wTi1.rotation(),
            wTi2.rotation(),
            wTi3.rotation()
        ]
        self.assertTrue(
            geometry_comparisons.compare_rotations(
                wRi_computed,
                wRi_expected,
                angular_error_threshold_degrees=0.1))
Example #4
    def test_calculate_triangulation_angle_in_degrees(self) -> None:
        """Test the computation of triangulation angle using a simple example.
        Lengths of line segments are defined as follows:
                   5*sqrt(2)
                  X ---- C1
                  |    /
        5*sqrt(2) |  / 10 = 5*sqrt(2)*sqrt(2)
                  C2
        Cameras and point situated as follows in the x-z plane:
        (0,0,0)
             o---- +z
             |
             |
             +x
                      X (5,0,5)
        (10,0,0)
             o---- +z
             |
             |
             +x
        """
        camera_center_1 = np.array([0, 0, 0])
        camera_center_2 = np.array([10, 0, 0])
        point_3d = np.array([5, 0, 5])

        expected = 90

        wT0 = Pose3(Rot3(), camera_center_1)
        wT1 = Pose3(Rot3(), camera_center_2)
        computed = mvs_utils.calculate_triangulation_angle_in_degrees(
            camera_1=PinholeCameraCal3Bundler(wT0),
            camera_2=PinholeCameraCal3Bundler(wT1),
            point_3d=point_3d,
        )
        self.assertAlmostEqual(computed, expected)
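For reference, the 90-degree expectation in the docstring can be verified directly: the rays from the point X = (5, 0, 5) back to the two camera centers are orthogonal. A minimal NumPy check, independent of mvs_utils:

import numpy as np

x = np.array([5.0, 0.0, 5.0])
c1 = np.array([0.0, 0.0, 0.0])
c2 = np.array([10.0, 0.0, 0.0])

ray1 = c1 - x  # (-5, 0, -5)
ray2 = c2 - x  # (5, 0, -5)
cos_angle = ray1.dot(ray2) / (np.linalg.norm(ray1) * np.linalg.norm(ray2))
print(np.degrees(np.arccos(cos_angle)))  # -> 90.0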
Example #5
def simulate_two_planes_scene(M: int, N: int) -> Tuple[Keypoints, Keypoints, EssentialMatrix]:
    """Generate a scene where 3D points are on two planes, and projects the points to the 2 cameras. There are M points
    on plane 1, and N points on plane 2.

    The two planes in this test are:
    1. -10x -y -20z +150 = 0
    2. 15x -2y -35z +200 = 0

    Args:
        M: number of points on 1st plane.
        N: number of points on 2nd plane.

    Returns:
        keypoints for image i1, of length (M+N).
        keypoints for image i2, of length (M+N).
        Essential matrix i2Ei1.
    """
    # range of 3D points
    range_x_coordinate = (-5, 7)
    range_y_coordinate = (-10, 10)

    # define the plane equation
    plane1_coeffs = (-10, -1, -20, 150)
    plane2_coeffs = (15, -2, -35, 200)

    # sample the points from planes
    plane1_points = sample_points_on_plane(plane1_coeffs, range_x_coordinate, range_y_coordinate, M)
    plane2_points = sample_points_on_plane(plane2_coeffs, range_x_coordinate, range_y_coordinate, N)

    points_3d = np.vstack((plane1_points, plane2_points))

    # define the camera poses and compute the essential matrix
    wti1 = np.array([0.1, 0, -20])
    wti2 = np.array([1, -2, -20.4])

    wRi1 = Rot3.RzRyRx(np.pi / 20, 0, 0.0)
    wRi2 = Rot3.RzRyRx(0.0, np.pi / 6, 0.0)

    wTi1 = Pose3(wRi1, wti1)
    wTi2 = Pose3(wRi2, wti2)
    i2Ti1 = wTi2.between(wTi1)

    i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))

    # project 3D points to 2D image measurements
    intrinsics = Cal3Bundler()
    camera_i1 = PinholeCameraCal3Bundler(wTi1, intrinsics)
    camera_i2 = PinholeCameraCal3Bundler(wTi2, intrinsics)

    uv_im1 = []
    uv_im2 = []
    for point in points_3d:
        uv_im1.append(camera_i1.project(point))
        uv_im2.append(camera_i2.project(point))

    uv_im1 = np.vstack(uv_im1)
    uv_im2 = np.vstack(uv_im2)

    # return the points as keypoints and the essential matrix
    return Keypoints(coordinates=uv_im1), Keypoints(coordinates=uv_im2), i2Ei1
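The helper sample_points_on_plane is referenced above but not shown. A minimal sketch of one plausible implementation (hypothetical, not necessarily the project's actual helper): sample (x, y) uniformly in the given ranges and solve a*x + b*y + c*z + d = 0 for z.

from typing import Tuple

import numpy as np


def sample_points_on_plane(
    coeffs: Tuple[float, float, float, float],
    range_x: Tuple[float, float],
    range_y: Tuple[float, float],
    num_points: int,
) -> np.ndarray:
    """Sample 3D points on the plane a*x + b*y + c*z + d = 0 (illustrative sketch)."""
    a, b, c, d = coeffs
    x = np.random.uniform(range_x[0], range_x[1], num_points)
    y = np.random.uniform(range_y[0], range_y[1], num_points)
    z = -(a * x + b * y + d) / c
    return np.column_stack((x, y, z))  # shape (num_points, 3)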
Example #6
def main():
    q = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    # q = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    lengths = [10, 20, 10, 20, 10, 10, 20]
    si = get_si()
    T = get_T(q, lengths)

    # FK init
    fk = Pose3(Rot3.Ypr(0.0, 0.0, 0.0), Point3(0, 0, 0))

    # expMap
    g = get_expMap(q, si)
    gtool = Pose3(Rot3.Ypr(0.0, 0.0, 0.0), Point3(100, 0, 0))
    G = compose(*(g + [gtool]))
    fk_1 = get_fk(lengths, q)
    fk_1.between(G)
    # Jacobian
    J = get_Jacobian(T, si)
    Jinv = np.linalg.pinv(J)
    # check Jacobian forward
    q_dot = np.array([0.5, 0, 1, 0, 0, -1, 0]).T
    J.dot(q_dot)
    # check with movement
    v = np.array([[0, 0, 0, 0, 0, 1]]).T
    d = np.matmul(Jinv, v)
    q += np.squeeze(d)  # element-wise update of the joint angles (q is a NumPy array)

    fk = get_fk(lengths, q)
    print(fk)
Example #7
 def test_transform_from(self):
     """Test transform form"""
     T = Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3(1, 2, 3))
     pose = Pose3(Rot3(), Point3(1, 2, 3))
     actual = transform_from(T, pose)
     expected = Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3(2, 5, 1))
     self.gtsamAssertEquals(actual, expected)
Example #8
    def test_sim3_pose(self):
        """Test generating similarity transform with Pose3 pairs."""
        # Create expected sim3
        expected_R = Rot3.Ry(math.radians(180))
        expected_s = 2
        expected_t = Point3(4, 6, 10)
        print(expected_R)

        # Create source poses
        s_pose1 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(0, 0, 0))
        s_pose2 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(4, 0, 0))

        # Create destination poses
        d_pose1 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(4, 6, 10))
        d_pose2 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(-4, 6, 10))

        # Align
        pose_pairs = [[s_pose1, d_pose1], [s_pose2, d_pose2]]
        sim3 = Similarity3()
        sim3.sim3_pose(pose_pairs)

        # Check actual sim3 equals to expected sim3
        assert (expected_R.equals(sim3._R, 1e-6))
        self.assertAlmostEqual(sim3._s, expected_s, delta=1e-6)
        self.assert_gtsam_equals(sim3._t, expected_t)
Example #9
    def create_initial_estimate(self, pose_indices):
        """Create initial estimate from ???."""
        wRc = Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0)  # camera to world rotation
        depth = 20

        initial_estimate = gtsam.Values()

        # Create dictionary for initial estimation
        for img_idx, features in enumerate(self._image_features):
            for kp_idx, keypoint in enumerate(features.keypoints):
                if self.image_points[img_idx].kp_3d_exist(kp_idx):
                    landmark_id = self.image_points[img_idx].kp_3d(kp_idx)
                    # Filter invalid landmarks
                    if self._landmark_estimates[
                            landmark_id].seen >= self._min_landmark_seen:
                        key_point = Point2(keypoint[0], keypoint[1])
                        key = P(landmark_id)
                        if not initial_estimate.exists(key):
                            # do back-projection
                            pose = Pose3(wRc,
                                         Point3(0, self.delta_z * img_idx, 2))
                            landmark_3d_point = self.back_projection(
                                key_point, pose, depth)
                            initial_estimate.insert(P(landmark_id),
                                                    landmark_3d_point)

        # Initial estimate for poses
        for idx in pose_indices:
            pose_i = Pose3(wRc, Point3(0, self.delta_z * idx, 2))
            initial_estimate.insert(X(idx), pose_i)

        return initial_estimate
Example #10
 def test_between(self):
     """Test between method."""
     T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2))
     T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
     expected = T2.inverse().compose(T3)
     actual = T2.between(T3)
     self.gtsamAssertEquals(actual, expected, 1e-6)
Example #11
def epipolar_inlier_correspondences(
    keypoints_i1: Keypoints,
    keypoints_i2: Keypoints,
    intrinsics_i1: Cal3Bundler,
    intrinsics_i2: Cal3Bundler,
    i2Ti1: Pose3,
    dist_threshold: float,
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
    """Compute inlier correspondences using epipolar geometry and the ground truth relative pose.

    Args:
        keypoints_i1: keypoints in image i1.
        keypoints_i2: corr. keypoints in image i2.
        intrinsics_i1: intrinsics for i1.
        intrinsics_i2: intrinsics for i2.
        i2Ti1: relative pose
        dist_threshold: max acceptable distance for a correct correspondence.

    Returns:
        is_inlier: (N, ) mask of inlier correspondences.
        distance_squared: squared sampson distance between corresponding keypoints.
    """
    i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))
    i2Fi1 = verification_utils.essential_to_fundamental_matrix(
        i2Ei1, intrinsics_i1, intrinsics_i2)
    distance_squared = verification_utils.compute_epipolar_distances_sq_sampson(
        keypoints_i1.coordinates, keypoints_i2.coordinates, i2Fi1)
    is_inlier = distance_squared < dist_threshold**2 if distance_squared is not None else None

    return is_inlier, distance_squared
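The fundamental matrix used above is presumably obtained via the standard relation F = K2^(-T) E K1^(-1). A small NumPy sketch of that conversion (a hypothetical stand-in for verification_utils.essential_to_fundamental_matrix, not the project's actual code):

import numpy as np

def essential_to_fundamental(E: np.ndarray, K1: np.ndarray, K2: np.ndarray) -> np.ndarray:
    """F = K2^{-T} @ E @ K1^{-1}, so that x2^T F x1 = 0 for pixel correspondences."""
    return np.linalg.inv(K2).T @ E @ np.linalg.inv(K1)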
Example #12
    def test_values(self):
        values = Values()
        E = EssentialMatrix(Rot3(), Unit3())
        tol = 1e-9

        values.insert(0, Point2(0, 0))
        values.insert(1, Point3(0, 0, 0))
        values.insert(2, Rot2())
        values.insert(3, Pose2())
        values.insert(4, Rot3())
        values.insert(5, Pose3())
        values.insert(6, Cal3_S2())
        values.insert(7, Cal3DS2())
        values.insert(8, Cal3Bundler())
        values.insert(9, E)
        values.insert(10, imuBias_ConstantBias())

        # Special cases for Vectors and Matrices
        # Note that GTSAM's Eigen Vectors and Matrices require double-precision
        # floating point numbers in column-major (Fortran-style) storage order,
        # whereas numpy.array defaults to row-major order and to whatever dtype
        # the inputs imply, e.g. np.array([1, 2, 3]) will have an 'int' dtype.
        #
        # The wrapper will automatically fix the type and storage order for you,
        # but for performance reasons, it's recommended to specify the correct
        # type and storage order.
        # For vectors, the order does not matter, but the dtype still does.
        vec = np.array([1., 2., 3.])
        values.insert(11, vec)
        mat = np.array([[1., 2.], [3., 4.]], order='F')
        values.insert(12, mat)
        # Test with dtype int and the default order='C'.
        # This still works, as the wrapper converts to the correct type and order for you,
        # but it is normally not recommended!
        mat2 = np.array([[1, 2, ], [3, 5]])
        values.insert(13, mat2)

        self.assertTrue(values.atPoint2(0).equals(Point2(0,0), tol))
        self.assertTrue(values.atPoint3(1).equals(Point3(0,0,0), tol))
        self.assertTrue(values.atRot2(2).equals(Rot2(), tol))
        self.assertTrue(values.atPose2(3).equals(Pose2(), tol))
        self.assertTrue(values.atRot3(4).equals(Rot3(), tol))
        self.assertTrue(values.atPose3(5).equals(Pose3(), tol))
        self.assertTrue(values.atCal3_S2(6).equals(Cal3_S2(), tol))
        self.assertTrue(values.atCal3DS2(7).equals(Cal3DS2(), tol))
        self.assertTrue(values.atCal3Bundler(8).equals(Cal3Bundler(), tol))
        self.assertTrue(values.atEssentialMatrix(9).equals(E, tol))
        self.assertTrue(values.atimuBias_ConstantBias(
            10).equals(imuBias_ConstantBias(), tol))

        # special cases for Vector and Matrix:
        actualVector = values.atVector(11)
        self.assertTrue(np.allclose(vec, actualVector, tol))
        actualMatrix = values.atMatrix(12)
        self.assertTrue(np.allclose(mat, actualMatrix, tol))
        actualMatrix2 = values.atMatrix(13)
        self.assertTrue(np.allclose(mat2, actualMatrix2, tol))
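As the comments in the test note, GTSAM's Eigen types expect double-precision, column-major data for Matrix values. A small illustration of preparing an array explicitly (the wrapper would also convert automatically):

import numpy as np

mat_c = np.array([[1, 2], [3, 4]])                   # int dtype, C (row-major) order
mat_f = np.asfortranarray(mat_c, dtype=np.float64)   # float64, Fortran (column-major) order
print(mat_f.dtype, mat_f.flags['F_CONTIGUOUS'])      # float64 True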
Example #13
 def test_compute_translation_to_direction_angle_is_zero(self):
     i2Ui1_measured = Unit3(Point3(1, 0, 0))
     wTi2_estimated = Pose3(Rot3(), Point3(0, 0, 0))
     wTi1_estimated = Pose3(Rot3(), Point3(2, 0, 0))
     self.assertEqual(
         geometry_comparisons.compute_translation_to_direction_angle(
             i2Ui1_measured, wTi2_estimated, wTi1_estimated),
         0.0,
     )
Example #14
    def run(self):
        reached = 0

        r = rospy.Rate(10)
        while not rospy.is_shutdown():

            goal = self.goals[self.goalindex]
            self.sTg = Pose3(Rot3.Rz(goal[3]), Point3(goal[0], goal[1],
                                                      goal[2]))
            R = quaternion_matrix([
                self.sTcurr_ros.pose.orientation.x,
                self.sTcurr_ros.pose.orientation.y,
                self.sTcurr_ros.pose.orientation.z,
                self.sTcurr_ros.pose.orientation.w
            ])[0:3, 0:3]
            self.sTcurr = Pose3(
                Rot3(R),
                Point3(self.sTcurr_ros.pose.position.x,
                       self.sTcurr_ros.pose.position.y,
                       self.sTcurr_ros.pose.position.z))

            currTg = self.sTcurr.between(self.sTg)

            u_pitch = self.kp_x * currTg.x() + self.kd_x * (
                currTg.x() - self.lastT.x()) + min(
                    max(-0.2, self.ki_x * self.ix), 0.2)
            u_roll = self.kp_y * currTg.y() + self.kd_y * (currTg.y() -
                                                           self.lastT.y())
            currTg_yaw = currTg.rotation().rpy()[2]
            u_yaw = self.kp_yaw * currTg_yaw + self.kd_yaw * (currTg_yaw -
                                                              self.lastyaw)
            u_z = self.kp_z * currTg.z() + self.kd_z * (currTg.z() -
                                                        self.lastT.z())

            self.lastT = currTg
            self.lastyaw = currTg_yaw

            self.ix += currTg.x()
            self.iy += currTg.y()
            self.iz += currTg.z()
            self.iyaw += currTg_yaw

            self.move_cmd.linear.x = min(max(-1, u_pitch), 1)
            self.move_cmd.linear.y = min(max(-1, u_roll), 1)
            self.move_cmd.linear.z = min(max(-1, u_z), 1)
            self.move_cmd.angular.z = min(max(-1, u_yaw), 1)

            self.velPub.publish(self.move_cmd)
            reached = self.check(currTg)
            if (reached == 1):
                reached = 0
                self.goalindex += 1

            r.sleep()
Example #15
def test_get_points_within_radius_of_cameras_negative_radius():
    """Catch degenerate input."""
    wTi0 = Pose3(Rot3(), np.zeros(3))
    wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
    wTi_list = [wTi0, wTi1]
    points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0],
                          [25, 0, 0]])
    radius = -5
    nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(
        wTi_list, points_3d, radius)
    assert nearby_points_3d is None, "Non-positive radius is not allowed"
Example #16
def test_get_points_within_radius_of_cameras_no_points():
    """Catch degenerate input."""

    wTi0 = Pose3(Rot3(), np.zeros(3))
    wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
    wTi_list = [wTi0, wTi1]
    points_3d = np.zeros((0, 3))
    radius = 10.0

    nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(
        wTi_list, points_3d, radius)
    assert nearby_points_3d is None, "At least one 3d point must be provided"
Example #17
    def setUp(self):
        """Set up the data association object and test data."""
        super().setUp()

        self.obj = DummyDataAssociation(0.5, 2)

        # set up ground truth data for comparison

        self.dummy_corr_idxs_dict = {
            (0, 1): np.array([[0, 2]]),
            (1, 2): np.array([[2, 3], [4, 5], [7, 9]]),
            (0, 2): np.array([[1, 8]]),
        }
        self.keypoints_list = [
            Keypoints(coordinates=np.array([[12, 16], [13, 18], [0, 10]])),
            Keypoints(coordinates=np.array([
                [8, 2],
                [16, 14],
                [22, 23],
                [1, 6],
                [50, 50],
                [16, 12],
                [82, 121],
                [39, 60],
            ])),
            Keypoints(coordinates=np.array([
                [1, 1],
                [8, 13],
                [40, 6],
                [82, 21],
                [1, 6],
                [12, 18],
                [15, 14],
                [25, 28],
                [7, 10],
                [14, 17],
            ])),
        ]

        # Generate two poses for use in triangulation tests
        # Looking along X-axis, 1 meter above ground plane (x-y)
        upright = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
        pose1 = Pose3(upright, Point3(0, 0, 1))

        # create second camera 1 meter to the right of first camera
        pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))

        self.poses = Pose3Vector()
        self.poses.append(pose1)
        self.poses.append(pose2)

        # landmark ~5 meters in front of the camera
        self.expected_landmark = Point3(5, 0.5, 1.2)
Example #18
    def test_two_view_correspondences(self):
        """Tests the bundle adjustment for relative pose on a simulated scene."""

        i1Ri2 = EXAMPLE_DATA.get_camera(1).pose().rotation()
        i1ti2 = EXAMPLE_DATA.get_camera(1).pose().translation()
        i2Ti1 = Pose3(i1Ri2, i1ti2)
        camera_i1 = PinholeCameraCal3Bundler(Pose3(), Cal3Bundler())
        camera_i2 = PinholeCameraCal3Bundler(i2Ti1, Cal3Bundler())
        tracks_3d, valid_indices = TwoViewEstimator.triangulate_two_view_correspondences(
            camera_i1, camera_i2, self.keypoints_i1, self.keypoints_i2,
            self.corr_idxs)
        self.assertEqual(len(tracks_3d), 5)
        self.assertEqual(len(valid_indices), 5)
Example #19
    def setUp(self):
        """Set up two camera poses"""
        # Looking along X-axis, 1 meter above ground plane (x-y)
        pose1 = Pose3(UPRIGHT, Point3(0, 0, 1))

        # create second camera 1 meter to the right of first camera
        pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))
        # twoPoses
        self.poses = Pose3Vector()
        self.poses.append(pose1)
        self.poses.append(pose2)

        # landmark ~5 meters in front of the camera
        self.landmark = Point3(5, 0.5, 1.2)
Example #20
    def test_compare_global_poses_scaled_squares(self):
        """Make sure a big and small square can be aligned.

        The u's represent a big square (10x10), and v's represents a small square (4x4).
        """
        R0 = Rotation.from_euler("z", 0, degrees=True).as_matrix()
        R90 = Rotation.from_euler("z", 90, degrees=True).as_matrix()
        R180 = Rotation.from_euler("z", 180, degrees=True).as_matrix()
        R270 = Rotation.from_euler("z", 270, degrees=True).as_matrix()

        wTu0 = Pose3(Rot3(R0), np.array([2, 3, 0]))
        wTu1 = Pose3(Rot3(R90), np.array([12, 3, 0]))
        wTu2 = Pose3(Rot3(R180), np.array([12, 13, 0]))
        wTu3 = Pose3(Rot3(R270), np.array([2, 13, 0]))

        wTi_list = [wTu0, wTu1, wTu2, wTu3]

        wTv0 = Pose3(Rot3(R0), np.array([4, 3, 0]))
        wTv1 = Pose3(Rot3(R90), np.array([8, 3, 0]))
        wTv2 = Pose3(Rot3(R180), np.array([8, 7, 0]))
        wTv3 = Pose3(Rot3(R270), np.array([4, 7, 0]))

        wTi_list_ = [wTv0, wTv1, wTv2, wTv3]

        pose_graphs_equal = geometry_comparisons.compare_global_poses(
            wTi_list, wTi_list_)
        self.assertTrue(pose_graphs_equal)
Example #21
    def test_computation_graph(self):
        """Test the dask computation graph execution using a valid collection of relative unit-translations."""
        """Test a simple case with 8 camera poses.

        The camera poses are arranged on the circle and point towards the center
        of the circle. The poses of 8 cameras are obtained from SFMdata and the
        unit translations directions between some camera pairs are computed from their global translations.

        This test is copied from GTSAM's TranslationAveragingExample.
        """

        fx, fy, s, u0, v0 = 50.0, 50.0, 0.0, 50.0, 50.0
        expected_wTi_list = SFMdata.createPoses(Cal3_S2(fx, fy, s, u0, v0))

        wRi_list = [x.rotation() for x in expected_wTi_list]

        # create relative translation directions between a pose index and the
        # next two poses
        i2Ui1_dict = {}
        for i1 in range(len(expected_wTi_list) - 1):
            for i2 in range(i1 + 1, min(len(expected_wTi_list), i1 + 3)):
                # create relative translations using global R and T.
                i2Ui1_dict[(i1, i2)] = Unit3(expected_wTi_list[i2].between(
                    expected_wTi_list[i1]).translation())

        # use the `run` API to get expected results
        expected_wti_list = self.obj.run(len(wRi_list), i2Ui1_dict, wRi_list)
        expected_wTi_list = [
            Pose3(wRi, wti) if wti is not None else None
            for (wRi, wti) in zip(wRi_list, expected_wti_list)
        ]

        # form computation graph and execute
        i2Ui1_graph = dask.delayed(i2Ui1_dict)
        wRi_graph = dask.delayed(wRi_list)
        computation_graph = self.obj.create_computation_graph(
            len(wRi_list), i2Ui1_graph, wRi_graph)
        with dask.config.set(scheduler="single-threaded"):
            wti_list = dask.compute(computation_graph)[0]

        wTi_list = [
            Pose3(wRi, wti) if wti is not None else None
            for (wRi, wti) in zip(wRi_list, wti_list)
        ]

        # compare the entries
        self.assertTrue(
            geometry_comparisons.compare_global_poses(wTi_list,
                                                      expected_wTi_list))
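The dask pattern used above, stripped of the GTSAM specifics: wrap the inputs with dask.delayed, build the graph, then execute it with an explicit scheduler. A minimal sketch:

import dask


@dask.delayed
def add(x, y):
    return x + y


graph = add(dask.delayed(1), dask.delayed(2))
with dask.config.set(scheduler="single-threaded"):
    result = dask.compute(graph)[0]
print(result)  # 3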
Example #22
 def test_load_poses_from_file(self):
     """Test load psoes from file."""
     file_name = self.data_directory+"result/poses.dat"
     poses = load_poses_from_file(file_name)
     # """Test pose output"""
     delta_z = 1
     num_frames = 3
     wRc = Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0)
     for i in range(num_frames):
         # Create ground truth poses
         expected_pose_i = Pose3(wRc, Point3(0, delta_z*i, 2))
         # Get actual poses
         rotation = Rot3(np.array(poses[i][3:]).reshape(3, 3))
         actual_pose_i = Pose3(rotation, Point3(np.array(poses[i][0:3])))
         self.gtsamAssertEquals(actual_pose_i, expected_pose_i, 1e-6)
Example #23
def get_ortho_axis_alignment_transform(gtsfm_data: GtsfmData) -> Pose3:
    """Wrapper function for all the functions in ellipsoid.py. Obtains the Pose3 transformation required to align
    the GtsfmData to the x,y,z axes.

    Args:
        gtsfm_data: scene data to transform.

    Returns:
        The final transformation required to align point cloud and frustums.
    """
    # Iterate through each track to gather a list of 3D points forming the point cloud.
    num_pts = gtsfm_data.number_tracks()
    point_cloud = [gtsfm_data.get_track(j).point3() for j in range(num_pts)]
    point_cloud = np.array(point_cloud)  # point_cloud has shape Nx3

    # Filter outlier points, Center point cloud, and obtain alignment rotation.
    points_filtered, inlier_mask = remove_outlier_points(point_cloud)
    points_centered = center_point_cloud(points_filtered)
    wuprightRw = get_alignment_rotation_matrix_from_svd(points_centered)

    # Calculate translation vector based off rotated point cloud (excluding outliers).
    point_cloud_rotated = point_cloud @ wuprightRw.T
    rotated_mean = np.mean(point_cloud_rotated[inlier_mask], axis=0)

    # Obtain the Pose3 object needed to align camera frustums.
    walignedTw = Pose3(Rot3(wuprightRw), -1 * rotated_mean)

    return walignedTw
Example #24
 def test_back_projection(self):
     """Test back projection"""
     actual = self.back_end.back_projection(
         Point2(640, 360), Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0),
                                 Point3()), 20)
     expected = Point3(0, 20, 0)
     self.gtsamAssertEquals(actual, expected)
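Why the expected landmark is (0, 20, 0): pixel (640, 360) is the image center for the 1280x720 calibration used in the corresponding setUp (see Example #26), so the back-projected ray is the camera's optical axis. With the wRc used here, camera +z maps to world +y, and a depth of 20 places the point at (0, 20, 0). A quick NumPy check of that ray, under those assumptions:

import numpy as np

wRc = np.array([[1, 0, 0],
                [0, 0, 1],
                [0, -1, 0]])           # same rotation as Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0)
cam_ray = np.array([0, 0, 1])          # optical axis in the camera frame
print(wRc @ cam_ray * 20)              # -> [ 0 20  0]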
Example #25
def main():
    """Main runner."""
    args = parse_args()
    kitti_calibration, imu_measurements, gps_measurements = loadKittiData()

    if not kitti_calibration.bodyTimu.equals(Pose3(), 1e-8):
        raise ValueError(
            "Currently, only the case where bodyTimu is the identity is supported, "
            "i.e. the IMU and body frames must coincide."
        )

    # Configure different variables
    first_gps_pose = 1
    gps_skip = 10

    # Configure noise models
    noise_model_gps = noiseModel.Diagonal.Precisions(
        np.asarray([0, 0, 0] + [1.0 / 0.07] * 3))

    sigma_init_x = noiseModel.Diagonal.Precisions(
        np.asarray([0, 0, 0, 1, 1, 1]))
    sigma_init_v = noiseModel.Diagonal.Sigmas(np.ones(3) * 1000.0)
    sigma_init_b = noiseModel.Diagonal.Sigmas(
        np.asarray([0.1] * 3 + [5.00e-05] * 3))

    isam = optimize(gps_measurements, imu_measurements, sigma_init_x,
                    sigma_init_v, sigma_init_b, noise_model_gps,
                    kitti_calibration, first_gps_pose, gps_skip)

    save_results(isam, args.output_filename, first_gps_pose, gps_measurements)
Example #26
 def setUp(self):
     """Create mapping Back-end and read csv file."""
     # Input image (undistorted) calibration
     fov, w, h = 60, 1280, 720
     calibration = Cal3_S2(fov, w, h)
     # Create pose priors
     wRc = Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0)  # camera to world rotation
     pose_estimates = [Pose3(wRc, Point3(0, i, 2)) for i in range(3)]
     # Create measurement noise for bundle adjustment
     sigma = 1.0
     measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, sigma)
     # Create pose prior noise
     rotation_sigma = np.radians(60)
     translation_sigma = 1
     pose_noise_sigmas = np.array([
         rotation_sigma, rotation_sigma, rotation_sigma, translation_sigma,
         translation_sigma, translation_sigma
     ])
     pose_prior_noise = gtsam.noiseModel_Diagonal.Sigmas(pose_noise_sigmas)
     # Create MappingBackEnd instance
     data_directory = 'mapping/sim_match_data/'
     min_landmark_seen = 3
     self.num_images = 3
     self.back_end = MappingBackEnd(
         data_directory, self.num_images, calibration, pose_estimates, measurement_noise, pose_prior_noise, min_landmark_seen)  # pylint: disable=line-too-long
Example #27
def view_scene(args: argparse.Namespace) -> None:
    """Read GTSFM output from .txt files and render the scene to the GUI.

    We also zero-center the point cloud, and transform camera poses to a new
    world frame, where the point cloud is zero-centered.

    Args:
        args: rendering options.
    """
    points_fpath = f"{args.output_dir}/points3D.txt"
    images_fpath = f"{args.output_dir}/images.txt"
    cameras_fpath = f"{args.output_dir}/cameras.txt"

    # Read in data.
    wTi_list, img_fnames, calibrations, point_cloud, rgb = io_utils.read_scene(
        images_fpath, cameras_fpath, points_fpath)
    if args.show_mvs_result:
        point_cloud, rgb = io_utils.read_point_cloud_from_ply(args.ply_fpath)

    if len(calibrations) == 1:
        calibrations = calibrations * len(img_fnames)
    mean_pt = compute_point_cloud_center_robust(point_cloud)

    # Zero-center the point cloud (about estimated center).
    zcwTw = Pose3(Rot3(np.eye(3)), -mean_pt)
    # expression below is equivalent to applying zcwTw.transformFrom() to each world point
    point_cloud -= mean_pt
    is_nearby = np.linalg.norm(point_cloud, axis=1) < args.max_range
    point_cloud = point_cloud[is_nearby]
    rgb = rgb[is_nearby]
    for i in range(len(wTi_list)):
        wTi_list[i] = zcwTw.compose(wTi_list[i])

    draw_scene_open3d(point_cloud, rgb, wTi_list, calibrations, args)
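The comment about zcwTw.transformFrom() being equivalent to subtracting mean_pt can be checked directly, since transformFrom applies p -> R*p + t with R = I and t = -mean_pt. A minimal check (assuming a recent GTSAM Python wrapper where Point3 is a NumPy array):

import numpy as np
from gtsam import Pose3, Rot3

mean_pt = np.array([1.0, 2.0, 3.0])
zcwTw = Pose3(Rot3(np.eye(3)), -mean_pt)

p_world = np.array([4.0, 5.0, 6.0])
print(np.allclose(zcwTw.transformFrom(p_world), p_world - mean_pt))  # True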
Example #28
def decompose_camera_projection_matrix(M: np.ndarray) -> Tuple[np.ndarray, Pose3]:
    """Recover the camera pose from a 3x4 projection matrix.

    Reference: https://johnwlambert.github.io/cam-calibration/#decomposition

    Args:
        M: array of shape (3,4) representing camera projection matrix.
            Let M = [m1 | m2 | m3 | m4] = [ Q | m4]

    Returns:
        K: 3x3 upper triangular camera intrinsics matrix. The matrix diagonal is
            guaranteed to contain all positive entries.
        wTc: camera pose in world frame.
    """
    m4 = M[:, 3]
    Q = M[:3, :3]
    wtc = np.linalg.inv(-Q) @ m4

    K, cRw = scipy.linalg.rq(Q)

    # multiply columns of K by -1 if K(i,i) entry on diagonal is negative.
    # T is a matrix that scales by -1 or 1
    T = np.diag(np.sign(np.diag(K)))
    K = K @ T

    # scale rows of cRw by T, to keep a valid decomposition. T is its own inverse.
    wRc = Rot3(T @ cRw).inverse()
    wTc = Pose3(wRc, wtc)

    return K, wTc
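A quick round-trip check of the decomposition (a sketch, assuming the function above is in scope): build M = K [cRw | -cRw @ C] for a known K, rotation, and camera center C, then confirm the recovered intrinsics and pose translation.

import numpy as np
from gtsam import Rot3

K_true = np.array([[500.0, 0.0, 320.0],
                   [0.0, 500.0, 240.0],
                   [0.0, 0.0, 1.0]])
cRw = Rot3.RzRyRx(0.1, -0.2, 0.3).matrix()
C = np.array([1.0, 2.0, 3.0])                      # camera center in the world frame
M = K_true @ np.hstack((cRw, (-cRw @ C).reshape(3, 1)))

K_recovered, wTc = decompose_camera_projection_matrix(M)
print(np.allclose(K_recovered, K_true))            # True
print(np.allclose(wTc.translation(), C))           # True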
Example #29
    def test_round_trip_images_txt(self) -> None:
        """Starts with a pose. Writes the pose to images.txt (in a temporary directory). Then reads images.txt to recover
        that same pose. Checks if the original wTc and recovered wTc match up."""

        # fmt: off
        # Rotation 45 degrees about the z-axis.
        original_wRc = np.array([[np.cos(np.pi / 4), -np.sin(np.pi / 4), 0],
                                 [np.sin(np.pi / 4),
                                  np.cos(np.pi / 4), 0], [0, 0, 1]])
        original_wtc = np.array([3, -2, 1])
        # fmt: on

        # Setup dummy GtsfmData Object with one image
        original_wTc = Pose3(Rot3(original_wRc), original_wtc)
        default_intrinsics = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)
        camera = PinholeCameraCal3Bundler(original_wTc, default_intrinsics)
        gtsfm_data = GtsfmData(number_images=1)
        gtsfm_data.add_camera(0, camera)

        image = Image(value_array=None, file_name="dummy_image.jpg")
        images = [image]

        # Perform write and read operations inside a temporary directory
        with tempfile.TemporaryDirectory() as tempdir:
            images_fpath = os.path.join(tempdir, "images.txt")

            io_utils.write_images(gtsfm_data, images, tempdir)
            wTi_list, _ = io_utils.read_images_txt(images_fpath)
            recovered_wTc = wTi_list[0]

        npt.assert_almost_equal(original_wTc.matrix(),
                                recovered_wTc.matrix(),
                                decimal=3)
Example #30
    def test_bundle_adjust(self):
        """Tests the bundle adjustment for relative pose on a simulated scene."""
        two_view_estimator = TwoViewEstimator(matcher=None,
                                              verifier=None,
                                              inlier_support_processor=None,
                                              bundle_adjust_2view=True,
                                              eval_threshold_px=4)

        # Extract example poses.
        i1Ri2 = EXAMPLE_DATA.get_camera(1).pose().rotation()
        i1ti2 = EXAMPLE_DATA.get_camera(1).pose().translation()
        i2Ti1 = Pose3(i1Ri2, i1ti2).inverse()
        i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))

        # Perform bundle adjustment.
        i2Ri1_optimized, i2Ui1_optimized, corr_idxs = two_view_estimator.bundle_adjust(
            keypoints_i1=self.keypoints_i1,
            keypoints_i2=self.keypoints_i2,
            verified_corr_idxs=self.corr_idxs,
            camera_intrinsics_i1=Cal3Bundler(),
            camera_intrinsics_i2=Cal3Bundler(),
            i2Ri1_initial=i2Ei1.rotation(),
            i2Ui1_initial=i2Ei1.direction(),
            i2Ti1_prior=None,
        )

        # Assert
        rotation_angular_error = comp_utils.compute_relative_rotation_angle(
            i2Ri1_optimized, i2Ei1.rotation())
        translation_angular_error = comp_utils.compute_relative_unit_translation_angle(
            i2Ui1_optimized, i2Ei1.direction())
        self.assertLessEqual(rotation_angular_error, 1)
        self.assertLessEqual(translation_angular_error, 1)
        np.testing.assert_allclose(corr_idxs, self.corr_idxs)