def setUp(self):
        """Set up a small Karcher mean optimization example."""
        # Grabbed from KarcherMeanFactor unit tests.
        R = Rot3.Expmap(np.array([0.1, 0, 0]))
        rotations = {R, R.inverse()}  # mean is the identity
        self.expected = Rot3()

        def check(actual):
            # Check that optimizing yields the identity
            self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
            # Check that logging output prints out 3 lines (exact intermediate values differ by OS)
            self.assertEqual(self.capturedOutput.getvalue().count('\n'), 3)
            # reset stdout catcher
            self.capturedOutput.truncate(0)

        self.check = check

        self.graph = gtsam.NonlinearFactorGraph()
        for R in rotations:
            self.graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
        self.initial = gtsam.Values()
        self.initial.insert(KEY, R)

        # setup output capture
        self.capturedOutput = StringIO()
        sys.stdout = self.capturedOutput
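A minimal companion test for this fixture might look like the following sketch. It assumes gtsam's gtsam_optimize wrapper from gtsam.utils.logging_optimizer; the per-iteration hook is what produces the lines that check() counts.

def test_simple_printing(self):
        """Optimize and verify through the stdout-capturing check above (a sketch)."""
        def hook(_, error):
            print(error)  # one line per iteration lands in self.capturedOutput

        params = gtsam.GaussNewtonParams()
        optimizer = gtsam.GaussNewtonOptimizer(self.graph, self.initial, params)
        gtsam_optimize(optimizer, params, hook)
        self.check(optimizer.values())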
Example #2
    def test_computation_graph(self):
        """Test the dask computation graph execution using a valid collection of relative poses."""

        num_poses = 3

        i2Ri1_dict = {
            (0, 1): Rot3.RzRyRx(0, np.deg2rad(30), 0),
            (1, 2): Rot3.RzRyRx(0, 0, np.deg2rad(20)),
        }

        i2Ri1_graph = dask.delayed(i2Ri1_dict)

        # use the GTSAM API directly (without dask) for rotation averaging
        expected_wRi_list = self.obj.run(num_poses, i2Ri1_dict)

        # use dask's computation graph
        computation_graph = self.obj.create_computation_graph(
            num_poses, i2Ri1_graph)

        with dask.config.set(scheduler="single-threaded"):
            wRi_list = dask.compute(computation_graph)[0]

        # compare the two results
        self.assertTrue(
            geometry_comparisons.compare_rotations(
                wRi_list, expected_wRi_list,
                ROTATION_ANGLE_ERROR_THRESHOLD_DEG))
Example #3
    def setUp(self):
        # Create poses for the source map
        s_pose1 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(0, 0, 0))
        s_pose2 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(4, 0, 0))
        s_poses = [s_pose1, s_pose2]
        # Create points for the source map
        s_point1 = Point3(1, 1, 0)
        s_point2 = Point3(1, 3, 0)
        s_point3 = Point3(3, 3, 0)
        s_point4 = Point3(3, 1, 0)
        s_point5 = Point3(2, 2, 1)
        s_points = [s_point1, s_point2, s_point3, s_point4, s_point5]
        # Create the source map
        self.s_map = (s_poses, s_points)

        # Create poses for the destination map
        d_pose1 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(4, 6, 10))
        d_pose2 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(-4, 6, 10))
        d_poses = [d_pose1, d_pose2]
        # Create points for the destination map
        d_point1 = Point3(2, 8, 10)
        d_point2 = Point3(2, 12, 10)
        d_point3 = Point3(-2, 12, 10)
        d_point4 = Point3(-2, 8, 10)
        d_point5 = Point3(0, 10, 8)
        d_points = [d_point1, d_point2, d_point3, d_point4, d_point5]
        # Create the destination map
        self.d_map = (d_poses, d_points)
Example #4
def main():
    # Use a float array so that `q += ...` below is elementwise addition,
    # not Python-list extension.
    q = np.array([0.0, 0, 0, 0, 0, 0, 0])
    # q = np.array([1.0, 1, 1, 1, 1, 1, 1])
    lengths = [10, 20, 10, 20, 10, 10, 20]
    si = get_si()
    T = get_T(q, lengths)

    # FK init
    fk = Pose3(Rot3.Ypr(0.0, 0.0, 0.0), Point3(0, 0, 0))

    # expMap
    g = get_expMap(q, si)
    gtool = Pose3(Rot3.Ypr(0.0, 0.0, 0.0), Point3(100, 0, 0))
    G = compose(*(g + [gtool]))
    fk_1 = get_fk(lengths, q)
    fk_1.between(G)  # closed-form FK vs. exponential-map FK; should be near identity
    # Jacobian
    J = get_Jacobian(T, si)
    Jinv = np.linalg.pinv(J)
    # check the Jacobian forward map: joint velocities -> end-effector twist
    q_dot = np.array([0.5, 0, 1, 0, 0, -1, 0])
    J.dot(q_dot)
    # check with movement
    v = np.array([[0, 0, 0, 0, 0, 1]]).T
    d = np.matmul(Jinv, v)
    q += np.squeeze(d)

    fk = get_fk(lengths, q)
    print(fk)
Example #5
def simulate_two_planes_scene(M: int, N: int) -> Tuple[Keypoints, Keypoints, EssentialMatrix]:
    """Generate a scene where 3D points are on two planes, and projects the points to the 2 cameras. There are M points
    on plane 1, and N points on plane 2.

    The two planes in this test are:
    1. -10x -y -20z +150 = 0
    2. 15x -2y -35z +200 = 0

    Args:
        M: number of points on 1st plane.
        N: number of points on 2nd plane.

    Returns:
        keypoints for image i1, of length (M+N).
        keypoints for image i2, of length (M+N).
        Essential matrix i2Ei1.
    """
    # range of 3D points
    range_x_coordinate = (-5, 7)
    range_y_coordinate = (-10, 10)

    # define the plane equation
    plane1_coeffs = (-10, -1, -20, 150)
    plane2_coeffs = (15, -2, -35, 200)

    # sample the points from planes
    plane1_points = sample_points_on_plane(plane1_coeffs, range_x_coordinate, range_y_coordinate, M)
    plane2_points = sample_points_on_plane(plane2_coeffs, range_x_coordinate, range_y_coordinate, N)

    points_3d = np.vstack((plane1_points, plane2_points))

    # define the camera poses and compute the essential matrix
    wti1 = np.array([0.1, 0, -20])
    wti2 = np.array([1, -2, -20.4])

    wRi1 = Rot3.RzRyRx(np.pi / 20, 0, 0.0)
    wRi2 = Rot3.RzRyRx(0.0, np.pi / 6, 0.0)

    wTi1 = Pose3(wRi1, wti1)
    wTi2 = Pose3(wRi2, wti2)
    i2Ti1 = wTi2.between(wTi1)

    i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))

    # project 3D points to 2D image measurements
    intrinsics = Cal3Bundler()
    camera_i1 = PinholeCameraCal3Bundler(wTi1, intrinsics)
    camera_i2 = PinholeCameraCal3Bundler(wTi2, intrinsics)

    uv_im1 = []
    uv_im2 = []
    for point in points_3d:
        uv_im1.append(camera_i1.project(point))
        uv_im2.append(camera_i2.project(point))

    uv_im1 = np.vstack(uv_im1)
    uv_im2 = np.vstack(uv_im2)

    # return the points as keypoints and the essential matrix
    return Keypoints(coordinates=uv_im1), Keypoints(coordinates=uv_im2), i2Ei1
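The sample_points_on_plane helper is not shown above; a minimal sketch consistent with its call signature (plane coefficients (a, b, c, d) of ax + by + cz + d = 0, x/y sampling ranges, and a point count) could be:

def sample_points_on_plane(plane_coeffs, range_x, range_y, num_points):
    """Sample num_points 3D points lying on the given plane (a sketch)."""
    a, b, c, d = plane_coeffs
    x = np.random.uniform(range_x[0], range_x[1], num_points)
    y = np.random.uniform(range_y[0], range_y[1], num_points)
    z = -(a * x + b * y + d) / c  # solve ax + by + cz + d = 0 for z
    return np.column_stack([x, y, z])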
Example #6
    def test_sim3_pose(self):
        """Test generating similarity transform with Pose3 pairs."""
        # Create expected sim3
        expected_R = Rot3.Ry(math.radians(180))
        expected_s = 2
        expected_t = Point3(4, 6, 10)
        print(expected_R)

        # Create source poses
        s_pose1 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(0, 0, 0))
        s_pose2 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])),
                        Point3(4, 0, 0))

        # Create destination poses
        d_pose1 = Pose3(Rot3(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(4, 6, 10))
        d_pose2 = Pose3(Rot3(np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])),
                        Point3(-4, 6, 10))

        # Align
        pose_pairs = [[s_pose1, d_pose1], [s_pose2, d_pose2]]
        sim3 = Similarity3()
        sim3.sim3_pose(pose_pairs)

        # Check that the actual sim3 equals the expected sim3
        assert (expected_R.equals(sim3._R, 1e-6))
        self.assertAlmostEqual(sim3._s, expected_s, delta=1e-6)
        self.assert_gtsam_equals(sim3._t, expected_t)
Example #7
 def test_transform_from(self):
     """Test transform form"""
     T = Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3(1, 2, 3))
     pose = Pose3(Rot3(), Point3(1, 2, 3))
     actual = transform_from(T, pose)
     expected = Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3(2, 5, 1))
     self.gtsamAssertEquals(actual, expected)
Example #8
 def test_between(self):
     """Test between method."""
     T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2))
     T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
     expected = T2.inverse().compose(T3)
     actual = T2.between(T3)
     self.gtsamAssertEquals(actual, expected, 1e-6)
Example #9
    def test_nonconsecutive_indices(self):
        """Run rotation averaging on a graph with indices that are not ordered as [0,...,N-1].

        Note: Test would fail if Shonan keys were not temporarily re-ordered inside the implementation.
        See https://github.com/borglab/gtsam/issues/784
        """
        num_images = 4

        # assume pose 0 is orphaned in the visibility graph
        # Let wTi0's (R,t) be parameterized as identity Rot3(), and t = [1,1,0]
        wTi1 = Pose3(Rot3(), np.array([3, 1, 0]))
        wTi2 = Pose3(Rot3(), np.array([3, 3, 0]))
        wTi3 = Pose3(Rot3(), np.array([1, 3, 0]))

        # generate i2Ri1 rotations
        # (i1,i2) -> i2Ri1
        i2Ri1_input = {
            (1, 2): wTi2.between(wTi1).rotation(),
            (2, 3): wTi3.between(wTi2).rotation(),
            (1, 3): wTi3.between(wTi1).rotation(),
        }

        i2Ri1_priors = {edge: None for edge in i2Ri1_input.keys()}
        wRi_computed = self.obj.run(num_images, i2Ri1_input, i2Ri1_priors)
        wRi_expected = [
            None, wTi1.rotation(),
            wTi2.rotation(),
            wTi3.rotation()
        ]
        self.assertTrue(
            geometry_comparisons.compare_rotations(
                wRi_computed,
                wRi_expected,
                angular_error_threshold_degrees=0.1))
Example #10
    def test_calculate_triangulation_angle_in_degrees(self) -> None:
        """Test the computation of triangulation angle using a simple example.
        Lengths of line segments are defined as follows:
                   5*sqrt(2)
                  X ---- C1
                  |    /
        5*sqrt(2) |  / 10 = 5*sqrt(2)*sqrt(2)
                  C2
        Cameras and point situated as follows in the x-z plane:
        (0,0,0)
             o---- +z
             |
             |
             +x
                      X (5,0,5)
        (10,0,0)
             o---- +z
             |
             |
             +x
        """
        camera_center_1 = np.array([0, 0, 0])
        camera_center_2 = np.array([10, 0, 0])
        point_3d = np.array([5, 0, 5])

        expected = 90

        wT0 = Pose3(Rot3(), camera_center_1)
        wT1 = Pose3(Rot3(), camera_center_2)
        computed = mvs_utils.calculate_triangulation_angle_in_degrees(
            camera_1=PinholeCameraCal3Bundler(wT0),
            camera_2=PinholeCameraCal3Bundler(wT1),
            point_3d=point_3d,
        )
        self.assertAlmostEqual(computed, expected)
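The expected 90 degrees also follows from a direct computation on the two viewing rays, independent of mvs_utils (a sketch reusing the arrays above):

        ray1 = point_3d - camera_center_1  # (5, 0, 5)
        ray2 = point_3d - camera_center_2  # (-5, 0, 5)
        cos_angle = ray1 @ ray2 / (np.linalg.norm(ray1) * np.linalg.norm(ray2))
        angle_deg = np.degrees(np.arccos(cos_angle))  # -> 90.0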
Example #11
    def test_align_poses_along_straight_line_gauge(self):
        """Test if Align Pose3Pairs method can account for gauge ambiguity.

        Scenario:
           3 object poses
           with gauge ambiguity (2x scale)
           world frame has poses rotated about z-axis (90 degree yaw)
           world and egovehicle frame translated by 10 meters w.r.t. each other
        """
        Rz90 = Rot3.Rz(np.deg2rad(90))

        # Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
        # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
        eTo0 = Pose3(Rot3(), np.array([1, 0, 0]))
        eTo1 = Pose3(Rot3(), np.array([2, 0, 0]))
        eTo2 = Pose3(Rot3(), np.array([4, 0, 0]))

        eToi_list = [eTo0, eTo1, eTo2]

        # Create destination poses
        # (same three objects, but instead living in the world/city "w" frame)
        wTo0 = Pose3(Rz90, np.array([0, 12, 0]))
        wTo1 = Pose3(Rz90, np.array([0, 14, 0]))
        wTo2 = Pose3(Rz90, np.array([0, 18, 0]))

        wToi_list = [wTo0, wTo1, wTo2]

        we_pairs = Pose3Pairs(list(zip(wToi_list, eToi_list)))

        # Recover the transformation wSe (i.e. world_S_egovehicle)
        wSe = Similarity3.Align(we_pairs)

        for wToi, eToi in zip(wToi_list, eToi_list):
            self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
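If the Similarity3 accessors scale() and rotation() are wrapped in the Python API (an assumption), the recovered gauge can also be inspected directly:

        self.assertAlmostEqual(wSe.scale(), 2.0)            # the 2x scale
        self.gtsamAssertEquals(wSe.rotation(), Rz90, 1e-6)  # the 90 degree yaw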
Example #12
    def test_values(self):
        values = Values()
        E = EssentialMatrix(Rot3(), Unit3())
        tol = 1e-9

        values.insert(0, Point2(0, 0))
        values.insert(1, Point3(0, 0, 0))
        values.insert(2, Rot2())
        values.insert(3, Pose2())
        values.insert(4, Rot3())
        values.insert(5, Pose3())
        values.insert(6, Cal3_S2())
        values.insert(7, Cal3DS2())
        values.insert(8, Cal3Bundler())
        values.insert(9, E)
        values.insert(10, imuBias_ConstantBias())

        # Special cases for Vectors and Matrices
        # Note that gtsam's Eigen Vectors and Matrices require double-precision
        # floating-point numbers in column-major (Fortran-style) storage order,
        # whereas by default numpy.array is in row-major order and takes the type
        # of the input numbers, e.g. np.array([1, 2, 3]) will have 'int' type.
        #
        # The wrapper will automatically fix the type and storage order for you,
        # but for performance reasons, it's recommended to specify the correct
        # type and storage order.
        # for vectors, the order is not important, but dtype still is
        vec = np.array([1., 2., 3.])
        values.insert(11, vec)
        mat = np.array([[1., 2.], [3., 4.]], order='F')
        values.insert(12, mat)
        # Test with dtype int and the default order='C'.
        # This still works, as the wrapper converts to the correct type and order for you,
        # but it is normally not recommended!
        mat2 = np.array([[1, 2, ], [3, 5]])
        values.insert(13, mat2)

        self.assertTrue(values.atPoint2(0).equals(Point2(0,0), tol))
        self.assertTrue(values.atPoint3(1).equals(Point3(0,0,0), tol))
        self.assertTrue(values.atRot2(2).equals(Rot2(), tol))
        self.assertTrue(values.atPose2(3).equals(Pose2(), tol))
        self.assertTrue(values.atRot3(4).equals(Rot3(), tol))
        self.assertTrue(values.atPose3(5).equals(Pose3(), tol))
        self.assertTrue(values.atCal3_S2(6).equals(Cal3_S2(), tol))
        self.assertTrue(values.atCal3DS2(7).equals(Cal3DS2(), tol))
        self.assertTrue(values.atCal3Bundler(8).equals(Cal3Bundler(), tol))
        self.assertTrue(values.atEssentialMatrix(9).equals(E, tol))
        self.assertTrue(values.atimuBias_ConstantBias(
            10).equals(imuBias_ConstantBias(), tol))

        # special cases for Vector and Matrix:
        actualVector = values.atVector(11)
        self.assertTrue(np.allclose(vec, actualVector, tol))
        actualMatrix = values.atMatrix(12)
        self.assertTrue(np.allclose(mat, actualMatrix, tol))
        actualMatrix2 = values.atMatrix(13)
        self.assertTrue(np.allclose(mat2, actualMatrix2, tol))
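Example #13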
 def test_compute_translation_to_direction_angle_is_zero(self):
     i2Ui1_measured = Unit3(Point3(1, 0, 0))
     wTi2_estimated = Pose3(Rot3(), Point3(0, 0, 0))
     wTi1_estimated = Pose3(Rot3(), Point3(2, 0, 0))
     self.assertEqual(
         geometry_comparisons.compute_translation_to_direction_angle(
             i2Ui1_measured, wTi2_estimated, wTi1_estimated),
         0.0,
     )
Example #14
    def run(self):
        reached = 0

        r = rospy.Rate(10)
        while not rospy.is_shutdown():

            goal = self.goals[self.goalindex]
            self.sTg = Pose3(Rot3.Rz(goal[3]), Point3(goal[0], goal[1],
                                                      goal[2]))
            R = quaternion_matrix([
                self.sTcurr_ros.pose.orientation.x,
                self.sTcurr_ros.pose.orientation.y,
                self.sTcurr_ros.pose.orientation.z,
                self.sTcurr_ros.pose.orientation.w
            ])[0:3, 0:3]
            self.sTcurr = Pose3(
                Rot3(R),
                Point3(self.sTcurr_ros.pose.position.x,
                       self.sTcurr_ros.pose.position.y,
                       self.sTcurr_ros.pose.position.z))

            currTg = self.sTcurr.between(self.sTg)

            u_pitch = self.kp_x * currTg.x() + self.kd_x * (
                currTg.x() - self.lastT.x()) + min(
                    max(-0.2, self.ki_x * self.ix), 0.2)
            u_roll = self.kp_y * currTg.y() + self.kd_y * (currTg.y() -
                                                           self.lastT.y())
            currTg_yaw = currTg.rotation().rpy()[2]
            u_yaw = self.kp_yaw * currTg_yaw + self.kd_yaw * (currTg_yaw -
                                                              self.lastyaw)
            u_z = self.kp_z * currTg.z() + self.kd_z * (currTg.z() -
                                                        self.lastT.z())

            self.lastT = currTg
            self.lastyaw = currTg_yaw

            self.ix += currTg.x()
            self.iy += currTg.y()
            self.iz += currTg.z()
            self.iyaw += currTg_yaw

            self.move_cmd.linear.x = min(max(-1, u_pitch), 1)
            self.move_cmd.linear.y = min(max(-1, u_roll), 1)
            self.move_cmd.linear.z = min(max(-1, u_z), 1)
            self.move_cmd.angular.z = min(max(-1, u_yaw), 1)

            self.velPub.publish(self.move_cmd)
            reached = self.check(currTg)
            if (reached == 1):
                reached = 0
                self.goalindex += 1

            r.sleep()
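The check method used in the loop above is not shown; a plausible sketch (with hypothetical thresholds) that matches its use as a reached-goal predicate returning 0 or 1:

    def check(self, currTg):
        """Return 1 once the remaining offset and yaw error are small (a sketch)."""
        near = np.linalg.norm([currTg.x(), currTg.y(), currTg.z()]) < 0.1
        aligned = abs(currTg.rotation().rpy()[2]) < 0.05
        return 1 if near and aligned else 0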
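Example #15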
    def test_find_karcher_mean_identity(self):
        """Averaging 3 identity rotations should yield the identity."""
        a1Rb1 = Rot3()
        a2Rb2 = Rot3()
        a3Rb3 = Rot3()

        aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3])
        aRb_expected = Rot3()

        aRb = gtsam.FindKarcherMean(aRb_list)
        self.gtsamAssertEquals(aRb, aRb_expected)
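Example #16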
def test_get_points_within_radius_of_cameras_negative_radius():
    """Catch degenerate input."""
    wTi0 = Pose3(Rot3(), np.zeros(3))
    wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
    wTi_list = [wTi0, wTi1]
    points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0],
                          [25, 0, 0]])
    radius = -5
    nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(
        wTi_list, points_3d, radius)
    assert nearby_points_3d is None, "Non-positive radius is not allowed"
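Example #17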
def test_get_points_within_radius_of_cameras_no_points():
    """Catch degenerate input."""

    wTi0 = Pose3(Rot3(), np.zeros(3))
    wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
    wTi_list = [wTi0, wTi1]
    points_3d = np.zeros((0, 3))
    radius = 10.0

    nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(
        wTi_list, points_3d, radius)
    assert nearby_points_3d is None, "At least one 3d point must be provided"
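Example #18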
    def setUp(self):
        """Set up the data association object and test data."""
        super().setUp()

        self.obj = DummyDataAssociation(0.5, 2)

        # set up ground truth data for comparison

        self.dummy_corr_idxs_dict = {
            (0, 1): np.array([[0, 2]]),
            (1, 2): np.array([[2, 3], [4, 5], [7, 9]]),
            (0, 2): np.array([[1, 8]]),
        }
        self.keypoints_list = [
            Keypoints(coordinates=np.array([[12, 16], [13, 18], [0, 10]])),
            Keypoints(coordinates=np.array([
                [8, 2],
                [16, 14],
                [22, 23],
                [1, 6],
                [50, 50],
                [16, 12],
                [82, 121],
                [39, 60],
            ])),
            Keypoints(coordinates=np.array([
                [1, 1],
                [8, 13],
                [40, 6],
                [82, 21],
                [1, 6],
                [12, 18],
                [15, 14],
                [25, 28],
                [7, 10],
                [14, 17],
            ])),
        ]

        # Generate two poses for use in triangulation tests
        # Looking along X-axis, 1 meter above ground plane (x-y)
        upright = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
        pose1 = Pose3(upright, Point3(0, 0, 1))

        # create second camera 1 meter to the right of first camera
        pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))

        self.poses = Pose3Vector()
        self.poses.append(pose1)
        self.poses.append(pose2)

        # landmark ~5 meters in front of camera
        self.expected_landmark = Point3(5, 0.5, 1.2)
Exemple #19
0
    def test_compare_global_poses_scaled_squares(self):
        """Make sure a big and small square can be aligned.

        The u's represent a big square (10x10), and the v's represent a small square (4x4).
        """
        R0 = Rotation.from_euler("z", 0, degrees=True).as_matrix()
        R90 = Rotation.from_euler("z", 90, degrees=True).as_matrix()
        R180 = Rotation.from_euler("z", 180, degrees=True).as_matrix()
        R270 = Rotation.from_euler("z", 270, degrees=True).as_matrix()

        wTu0 = Pose3(Rot3(R0), np.array([2, 3, 0]))
        wTu1 = Pose3(Rot3(R90), np.array([12, 3, 0]))
        wTu2 = Pose3(Rot3(R180), np.array([12, 13, 0]))
        wTu3 = Pose3(Rot3(R270), np.array([2, 13, 0]))

        wTi_list = [wTu0, wTu1, wTu2, wTu3]

        wTv0 = Pose3(Rot3(R0), np.array([4, 3, 0]))
        wTv1 = Pose3(Rot3(R90), np.array([8, 3, 0]))
        wTv2 = Pose3(Rot3(R180), np.array([8, 7, 0]))
        wTv3 = Pose3(Rot3(R270), np.array([4, 7, 0]))

        wTi_list_ = [wTv0, wTv1, wTv2, wTv3]

        pose_graphs_equal = geometry_comparisons.compare_global_poses(
            wTi_list, wTi_list_)
        self.assertTrue(pose_graphs_equal)
Example #20
    def test_simple(self):
        """Test a simple case with three relative rotations."""

        i2Ri1_dict = {
            (1, 0): Rot3.RzRyRx(0, np.deg2rad(30), 0),
            (2, 1): Rot3.RzRyRx(0, 0, np.deg2rad(20)),
        }

        expected_wRi_list = [
            Rot3.RzRyRx(0, 0, 0),
            Rot3.RzRyRx(0, np.deg2rad(30), 0),
            i2Ri1_dict[(1, 0)].compose(i2Ri1_dict[(2, 1)]),
        ]

        self.__execute_test(i2Ri1_dict, expected_wRi_list)
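Example #21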
    def test_compute_relative_rotation_angle(self):
        """Tests the relative angle between two rotations."""

        R_1 = Rot3.RzRyRx(0, np.deg2rad(45), np.deg2rad(22.5))
        R_2 = Rot3.RzRyRx(0, np.deg2rad(90), np.deg2rad(22.5))

        # returns angle in degrees
        computed_deg = geometry_comparisons.compute_relative_rotation_angle(
            R_1, R_2)
        expected_deg = 45

        np.testing.assert_allclose(computed_deg,
                                   expected_deg,
                                   rtol=1e-3,
                                   atol=1e-3)
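Example #22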
 def test_load_poses_from_file(self):
     """Test load psoes from file."""
     file_name = self.data_directory+"result/poses.dat"
     poses = load_poses_from_file(file_name)
     # """Test pose output"""
     delta_z = 1
     num_frames = 3
     wRc = Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0)
     for i in range(num_frames):
         # Create ground truth poses
         expected_pose_i = Pose3(wRc, Point3(0, delta_z*i, 2))
         # Get actual poses
         rotation = Rot3(np.array(poses[i][3:]).reshape(3, 3))
         actual_pose_i = Pose3(rotation, Point3(np.array(poses[i][0:3])))
         self.gtsamAssertEquals(actual_pose_i, expected_pose_i, 1e-6)
Example #23
    def setUp(self):
        """ Set up two camera poses """
        # Looking along X-axis, 1 meter above ground plane (x-y)
        upright = Rot3.Ypr(-np.pi / 2, 0., -np.pi / 2)
        pose1 = Pose3(upright, Point3(0, 0, 1))

        # create second camera 1 meter to the right of first camera
        pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))
        # twoPoses
        self.poses = Pose3Vector()
        self.poses.append(pose1)
        self.poses.append(pose2)

        # landmark ~5 meters in front of camera
        self.landmark = Point3(5, 0.5, 1.2)
Example #24
    def run(
        self, num_images: int,
        i2Ri1_dict: Dict[Tuple[int, int],
                         Optional[Rot3]]) -> List[Optional[Rot3]]:
        """Run the rotation averaging.

        Args:
            num_images: number of poses.
            i2Ri1_dict: relative rotations as dictionary (i1, i2): i2Ri1.

        Returns:
            Global rotations for each camera pose, i.e. wRi, as a list. The number of entries in the list is
                `num_images`. The list may contain `None` where the global rotation could not be computed (either
                underconstrained system or ill-constrained system).
        """
        if len(i2Ri1_dict) == 0:
            return [None] * num_images

        # create the random seed using relative rotations
        seed_rotation = next(iter(i2Ri1_dict.values()))

        np.random.seed(int(1000 * seed_rotation.xyz()[0]) % (2 ** 32))  # seed must be < 2**32

        # TODO: do not assign values where we do not have any edge

        # generate dummy rotations
        wRi_list = []
        for _ in range(num_images):
            random_vector = np.random.rand(3) * 2 * np.pi
            wRi_list.append(
                Rot3.Rodrigues(random_vector[0], random_vector[1],
                               random_vector[2]))

        return wRi_list
Example #25
 def test_serialization(self):
     """Test if serialization is working normally"""
     expected = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0))
     actual = Pose3()
     serialized = expected.serialize()
     actual.deserialize(serialized)
     self.gtsamAssertEquals(expected, actual, 1e-10)
Example #26
 def test_back_projection(self):
     """Test back projection"""
     actual = self.back_end.back_projection(
         Point2(640, 360), Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0),
                                 Point3()), 20)
     expected = Point3(0, 20, 0)
     self.gtsamAssertEquals(actual, expected)
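The expected point can be reproduced directly with gtsam's pinhole camera (a sketch; Cal3_S2(60, 1280, 720) matches the calibration in the next example's setUp, and (640, 360) is its principal point, so the ray is the optical axis):

camera = PinholeCameraCal3_S2(
    Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3()),
    Cal3_S2(60, 1280, 720))
camera.backproject(Point2(640, 360), 20)  # -> (0, 20, 0), 20 m along the optical axis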
Example #27
 def setUp(self):
     """Create mapping Back-end and read csv file."""
     # Calibration for the (undistorted) input images
     fov, w, h = 60, 1280, 720
     calibration = Cal3_S2(fov, w, h)
     # Create pose priors
     wRc = Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0)  # camera to world rotation
     pose_estimates = [Pose3(wRc, Point3(0, i, 2)) for i in range(3)]
     # Create measurement noise for bundle adjustment
     sigma = 1.0
     measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, sigma)
     # Create pose prior noise
     rotation_sigma = np.radians(60)
     translation_sigma = 1
     pose_noise_sigmas = np.array([
         rotation_sigma, rotation_sigma, rotation_sigma, translation_sigma,
         translation_sigma, translation_sigma
     ])
     pose_prior_noise = gtsam.noiseModel_Diagonal.Sigmas(pose_noise_sigmas)
     # Create MappingBackEnd instance
     data_directory = 'mapping/sim_match_data/'
     min_landmark_seen = 3
     self.num_images = 3
     self.back_end = MappingBackEnd(
         data_directory, self.num_images, calibration, pose_estimates, measurement_noise, pose_prior_noise, min_landmark_seen)  # pylint: disable=line-too-long
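Example #28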
    def test_factor(self):
        """Check that the InnerConstraint factor leaves the mean unchanged."""
        # Make a graph with two variables, one between, and one InnerConstraint
        # The optimal result should satisfy the between, while moving the other
        # variable to make the mean the same as before.
        # Mean of R and R.inverse() is the identity. Let's make a BetweenFactor
        # making R12 = R*R*R, i.e. geodesic length is 3 rather than 2.
        graph = gtsam.NonlinearFactorGraph()
        R12 = R.compose(R.compose(R))
        graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL))
        keys = gtsam.KeyVector()
        keys.append(1)
        keys.append(2)
        graph.add(gtsam.KarcherMeanFactorRot3(keys))

        initial = gtsam.Values()
        initial.insert(1, R.inverse())
        initial.insert(2, R)
        expected = Rot3()

        result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
        actual = gtsam.FindKarcherMean(
            gtsam.Rot3Vector([result.atRot3(1), result.atRot3(2)]))
        self.gtsamAssertEquals(expected, actual)
        self.gtsamAssertEquals(
            R12, result.atRot3(1).between(result.atRot3(2)))
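Example #29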
 def test_find(self):
     # Check that optimizing for Karcher mean (which minimizes Between distance)
     # gets correct result.
     rotations = gtsam.Rot3Vector([R, R.inverse()])
     expected = Rot3()
     actual = gtsam.FindKarcherMean(rotations)
     self.gtsamAssertEquals(expected, actual)
Example #30
def decompose_camera_projection_matrix(M: np.ndarray) -> Tuple[np.ndarray, Pose3]:
    """Recover the camera intrinsics and pose from a 3x4 projection matrix.

    Reference: https://johnwlambert.github.io/cam-calibration/#decomposition

    Args:
        M: array of shape (3,4) representing camera projection matrix.
            Let M = [m1 | m2 | m3 | m4] = [ Q | m4]

    Returns:
        K: 3x3 upper triangular camera intrinsics matrix. The matrix diagonal is
            guaranteed to contain all positive entries.
        wTc: camera pose in world frame.
    """
    m4 = M[:, 3]
    Q = M[:3, :3]
    wtc = np.linalg.inv(-Q) @ m4

    K, cRw = scipy.linalg.rq(Q)

    # multiply columns of K by -1 if K(i,i) entry on diagonal is negative.
    # T is a matrix that scales by -1 or 1
    T = np.diag(np.sign(np.diag(K)))
    K = K @ T

    # scale rows of cRw by T, to keep a valid decomposition. T is its own inverse.
    wRc = Rot3(T @ cRw).inverse()
    wTc = Pose3(wRc, wtc)

    return K, wTc
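A quick round-trip check of this decomposition (a sketch: build M = K [cRw | ctw] from a known intrinsics matrix and pose, then recover both):

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])
wTc = Pose3(Rot3.RzRyRx(0.1, -0.2, 0.3), np.array([1.0, 2.0, 3.0]))
cTw = wTc.inverse()
M = K @ np.hstack([cTw.rotation().matrix(), cTw.translation().reshape(3, 1)])

K_recovered, wTc_recovered = decompose_camera_projection_matrix(M)
np.testing.assert_allclose(K_recovered, K, atol=1e-6)
assert wTc_recovered.equals(wTc, 1e-6)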