Code Example #1
    def test_data(self):
        """Test functions in SfmData"""
        # Create new track with 3 measurements
        i1, i2, i3 = 3, 5, 6
        uv_i1 = Point2(21.23, 45.64)

        # translating along X-axis
        uv_i2 = Point2(45.7, 45.64)
        uv_i3 = Point2(68.35, 45.64)

        # add three measurements and an arbitrary point to the track
        pt = Point3(1.0, 6.0, 2.0)
        track2 = SfmTrack(pt)
        track2.addMeasurement(i1, uv_i1)
        track2.addMeasurement(i2, uv_i2)
        track2.addMeasurement(i3, uv_i3)
        self.data.addTrack(self.tracks)
        self.data.addTrack(track2)

        # Number of tracks in SfmData is 2
        self.assertEqual(self.data.numberTracks(), 2)

        # camera idx of first measurement of second track corresponds to i1
        cam_idx, img_measurement = self.data.track(1).measurement(0)
        self.assertEqual(cam_idx, i1)
Code Example #2
File: test_Values.py Project: wykxwyc/ped_ws
    def test_values(self):
        values = Values()
        E = EssentialMatrix(Rot3(), Unit3())
        tol = 1e-9

        values.insert(0, Point2(0, 0))
        values.insert(1, Point3(0, 0, 0))
        values.insert(2, Rot2())
        values.insert(3, Pose2())
        values.insert(4, Rot3())
        values.insert(5, Pose3())
        values.insert(6, Cal3_S2())
        values.insert(7, Cal3DS2())
        values.insert(8, Cal3Bundler())
        values.insert(9, E)
        values.insert(10, imuBias_ConstantBias())

        # Special cases for Vectors and Matrices.
        # Note that gtsam's Eigen-based Vectors and Matrices require double-precision
        # floating point numbers in column-major (Fortran style) storage order,
        # whereas by default numpy.array is row-major and its dtype matches the
        # input values, e.g. np.array([1, 2, 3]) will have an 'int' dtype.
        #
        # The wrapper will automatically fix the type and storage order for you,
        # but for performance reasons it's recommended to specify the correct
        # type and storage order up front.
        # For vectors the storage order does not matter, but the dtype still does.
        vec = np.array([1., 2., 3.])
        values.insert(11, vec)
        mat = np.array([[1., 2.], [3., 4.]], order='F')
        values.insert(12, mat)
        # Test with dtype int and the default order='C'.
        # This still works, as the wrapper converts to the correct type and order for you,
        # but it is normally not recommended!
        mat2 = np.array([[1, 2], [3, 5]])
        values.insert(13, mat2)

        self.assertTrue(values.atPoint2(0).equals(Point2(0, 0), tol))
        self.assertTrue(values.atPoint3(1).equals(Point3(0, 0, 0), tol))
        self.assertTrue(values.atRot2(2).equals(Rot2(), tol))
        self.assertTrue(values.atPose2(3).equals(Pose2(), tol))
        self.assertTrue(values.atRot3(4).equals(Rot3(), tol))
        self.assertTrue(values.atPose3(5).equals(Pose3(), tol))
        self.assertTrue(values.atCal3_S2(6).equals(Cal3_S2(), tol))
        self.assertTrue(values.atCal3DS2(7).equals(Cal3DS2(), tol))
        self.assertTrue(values.atCal3Bundler(8).equals(Cal3Bundler(), tol))
        self.assertTrue(values.atEssentialMatrix(9).equals(E, tol))
        self.assertTrue(values.atimuBias_ConstantBias(
            10).equals(imuBias_ConstantBias(), tol))

        # special cases for Vector and Matrix:
        actualVector = values.atVector(11)
        self.assertTrue(np.allclose(vec, actualVector, tol))
        actualMatrix = values.atMatrix(12)
        self.assertTrue(np.allclose(mat, actualMatrix, tol))
        actualMatrix2 = values.atMatrix(13)
        self.assertTrue(np.allclose(mat2, actualMatrix2, tol))
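As a side note on the storage-order comment above: numpy's asfortranarray converts both dtype and memory layout in one call. A minimal sketch, plain numpy and independent of gtsam:

import numpy as np

# Convert an int-typed, row-major (C order) array into the float64,
# column-major (Fortran order) layout the wrapper prefers
mat_c = np.array([[1, 2], [3, 4]])
mat_f = np.asfortranarray(mat_c, dtype=np.float64)
assert mat_f.flags['F_CONTIGUOUS'] and mat_f.dtype == np.float64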
Code Example #3
    def test_optimize(self):
        """Do trivial test with three optimizer variants."""
        fg = NonlinearFactorGraph()
        model = gtsam.noiseModel.Unit.Create(2)
        fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))

        # test error at minimum
        xstar = Point2(0, 0)
        optimal_values = Values()
        optimal_values.insert(KEY1, xstar)
        self.assertAlmostEqual(0.0, fg.error(optimal_values))

        # test error at initial estimate: 0.5 * (3^2 + 3^2) = 9
        x0 = Point2(3, 3)
        initial_values = Values()
        initial_values.insert(KEY1, x0)
        self.assertAlmostEqual(9.0, fg.error(initial_values), delta=1e-3)

        # optimize parameters
        ordering = Ordering()
        ordering.push_back(KEY1)

        # Gauss-Newton
        gnParams = GaussNewtonParams()
        gnParams.setOrdering(ordering)
        actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual1))

        # Levenberg-Marquardt
        lmParams = LevenbergMarquardtParams.CeresDefaults()
        lmParams.setOrdering(ordering)
        actual2 = LevenbergMarquardtOptimizer(fg, initial_values,
                                              lmParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual2))

        # Levenberg-Marquardt with an iterative (PCG) linear solver
        lmParams = LevenbergMarquardtParams.CeresDefaults()
        lmParams.setLinearSolverType("ITERATIVE")
        cgParams = PCGSolverParameters()
        cgParams.setPreconditionerParams(DummyPreconditionerParameters())
        lmParams.setIterativeParams(cgParams)
        actual2 = LevenbergMarquardtOptimizer(fg, initial_values,
                                              lmParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual2))

        # Dogleg
        dlParams = DoglegParams()
        dlParams.setOrdering(ordering)
        actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual3))

        # Graduated Non-Convexity (GNC)
        gncParams = GncLMParams()
        actual4 = GncLMOptimizer(fg, initial_values, gncParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual4))
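As a cross-check on the 9.0 expected above: gtsam factor errors follow the 0.5 * ||r||^2 convention, so for the unit-noise Point2 prior the error at the initial estimate (3, 3) works out by hand. A few lines of plain numpy reproduce it:

import numpy as np

residual = np.array([3.0, 3.0])  # initial estimate minus the prior mean (0, 0)
error = 0.5 * residual.dot(residual)  # 0.5 * (9 + 9) = 9.0
assert abs(error - 9.0) < 1e-12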
Code Example #4
File: test_Pose2.py Project: borglab/gtsam
    def test_transformFrom(self):
        """Test transformFrom method."""
        pose = Pose2(2, 4, -math.pi / 2)
        actual = pose.transformFrom(Point2(2, 1))
        expected = Point2(3, 2)
        self.gtsamAssertEquals(actual, expected, 1e-6)

        # multi-point version
        points = np.stack([Point2(2, 1), Point2(2, 1)]).T
        actual_array = pose.transformFrom(points)
        self.assertEqual(actual_array.shape, (2, 2))
        expected_array = np.stack([expected, expected]).T
        np.testing.assert_allclose(actual_array, expected_array, atol=1e-6)
Code Example #5
    def create_initial_estimate(self, pose_indices):
        """Create initial estimate from ???."""
        wRc = Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0)  # camera to world rotation
        depth = 20

        initial_estimate = gtsam.Values()

        # Create dictionary for initial estimation
        for img_idx, features in enumerate(self._image_features):
            for kp_idx, keypoint in enumerate(features.keypoints):
                if self.image_points[img_idx].kp_3d_exist(kp_idx):
                    landmark_id = self.image_points[img_idx].kp_3d(kp_idx)
                    # Filter invalid landmarks
                    if self._landmark_estimates[
                            landmark_id].seen >= self._min_landmark_seen:
                        key_point = Point2(keypoint[0], keypoint[1])
                        key = P(landmark_id)
                        if not initial_estimate.exists(key):
                            # do back-projection
                            pose = Pose3(wRc,
                                         Point3(0, self.delta_z * img_idx, 2))
                            landmark_3d_point = self.back_projection(
                                key_point, pose, depth)
                            initial_estimate.insert(P(landmark_id),
                                                    landmark_3d_point)

        # Initial estimate for poses
        for idx in pose_indices:
            pose_i = Pose3(wRc, Point3(0, self.delta_z * idx, 2))
            initial_estimate.insert(X(idx), pose_i)

        return initial_estimate
Code Example #6
    def superpoint_generator(self, image):
        """Use superpoint to extract features in the image
        Returns:
            superpoint - Nx2 (gtsam.Point2) numpy array of 2D point observations.
            descriptors - Nx256 numpy array of corresponding unit normalized descriptors.

        Refer to /SuperPointPretrainedNetwork/demo_superpoint for more information about the parameters
        Output of SuperpointFrontend.run():
          corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
          desc - 256xN numpy array of corresponding unit normalized descriptors.
          heatmap - HxW numpy heatmap in range [0,1] of point confidences.
        """
        fe = SuperPointFrontend(weights_path="SuperPointPretrainedNetwork/superpoint_v1.pth",
                                nms_dist=4,
                                conf_thresh=0.015,
                                nn_thresh=0.7,
                                cuda=False)
        superpoints, descriptors, _ = fe.run(image)

        # Transform superpoints from a 3xN into an Nx2 numpy array
        superpoints = 4 * np.transpose(superpoints[:2, :])
        superpoints = self.undistort_points(superpoints)

        # Transform descriptors from a 256xN into an Nx256 numpy array
        descriptors = np.transpose(descriptors)

        # Transform superpoint into gtsam.Point2
        superpoints_reformat = [Point2(point) for point in superpoints]

        return Features(np.array(superpoints_reformat), descriptors)
Code Example #7
 def test_back_projection(self):
     """Test back projection"""
     actual = self.back_end.back_projection(
         Point2(640, 360), Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0),
                                 Point3()), 20)
     expected = Point3(0, 20, 0)
     self.gtsamAssertEquals(actual, expected)
Code Example #8
 def test_landmark_association(self):
     """Test landmark association."""
     superpoint_features = Features(
         [[10, 15], [300, 200], [11, 12], [14, 46]],
         [[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0]])
     observed_landmarks = Landmarks(
         [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]],
         [[0, 0, 1], [1, 0, 0], [0, 2, 2], [0, 1, 0]])
     observed_landmarks.load_keypoints([[8, 16], [14, 90], [114, 72],
                                        [300, 200]])
     expected_observations = [(Point2(10, 15), Point3(1, 1, 1)),
                              (Point2(300, 200), Point3(4, 4, 4))]
     actual_observations = self.trajectory_estimator.landmark_association(
         superpoint_features, observed_landmarks)
     for i, observation in enumerate(expected_observations):
         self.gtsamAssertEquals(actual_observations[i][0], observation[0])
         self.gtsamAssertEquals(actual_observations[i][1], observation[1])
Code Example #9
    def calculate_vision_area(self):
        """Calculate the vision area of the current pose. """
        for y in [0, 2.5, 5, 7.5, 10]:
            vision_area = []
            point_1 = self.back_project(Point2(0, 0), self.calibration, 10, y)
            vision_area.append(point_1)
            point_2 = self.back_project(Point2(640, 0), self.calibration, 10,
                                        y)
            vision_area.append(point_2)
            point_3 = self.back_project(Point2(640, 480), self.calibration, 10,
                                        y)
            vision_area.append(point_3)
            point_4 = self.back_project(Point2(0, 480), self.calibration, 10,
                                        y)
            vision_area.append(point_4)

            self.vision_area_list.append(vision_area)
Code Example #10
    def test_back_projection(self):
        """Test back projection"""
        fov, width, height = 60, 1280, 720
        calibration = Cal3_S2(fov, width, height)
        actual = back_projection(
            calibration, Point2(640, 360),
            Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3()), 20)
        expected = Point3(0, 20, 0)
        self.gtsamAssertEquals(actual, expected)

        fov, width, height = 60, 640, 480
        calibration = Cal3_S2(fov, width, height)
        actual = back_projection(
            calibration, Point2(320, 240),
            Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3()), 20)
        expected = Point3(0, 20, 0)
        self.gtsamAssertEquals(actual, expected)
Code Example #11
 def test_back_projection(self):
     """test back projection"""
     cal = Cal3DS2(fx=1, fy=1, s=0, u0=320, v0=240, k1=0, k2=0, p1=0, p2=0)
     point = Point2(320, 240)
     pose = Pose3(Rot3(np.array([[0, 1, 0], [0, 0, -1], [1, 0, 0]]).T),
                  Point3(0, 2, 1.2))
     actual_point = back_projection(cal, point, pose)
      self.assertTrue(actual_point.equals(Point3(10, 2, 1.2), tol=1e-9))
Code Example #12
File: sim2.py Project: Alexma3312/Blimp
 def point(self, point):
     """ Calculate the similarity transform of a Point2
     Parameters:
         point - Point2 object
     Returns:
         Point2 object
     """
     return Point2(self._s * np.dot(self._R.matrix(), point.vector()) +
                   self._t.vector())
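As a sanity check of the s * R * p + t formula, the first source/destination pair from the test_align example further below (a 90-degree rotation, s = 2, t = (10, 10)) can be reproduced in plain numpy:

import math
import numpy as np

theta, s, t = math.radians(90), 2.0, np.array([10.0, 10.0])
R = np.array([[math.cos(theta), -math.sin(theta)],
              [math.sin(theta),  math.cos(theta)]])
p = np.array([0.5, 0.5])                        # s_point1 in test_align
assert np.allclose(s * R @ p + t, [9.0, 11.0])  # d_point1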
Code Example #13
    def back_projection(self, key_point=Point2(), pose=Pose3(), depth=10):
        """Back-project a 2D key point to a 3D point at the given depth."""
        # Normalize input key_point
        pn = self.cal.calibrate(key_point)

        # Transfer normalized key_point into homogeneous coordinate and scale with depth
        ph = Point3(depth * pn.x(), depth * pn.y(), depth)

        # Transfer the point into the world coordinate
        return pose.transform_from(ph)
Code Example #14
def calculate_vision_area(calibration, pose, depth, image_size):
    """Calculate the vision area of the current pose. 
        Return:
            vision_area - [[x,y,z]], back project the four image corners [0,0], [width-1,0],[width-1,height-1],[0,height-1]
    """
    vision_area = []
    point_1 = back_projection(calibration, Point2(0, 0), pose, depth)
    vision_area.append([point_1.x(), point_1.y(), point_1.z()])
    point_2 = back_projection(calibration, Point2(image_size[0] - 1, 0), pose,
                              depth)
    vision_area.append([point_2.x(), point_2.y(), point_2.z()])
    point_3 = back_projection(calibration,
                              Point2(image_size[0] - 1, image_size[1] - 1),
                              pose, depth)
    vision_area.append([point_3.x(), point_3.y(), point_3.z()])
    point_4 = back_projection(calibration, Point2(0, image_size[1] - 1), pose,
                              depth)
    vision_area.append([point_4.x(), point_4.y(), point_4.z()])

    return vision_area
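A possible invocation, assuming the back_projection helper shown later in this section and gtsam's Cal3_S2(fov, width, height) constructor (the pose matches the one used in the back-projection tests above):

# assumes: from gtsam import Cal3_S2, Pose3, Rot3, Point3
calibration = Cal3_S2(60, 640, 480)  # 60-degree fov, 640x480 image
pose = Pose3(Rot3(1, 0, 0, 0, 0, 1, 0, -1, 0), Point3())  # camera looking along +y
corners = calculate_vision_area(calibration, pose, 10, (640, 480))
assert len(corners) == 4  # one [x, y, z] entry per image corner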
Code Example #15
File: sim2.py Project: Alexma3312/Blimp
 def pose(self, pose):
     """ Calculate the similarity transform of a Pose2
     Parameters:
         pose - Pose2 object
     Returns:
         Pose2 object
     """
     R = self._R.compose(pose.rotation())
     translation = self._s * \
         self._R.rotate(pose.translation()).vector() + self._t.vector()
     return Pose2(R.theta(), Point2(translation))
Code Example #16
File: test_point3d_initializer.py Project: asa/gtsfm
def get_track_with_duplicate_measurements() -> List[SfmMeasurement]:
    """Generates a track with 2 measurements in an image."""

    new_measurements = copy.deepcopy(MEASUREMENTS)

    new_measurements.append(
        SfmMeasurement(
            new_measurements[0].i,
            new_measurements[0].uv + Point2(2.0, -3.0),
        ))

    return new_measurements
Code Example #17
File: sim2.py Project: Alexma3312/Blimp
    def test_transform(self):
        """Test similarity transform on poses and points."""
        expected_poses, expected_points = self.d_map

        sim2 = Similarity2(Rot2(math.radians(90)), Point2(10, 10), 2)
        poses, points = self.s_map
        for i, point in enumerate(points):
            point = sim2.point(point)
            self.assert_gtsam_equals(expected_points[i], point)

        for i, pose in enumerate(poses):
            pose = sim2.pose(pose)
            self.assert_gtsam_equals(expected_poses[i], pose)
Code Example #18
    def setUp(self):
        """Set up the optimization problem and ordering"""
        # create graph
        self.fg = NonlinearFactorGraph()
        model = gtsam.noiseModel.Unit.Create(2)
        self.fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))

        # test error at minimum
        xstar = Point2(0, 0)
        self.optimal_values = Values()
        self.optimal_values.insert(KEY1, xstar)
        self.assertAlmostEqual(0.0, self.fg.error(self.optimal_values))

        # test error at initial estimate: 0.5 * (3^2 + 3^2) = 9
        x0 = Point2(3, 3)
        self.initial_values = Values()
        self.initial_values.insert(KEY1, x0)
        self.assertAlmostEqual(9.0, self.fg.error(self.initial_values), delta=1e-3)

        # optimize parameters
        self.ordering = Ordering()
        self.ordering.push_back(KEY1)
Code Example #19
def robot2world(robotPose, objectPose):
    """Transform objectPose, given in the robot frame, into the world frame."""
    rx, ry, rth = robotPose
    ox, oy, oth = objectPose

    rPos = Pose2(vector3(rx, ry, 0))
    rAngle = Pose2(vector3(0, 0, np.radians(rth)))

    # world pose of the robot base
    rPose = compose(rPos, rAngle)
    # object position expressed in the world frame
    worldPoint = rPose.transformFrom(Point2(ox, oy))
    x, y = worldPoint.x(), worldPoint.y()
    # headings add when going from the robot frame to the world frame
    return x, y, (rth + oth) % 360
Code Example #20
def back_projection(cal, key_point=Point2(), pose=Pose3(), depth=10, undist=0):
    """Back project a 2d key point to 3d based on depth."""
    # Normalize input key_point
    if undist == 0:
        point = cal.calibrate(key_point, tol=1e-5)
    else:
        point = cal.calibrate(key_point)

    # Transfer normalized key_point into homogeneous coordinate and scale with depth
    point_3d = Point3(depth * point.x(), depth * point.y(), depth)

    # Transfer the point into the world coordinate
    return pose.transform_from(point_3d)
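For intuition, the same three steps (normalize, scale by depth, move to the world frame) can be written in plain numpy. This is an illustrative sketch assuming a pinhole camera with calibration matrix K and a world-from-camera rotation/translation (wRc, wtc), not the gtsam implementation:

import numpy as np

def back_project_np(K, uv, wRc, wtc, depth):
    """Back-project pixel uv to a 3D world point at the given depth (sketch)."""
    # Normalize: pn = K^-1 [u, v, 1]^T, i.e. (x/z, y/z, 1)
    pn = np.linalg.inv(K) @ np.array([uv[0], uv[1], 1.0])
    # Scale by depth to get the point in camera coordinates
    p_cam = depth * pn
    # Transform into world coordinates
    return wRc @ p_cam + wtc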
Code Example #21
File: test_point3d_initializer.py Project: asa/gtsfm
def get_track_with_one_outlier() -> List[SfmMeasurement]:
    """Generates a track with outlier measurement."""
    # perturb one measurement
    idx_to_perturb = 5

    perturbed_measurements = copy.deepcopy(MEASUREMENTS)

    original_measurement = perturbed_measurements[idx_to_perturb]
    perturbed_measurements[idx_to_perturb] = SfmMeasurement(
        original_measurement.i,
        perturbed_measurements[idx_to_perturb].uv + Point2(20.0, -10.0),
    )

    return perturbed_measurements
Code Example #22
File: sim2.py Project: Alexma3312/Blimp
    def test_align_case_1(self):
        """Test generating similarity transform with gtsam pose2 align test case 1 - translation only."""
        # Create expected sim2
        expected_R = Rot2(math.radians(0))
        expected_s = 1
        expected_t = Point2(10, 10)

        # Create source points
        s_point1 = Point2(0, 0)
        s_point2 = Point2(20, 10)

        # Create destination points
        d_point1 = Point2(10, 10)
        d_point2 = Point2(30, 20)

        # Align
        point_pairs = [[s_point1, d_point1], [s_point2, d_point2]]
        sim2 = Similarity2()
        sim2.align(point_pairs)

        # Check that the actual sim2 equals the expected sim2
        self.assertTrue(expected_R.equals(sim2._R, 1e-6))
        self.assert_gtsam_equals(sim2._t, expected_t)
        self.assertEqual(sim2._s, expected_s)
Code Example #23
    def test_tracks(self):
        """Test functions in SfmTrack"""
        # measurement is of format (camera_idx, imgPoint)
        # create arbitrary camera indices for two cameras
        i1, i2 = 4, 5

        # create arbitrary image measurements for cameras i1 and i2
        uv_i1 = Point2(12.6, 82)

        # translating point uv_i1 along X-axis
        uv_i2 = Point2(24.88, 82)

        # add measurements to the track
        self.tracks.addMeasurement(i1, uv_i1)
        self.tracks.addMeasurement(i2, uv_i2)

        # Number of measurements in the track is 2
        self.assertEqual(self.tracks.numberMeasurements(), 2)

        # camera_idx in the first measurement of the track corresponds to i1
        cam_idx, img_measurement = self.tracks.measurement(0)
        self.assertEqual(cam_idx, i1)
        np.testing.assert_array_almost_equal(Point3(0., 0., 0.),
                                             self.tracks.point3())
Code Example #24
File: sim2.py Project: Alexma3312/Blimp
    def test_align(self):
        """Test generating similarity transform with Point2 pairs."""
        # Create expected sim2
        expected_R = Rot2(math.radians(90))
        expected_s = 2
        expected_t = Point2(10, 10)

        # Create source points
        s_point1 = Point2(0.5, 0.5)
        s_point2 = Point2(2, 0.5)

        # Create destination points
        d_point1 = Point2(9, 11)
        d_point2 = Point2(9, 14)

        # Align
        point_pairs = [[s_point1, d_point1], [s_point2, d_point2]]
        sim2 = Similarity2()
        sim2.align(point_pairs)

        # Check that the actual sim2 equals the expected sim2
        self.assertTrue(expected_R.equals(sim2._R, 1e-6))
        self.assertEqual(sim2._s, expected_s)
        self.assert_gtsam_equals(sim2._t, expected_t)
Code Example #25
 def back_projection(self, key_point=Point2(), pose=Pose3(), depth=20):
     """
     Back Projection Function.
      Input:
          key_point - gtsam.Point2, key point location within the image.
          pose - gtsam.Pose3, camera pose in world coordinates.
      Output:
          gtsam.Point3, landmark position in world coordinates.
     """
     # Normalize input key_point
     pn = self._calibration.calibrate(key_point)
     # Transfer normalized key_point into homogeneous coordinate and scale with depth
     ph = Point3(depth * pn.x(), depth * pn.y(), depth)
     # Transfer the point into the world coordinate
     return pose.transformFrom(ph)
Code Example #26
File: test_Pose2.py Project: borglab/gtsam
    def test_align(self) -> None:
        """Ensure estimation of the Pose2 element to align two 2d point clouds succeeds.

        Two point clouds represent horseshoe-shapes of the same size, just rotated and translated:

                |  X---X
                |  |
                |  X---X
        ------------------
                |
                |
              O | O
              | | |
              O---O
        """
        pts_a = [
            Point2(1, -3),
            Point2(1, -5),
            Point2(-1, -5),
            Point2(-1, -3),
        ]
        pts_b = [
            Point2(3, 1),
            Point2(1, 1),
            Point2(1, 3),
            Point2(3, 3),
        ]

        ab_pairs = Point2Pairs(list(zip(pts_a, pts_b)))
        aTb = Pose2.Align(ab_pairs)
        self.assertIsNotNone(aTb)

        for pt_a, pt_b in zip(pts_a, pts_b):
            pt_a_ = aTb.transformFrom(pt_b)
            np.testing.assert_allclose(pt_a, pt_a_)

        # Matrix version
        A = np.array(pts_a).T
        B = np.array(pts_b).T
        aTb = Pose2.Align(A, B)
        self.assertIsNotNone(aTb)

        for pt_a, pt_b in zip(pts_a, pts_b):
            pt_a_ = aTb.transformFrom(pt_b)
            np.testing.assert_allclose(pt_a, pt_a_)
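For reference, the closed-form estimate Pose2.Align computes (an aTb with pts_a ≈ R @ pts_b + t) can be sketched with the standard Procrustes/Umeyama recipe: subtract centroids, then take an SVD of the cross-covariance. This plain-numpy version illustrates the math and is not gtsam's actual implementation:

import numpy as np

def align_2d(pts_a, pts_b):
    """Find R, t such that pts_a ≈ R @ pts_b + t (points as Nx2 arrays)."""
    a, b = np.asarray(pts_a, float), np.asarray(pts_b, float)
    ca, cb = a.mean(axis=0), b.mean(axis=0)
    H = (b - cb).T @ (a - ca)  # 2x2 cross-covariance
    U, _, Vt = np.linalg.svd(H)
    D = np.diag([1.0, np.linalg.det(Vt.T @ U.T)])  # guard against reflections
    R = Vt.T @ D @ U.T
    t = ca - R @ cb
    return R, t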
Code Example #27
    def landmark_association(self, superpoint_features, observed_landmarks):
        """ Associate Superpoint feature points with landmark points by matching all superpoint features with projected features.
        Parameters:
            superpoint_features - A Features Object
            observed_landmarks - A Landmarks Object
        Returns:
            observations - [(Point2(), Point3())]

        """
        if (superpoint_features.get_length() == 0
                or observed_landmarks.get_length() == 0):
            print("Input data is empty.")
            return [[]]
        if self.l2_threshold < 0.0:
            raise ValueError("'l2_threshold' should be non-negative")

        observations = []
        for i, projected_point in enumerate(observed_landmarks.keypoints):
            nearby_feature_indices = []
            min_score = self.l2_threshold
            # Gather superpoint features that fall within a pixel window
            # around the current projected landmark
            for j, superpoint in enumerate(superpoint_features.keypoints):
                x_diff = abs(superpoint[0] - projected_point[0])
                y_diff = abs(superpoint[1] - projected_point[1])
                if (x_diff < self.x_distance_thresh
                        and y_diff < self.y_distance_thresh):
                    nearby_feature_indices.append(j)
            if not nearby_feature_indices:
                continue

            best_keypoint = None
            for feature_index in nearby_feature_indices:
                # Compute L2 distance. Easy since vectors are unit normalized.
                dmat = np.dot(
                    np.array(superpoint_features.descriptors[feature_index]),
                    np.array(observed_landmarks.descriptors[i]).T)
                dmat = np.sqrt(2 - 2 * np.clip(dmat, -1, 1))
                # Keep the candidate with the minimal L2 distance
                if dmat < min_score:
                    min_score = dmat
                    best_keypoint = superpoint_features.keypoints[feature_index]

            # Append at most one observation per projected landmark
            if best_keypoint is not None:
                landmark = observed_landmarks.landmarks[i]
                observations.append((Point2(best_keypoint[0],
                                            best_keypoint[1]),
                                     Point3(landmark[0], landmark[1],
                                            landmark[2])))

        return observations
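The "easy since vectors are unit normalized" comment relies on the identity ||a - b||^2 = 2 - 2 a·b for unit vectors, which is exactly what the sqrt(2 - 2 * clip(...)) line computes; a quick numeric check:

import numpy as np

rng = np.random.default_rng(0)
a, b = rng.normal(size=256), rng.normal(size=256)
a, b = a / np.linalg.norm(a), b / np.linalg.norm(b)  # unit normalize
assert np.isclose(np.linalg.norm(a - b), np.sqrt(2 - 2 * a.dot(b)))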
Code Example #28
    def bundle_adjustment(self):
        """
        Bundle Adjustment.
        Input:
            self._image_features
            self._image_points
        Output:
            result - gtsam optimization result
        """
        # Initialize factor Graph
        graph = gtsam.NonlinearFactorGraph()

        pose_indices, point_indices = self.create_index_sets()

        initial_estimate = self.create_initial_estimate(pose_indices)

        # Create Projection Factor
        sigma = 1.0
        measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, sigma)
        for img_idx, features in enumerate(self._image_features):
            for kp_idx, keypoint in enumerate(features.keypoints):
                if self.image_points[img_idx].kp_3d_exist(kp_idx):
                    landmark_id = self.image_points[img_idx].kp_3d(kp_idx)
                    if self._landmark_estimates[
                            landmark_id].seen >= self._min_landmark_seen:
                        key_point = Point2(keypoint[0], keypoint[1])
                        graph.add(
                            gtsam.GenericProjectionFactorCal3_S2(
                                key_point, measurementNoise, X(img_idx),
                                P(landmark_id), self._calibration))

        # Create priors for first two poses
        s = np.radians(60)
        poseNoiseSigmas = np.array([s, s, s, 1, 1, 1])
        posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas)
        for idx in (0, 1):
            pose_i = initial_estimate.atPose3(X(idx))
            graph.add(gtsam.PriorFactorPose3(X(idx), pose_i, posePriorNoise))

        # Optimization
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate)
        sfm_result = optimizer.optimize()

        return sfm_result, pose_indices, point_indices
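The returned Values can then be queried per variable; a hypothetical follow-up inside the same class, reusing the X and P symbol helpers the method already assumes:

# sfm_result is a gtsam.Values; the index sets come from create_index_sets()
sfm_result, pose_indices, point_indices = self.bundle_adjustment()
for idx in pose_indices:
    optimized_pose = sfm_result.atPose3(X(idx))            # refined camera pose
for landmark_id in point_indices:
    optimized_point = sfm_result.atPoint3(P(landmark_id))  # refined landmark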
Code Example #29
def world2robot(robotPose: tuple, objectPose: tuple) -> tuple:
    """Transform objectPose from the world frame into the robot frame.

    robotPose: pose of the robot base in the world frame
    objectPose: pose of the object in the world frame
    """
    rx, ry, rth = robotPose
    ox, oy, oth = objectPose
    rPos = Pose2(vector3(rx, ry, 0))
    rAngle = Pose2(vector3(0, 0, np.radians(rth)))

    # world pose of the robot base
    rPose = compose(rPos, rAngle)
    # object position expressed in the robot frame
    robotPoint = rPose.transformTo(Point2(ox, oy))
    x, y = robotPoint.x(), robotPoint.y()
    return x, y, (oth - rth) % 360
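The two pose-conversion helpers above assume small utilities named vector3 and compose; definitions along these lines (hypothetical, inferred from usage) make the snippets self-contained:

import numpy as np
from gtsam import Pose2

def vector3(x, y, z):
    """Build the (x, y, theta) vector a Pose2 can be constructed from (assumed helper)."""
    return np.array([x, y, z], dtype=float)

def compose(*poses):
    """Compose a sequence of Pose2 objects left to right (assumed helper)."""
    result = poses[0]
    for pose in poses[1:]:
        result = result.compose(pose)
    return result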
Code Example #30
File: test_Pose2.py Project: acxz/gtsam
    def test_align(self) -> None:
        """Ensure estimation of the Pose2 element to align two 2d point clouds succeeds.

        Two point clouds represent horseshoe-shapes of the same size, just rotated and translated:

                |  X---X
                |  |
                |  X---X
        ------------------
                |
                |
              O | O
              | | |
              O---O
        """
        pts_a = [
            Point2(3, 1),
            Point2(1, 1),
            Point2(1, 3),
            Point2(3, 3),
        ]
        pts_b = [
            Point2(1, -3),
            Point2(1, -5),
            Point2(-1, -5),
            Point2(-1, -3),
        ]

        ab_pairs = Point2Pairs(list(zip(pts_a, pts_b)))
        bTa = gtsam.align(ab_pairs)
        aTb = bTa.inverse()
        assert aTb is not None

        for pt_a, pt_b in zip(pts_a, pts_b):
            pt_a_ = aTb.transformFrom(pt_b)
            assert np.allclose(pt_a, pt_a_)