Example #1
    def test_H(self):
        # Setup feature track
        track_id = 0
        frame_id = 3
        data0 = KeyPoint(np.array([0.0, 0.0]), 21)
        data1 = KeyPoint(np.array([0.0, 0.0]), 21)
        track = FeatureTrack(track_id, frame_id, data0, data1)

        # Setup track cam states
        self.msckf.augment_state()
        self.msckf.augment_state()
        self.msckf.augment_state()
        track_cam_states = self.msckf.track_cam_states(track)

        # Feature position
        p_G_f = np.array([[1.0], [2.0], [3.0]])

        # Test
        H_f_j, H_x_j = self.msckf.H(track, track_cam_states, p_G_f)

        # Assert
        self.assertEqual(H_f_j.shape, (4, 3))
        self.assertEqual(H_x_j.shape, (4, 39))

        # Plot matrix
        debug = False
        # debug = True
        if debug:
            plt.matshow(H_f_j)
            plt.show()
            plt.matshow(H_x_j)
            plt.show()
Example #2
    def test_calculate_residuals(self):
        # Camera states
        # -- Camera state 0
        p_G_C0 = np.array([0.0, 0.0, 0.0])
        rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C0G = euler2quat(rpy_C0G)
        C_C0G = C(q_C0G)
        # -- Camera state 1
        p_G_C1 = np.array([1.0, 1.0, 0.0])
        rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C1G = euler2quat(rpy_C1G)
        C_C1G = C(q_C1G)

        # Features
        landmark = np.array([0.0, 0.0, 10.0])
        kp1 = self.cam_model.project(landmark, C_C0G, p_G_C0)[0:2]
        kp2 = self.cam_model.project(landmark, C_C1G, p_G_C1)[0:2]

        # Setup feature track
        track_id = 0
        frame_id = 1
        data1 = KeyPoint(kp1, 21)
        data2 = KeyPoint(kp2, 21)
        track = FeatureTrack(track_id, frame_id, data1, data2)

        # Setup track cam states
        self.msckf.augment_state()
        self.msckf.min_track_length = 2
        self.msckf.cam_states[0].p_G = p_G_C0.reshape((3, 1))
        self.msckf.cam_states[0].q_CG = q_C0G.reshape((4, 1))
        self.msckf.cam_states[1].p_G = p_G_C1.reshape((3, 1))
        self.msckf.cam_states[1].q_CG = q_C1G.reshape((4, 1))

        # Test
        self.msckf.calculate_residuals([track])
Example #3
    def extract_descriptors(self, frame, kps):
        """Extract feature descriptors

        Parameters
        ----------
        frame : np.array
            Image frame
        kps : list of KeyPoint
            Keypoints

        Returns
        -------
        kps : list of KeyPoint
            Keypoints (converted back from OpenCV keypoints)
        des : np.array
            Feature descriptors, one row per keypoint

        """
        cv_kps = convert2cvkeypoints(kps)
        cv_kps, des = self.orb.compute(frame, cv_kps)

        # Convert OpenCV KeyPoint to KeyPoint
        kps = []
        for cv_kp in cv_kps:
            kps.append(KeyPoint(cv_kp.pt,
                                cv_kp.size,
                                cv_kp.angle,
                                cv_kp.response,
                                cv_kp.octave,
                                cv_kp.class_id))

        return kps, des
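
The convert2cvkeypoints helper is not shown in these examples; a minimal sketch of the conversion it presumably performs, going the opposite way of the loop above (the attribute names on the custom KeyPoint are assumed from the constructor calls seen in this file):

import cv2

def convert2cvkeypoints_sketch(kps):
    # Illustrative only: map custom KeyPoint objects back to cv2.KeyPoint
    return [cv2.KeyPoint(float(kp.pt[0]), float(kp.pt[1]), float(kp.size),
                         kp.angle, kp.response, kp.octave, kp.class_id)
            for kp in kps]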
Example #4
    def detect_keypoints(self, frame):
        """Detect keypoints

        Parameters
        ----------
        frame : np.array
            Image frame

        Returns
        -------
        kps : list of KeyPoint
            Keypoints

        """
        # Detect keypoints
        keypoints = self.orb.detect(frame, None)

        # Convert OpenCV KeyPoint to KeyPoint
        kps = []
        for kp in keypoints:
            kps.append(KeyPoint(kp.pt,
                                kp.size,
                                kp.angle,
                                kp.response,
                                kp.octave))

        return kps
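
Both methods above are thin wrappers around OpenCV's ORB; for reference, a self-contained sketch of the equivalent raw calls (the ORB settings and the synthetic image are assumptions):

import cv2
import numpy as np

orb = cv2.ORB_create(nfeatures=1000)          # stands in for self.orb (settings assumed)
frame = np.zeros((480, 640), dtype=np.uint8)  # synthetic image frame
cv2.rectangle(frame, (200, 150), (440, 330), 255, -1)  # add some corners to detect

cv_kps = orb.detect(frame, None)              # what detect_keypoints() wraps
cv_kps, des = orb.compute(frame, cv_kps)      # what extract_descriptors() wraps
# cv_kps: list of cv2.KeyPoint; des: (N, 32) uint8 array, or None if nothing was detected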
Example #5
    def test_residualize_track(self):
        # Camera states
        # -- Camera state 0
        p_G_C0 = np.array([0.0, 0.0, 0.0])
        rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C0G = euler2quat(rpy_C0G)
        C_C0G = C(q_C0G)
        # -- Camera state 1
        p_G_C1 = np.array([1.0, 1.0, 0.0])
        rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C1G = euler2quat(rpy_C1G)
        C_C1G = C(q_C1G)

        # Features
        landmark = np.array([0.0, 0.0, 10.0])
        kp1 = self.cam_model.project(landmark, C_C0G, p_G_C0)[0:2]
        kp2 = self.cam_model.project(landmark, C_C1G, p_G_C1)[0:2]

        # Setup feature track
        track_id = 0
        frame_id = 1
        data1 = KeyPoint(kp1, 21)
        data2 = KeyPoint(kp2, 21)
        track = FeatureTrack(track_id, frame_id, data1, data2)

        # Setup track cam states
        self.msckf.augment_state()
        self.msckf.min_track_length = 2
        self.msckf.cam_states[0].p_G = p_G_C0.reshape((3, 1))
        self.msckf.cam_states[0].q_CG = q_C0G.reshape((4, 1))
        self.msckf.cam_states[1].p_G = p_G_C1.reshape((3, 1))
        self.msckf.cam_states[1].q_CG = q_C1G.reshape((4, 1))

        print(self.msckf.cam_states[0])
        print(self.msckf.cam_states[1])

        # Test
        # self.msckf.enable_ns_trick = False
        H_o_j, r_o_j, R_o_j = self.msckf.residualize_track(track)

        # Plot matrices for visual inspection
        debug = False
        # debug = True
        if debug:
            plt.matshow(H_o_j)
            plt.show()
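
The commented enable_ns_trick flag refers to the usual MSCKF step of projecting the measurement onto the left null space of H_f_j so that the feature-position error drops out of the update; a minimal numpy/scipy sketch of that projection, assuming H_f_j, H_x_j and r_j are produced by H() and track_residuals() as in the tests above:

import numpy as np
from scipy.linalg import null_space

def nullspace_project(H_f_j, H_x_j, r_j):
    # Columns of A span the left null space of H_f_j (A.T @ H_f_j == 0)
    A = null_space(H_f_j.T)
    H_o_j = A.T @ H_x_j   # reduced measurement Jacobian
    r_o_j = A.T @ r_j     # reduced residual, independent of the feature position error
    return H_o_j, r_o_j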
Example #6
    def test_track_cam_states(self):
        # Setup feature track
        track_id = 0
        frame_id = 3
        data0 = KeyPoint(np.array([0.0, 0.0]), 21)
        data1 = KeyPoint(np.array([0.0, 0.0]), 21)
        track = FeatureTrack(track_id, frame_id, data0, data1)

        # Push dummy camera states into MSCKF
        self.msckf.augment_state()
        self.msckf.augment_state()
        self.msckf.augment_state()
        self.msckf.augment_state()

        # Test
        track_cam_states = self.msckf.track_cam_states(track)

        # Assert
        self.assertEqual(len(track_cam_states), track.tracked_length())
        self.assertEqual(track_cam_states[0].frame_id, track.frame_start)
        self.assertEqual(track_cam_states[1].frame_id, track.frame_end)
Example #7
    def test_track_residuals(self):
        # Setup track cam states
        # -- Camera state 0
        self.msckf.imu_state.p_G = np.array([0.0, 0.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()
        # -- Camera state 1
        self.msckf.imu_state.p_G = np.array([1.0, 1.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()
        # -- Camera state 2
        self.msckf.imu_state.p_G = np.array([2.0, 2.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()
        # -- Camera state 3
        self.msckf.imu_state.p_G = np.array([3.0, 3.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()

        # Setup feature
        p_G_f = np.array([[10.0], [1.0], [0.0]])

        # Setup feature track
        C_C2G = C(self.msckf.cam_states[2].q_CG)
        C_C3G = C(self.msckf.cam_states[3].q_CG)
        p_G_C2 = self.msckf.cam_states[2].p_G
        p_G_C3 = self.msckf.cam_states[3].p_G
        kp1 = self.cam_model.project(p_G_f, C_C2G, p_G_C2.reshape((3, 1)))[0:2]
        kp2 = self.cam_model.project(p_G_f, C_C3G, p_G_C3.reshape((3, 1)))[0:2]
        track_id = 0
        frame_id = 3
        data0 = KeyPoint(kp1, 21)
        data1 = KeyPoint(kp2, 21)
        track = FeatureTrack(track_id, frame_id, data0, data1)

        # Test
        track_cam_states = self.msckf.track_cam_states(track)
        r_j = self.msckf.track_residuals(self.cam_model, track,
                                         track_cam_states, p_G_f)

        # Assert
        self.assertEqual(r_j.shape, (4, 1))
        self.assertTrue(np.allclose(r_j, np.zeros((4, 1))))
Example #8
    def test_estimate(self):
        estimator = DatasetFeatureEstimator()

        # Pinhole Camera model
        image_width = 640
        image_height = 480
        fov = 60
        fx, fy = focal_length(image_width, image_height, fov)
        cx, cy = (image_width / 2.0, image_height / 2.0)
        K = camera_intrinsics(fx, fy, cx, cy)
        cam_model = PinholeCameraModel(image_width, image_height, K)

        # Camera states
        track_cam_states = []
        # -- Camera state 0
        p_G_C0 = np.array([0.0, 0.0, 0.0])
        rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C0G = euler2quat(rpy_C0G)
        C_C0G = C(q_C0G)
        track_cam_states.append(CameraState(0, q_C0G, p_G_C0))

        # -- Camera state 1
        p_G_C1 = np.array([1.0, 0.0, 0.0])
        rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C1G = euler2quat(rpy_C1G)
        C_C1G = C(q_C1G)
        track_cam_states.append(CameraState(1, q_C1G, p_G_C1))

        # Feature track
        p_G_f = np.array([[0.0], [0.0], [10.0]])
        kp0 = KeyPoint(cam_model.project(p_G_f, C_C0G, p_G_C0)[0:2], 0)
        kp1 = KeyPoint(cam_model.project(p_G_f, C_C1G, p_G_C1)[0:2], 0)
        track = FeatureTrack(0, 1, kp0, kp1, ground_truth=p_G_f)
        estimate = estimator.estimate(cam_model, track, track_cam_states)

        self.assertTrue(np.allclose(p_G_f.ravel(), estimate.ravel(), atol=0.1))
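
A minimal sketch of the two-view linear (DLT) triangulation that an estimator such as DatasetFeatureEstimator.estimate is expected to perform under the conventions used above (C_CG rotates global into camera frame, p_G_C is the camera position in the global frame); this is an illustration, not the estimator's actual implementation:

import numpy as np

def triangulate_dlt(K, C_C0G, p_G_C0, C_C1G, p_G_C1, kp0, kp1):
    # Projection matrices P = K [C_CG | -C_CG p_G_C]
    P0 = K @ np.hstack((C_C0G, -C_C0G @ p_G_C0.reshape(3, 1)))
    P1 = K @ np.hstack((C_C1G, -C_C1G @ p_G_C1.reshape(3, 1)))

    # DLT system: rows u * p3 - p1 and v * p3 - p2 for each view
    A = np.vstack([kp0[0] * P0[2] - P0[0],
                   kp0[1] * P0[2] - P0[1],
                   kp1[0] * P1[2] - P1[0],
                   kp1[1] * P1[2] - P1[1]])

    # Solution is the right singular vector with the smallest singular value
    X = np.linalg.svd(A)[2][-1]
    return (X[:3] / X[3]).reshape(3, 1)   # feature position in the global frame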
Example #9
    def track_features(self, image_ref, image_cur):
        """Track Features

        Parameters
        ----------
        image_ref : np.array
            Reference image
        image_cur : np.array
            Current image

        """
        # Re-detect new feature points if too few
        if len(self.tracks_tracking) < self.min_nb_features:
            self.tracks_tracking = []  # reset alive feature tracks
            self.detect(image_ref)

        # LK parameters
        win_size = (21, 21)
        max_level = 2
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.03)

        # Convert reference keypoints to numpy array
        self.kp_ref = self.last_keypoints()

        # Perform LK tracking
        lk_params = {
            "winSize": win_size,
            "maxLevel": max_level,
            "criteria": criteria
        }
        self.kp_cur, statuses, err = cv2.calcOpticalFlowPyrLK(
            image_ref, image_cur, self.kp_ref, None, **lk_params)

        # Filter out bad matches (choose only good keypoints)
        status = statuses.reshape(statuses.shape[0])
        still_alive = []
        for i in range(len(status)):
            if status[i] == 1:
                track_id = self.tracks_tracking[i]
                still_alive.append(track_id)
                kp = KeyPoint(self.kp_cur[i], 0)
                self.tracks[track_id].update(self.frame_id, kp)

        self.tracks_tracking = still_alive
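
Note that cv2.calcOpticalFlowPyrLK expects the previous points as a float32 array of shape (N, 1, 2); last_keypoints() presumably packs the latest KeyPoint of each live track along these lines (a sketch, not the actual helper):

import numpy as np

def keypoints_to_lk_input(kps):
    # Pack KeyPoint pixel positions into the array layout LK expects
    return np.array([kp.pt for kp in kps], dtype=np.float32).reshape(-1, 1, 2)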
Example #10
    def detect(self, pos, rpy):
        """Update tracker with current image

        Parameters
        ----------
        pos : np.array
            Robot position (x, y, z)

        rpy : np.array
            Robot attitude (roll, pitch, yaw)

        """
        # Convert both euler angles and translation from NWU to EDN
        # Note: We assume here that we are using a robot model
        # where x = [pos_x, pos_y, theta] in world frame
        rpy = T_camera_global * rpy  # Motion model only modelled yaw
        t = T_camera_global * pos  # Translation

        # Obtain list of features observed at this time step
        fea_cur = self.cam_model.observed_features(self.features, rpy, t)
        if fea_cur is None:
            return

        # Remove lost feature tracks
        feature_ids_cur = [feature_id for _, feature_id in list(fea_cur)]
        for feature_id in list(self.features_tracking):
            if feature_id not in feature_ids_cur:
                self.remove_feature_track(feature_id)

        # Add or update feature tracks
        for feature in fea_cur:
            kp, feature_id = feature
            kp = KeyPoint(kp, 0)

            if feature_id not in self.features_tracking:
                self.add_feature_track(feature_id, kp, pos, rpy)
            else:
                self.update_feature_track(feature_id, kp, pos, rpy)

        self.debug("tracks_tracking: {}".format(self.tracks_tracking))
        self.debug("features_tracking: {}\n".format(self.features_tracking))
        self.counter_frame_id += 1
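
For context, T_camera_global above maps NWU world coordinates (x forward/north, y left/west, z up) into the EDN camera convention (x right, y down, z forward); it is presumably a fixed rotation along these lines (an assumption, not the project's actual definition):

import numpy as np

# NWU -> EDN: east = -west, down = -up, north carries over as the camera's forward axis
T_camera_global_sketch = np.array([[0.0, -1.0, 0.0],
                                   [0.0, 0.0, -1.0],
                                   [1.0, 0.0, 0.0]])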
Example #11
    def detect_keypoints(self, frame):
        """Detect

        Parameters
        ----------
        frame : np.array
            Image frame
        debug : bool
            Debug mode (Default value = False)

        Returns
        -------
        results : list of KeyPoint
            List of KeyPoints

        """
        # Detect
        keypoints = self.detector.detect(frame)
        results = [KeyPoint(kp.pt, kp.size) for kp in keypoints]

        return results
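
Here self.detector can be any OpenCV 2D feature detector exposing detect(); a minimal sketch of wiring one up (FAST and its threshold are assumptions, not necessarily what this class uses):

import cv2
import numpy as np

detector = cv2.FastFeatureDetector_create(threshold=25)  # assumed detector choice
frame = np.zeros((480, 640), dtype=np.uint8)             # placeholder image frame
keypoints = detector.detect(frame)                       # the same call detect_keypoints() makes
results = [(kp.pt, kp.size) for kp in keypoints]         # position and size, as kept by KeyPoint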
Example #12
    def test_estimate(self):
        nb_features = 100
        bounds = {
            "x": {"min": 5.0, "max": 10.0},
            "y": {"min": -1.0, "max": 1.0},
            "z": {"min": -1.0, "max": 1.0}
        }
        features = rand3dfeatures(nb_features, bounds)

        dt = 0.1
        p_G = np.array([0.0, 0.0, 0.0])
        v_G = np.array([0.1, 0.0, 0.0])
        q_CG = np.array([0.5, -0.5, 0.5, -0.5])

        # Setup camera states
        track_cam_states = []
        for i in range(10):
            p_G = p_G + v_G * dt
            track_cam_states.append(CameraState(i, q_CG, p_G))

        # Feature Track
        track_length = 10
        start = 0
        end = track_length

        for i in range(10):
            feature_idx = random.randint(0, features.shape[1] - 1)
            feature = T_camera_global * features[:, feature_idx]
            print("feature in global frame:",
                  (T_global_camera * feature).ravel())

            R_C0G = dot(R_global_camera, C(track_cam_states[0].q_CG))
            R_C1G = dot(R_global_camera, C(track_cam_states[1].q_CG))
            p_C_C0 = T_camera_global * track_cam_states[0].p_G
            p_C_C1 = T_camera_global * track_cam_states[1].p_G

            kp1 = self.cam_model.project(feature, R_C0G, p_C_C0)
            kp2 = self.cam_model.project(feature, R_C1G, p_C_C1)
            kp1 = KeyPoint(kp1.ravel()[:2], 21)
            kp2 = KeyPoint(kp2.ravel()[:2], 21)
            track = FeatureTrack(start, end, kp1, kp2)
            track.ground_truth = T_global_camera * feature

            for j in range(2, track_length):
                R_CG = dot(R_global_camera, C(track_cam_states[j].q_CG))
                p_C_Cj = T_camera_global * track_cam_states[j].p_G
                kp = self.cam_model.project(feature, R_CG, p_C_Cj)
                kp = KeyPoint(kp.ravel()[:2], 21)
                # kp[0] += np.random.normal(0, 0.1)
                # kp[1] += np.random.normal(0, 0.1)
                track.update(j, kp)

            # Estimate feature
            p_G_f = self.estimator.estimate(self.cam_model,
                                            track,
                                            track_cam_states)
            print("estimation: ", p_G_f.ravel())
            print()

            # Debug
            # debug = False
            # debug = True
            # if debug:
            #     C_CG = C(track_cam_states[-1].q_CG)
            #     p_G_C = track_cam_states[-1].p_G
            #     p_C_f = dot(C_CG, (p_G_f - p_G_C))

            # Assert
            feature_G = T_global_camera * feature
            self.assertTrue(abs(p_G_f[0, 0] - feature_G[0]) < 0.1)
            self.assertTrue(abs(p_G_f[1, 0] - feature_G[1]) < 0.1)
            self.assertTrue(abs(p_G_f[2, 0] - feature_G[2]) < 0.1)
Example #13
    def test_init(self):
        kp = KeyPoint([1, 2], 31)
        self.assertEqual(kp.pt[0], 1)
        self.assertEqual(kp.pt[1], 2)
        self.assertEqual(kp.size, 31)