Example #1
    def test_J(self):
        # Setup
        cam_q_CI = np.array([0.0, 0.0, 0.0, 1.0])
        cam_p_IC = np.array([1.0, 1.0, 1.0])
        q_hat_IG = np.array([0.0, 0.0, 0.0, 1.0])
        N = 1
        J = self.imu_state.J(cam_q_CI, cam_p_IC, q_hat_IG, N)

        # Assert
        C_CI = C(cam_q_CI)
        C_IG = C(q_hat_IG)
        # -- First row --
        self.assertTrue(np_equal(J[0:3, 0:3], C_CI))
        # -- Second row --
        self.assertTrue(np_equal(J[3:6, 0:3], skew(dot(C_IG.T, cam_p_IC))))
        self.assertTrue(np_equal(J[3:6, 12:15], I(3)))

        # Plot matrix
        # debug = True
        debug = False
        if debug:
            ax = plt.subplot(111)
            ax.matshow(J)
            plt.show()
Example #2
    def test_F(self):
        w_hat = np.array([1.0, 2.0, 3.0])
        q_hat = np.array([0.0, 0.0, 0.0, 1.0])
        a_hat = np.array([1.0, 2.0, 3.0])
        w_G = np.array([0.1, 0.1, 0.1])

        F = self.imu_state.F(w_hat, q_hat, a_hat, w_G)

        # -- First row --
        self.assertTrue(np_equal(F[0:3, 0:3], -skew(w_hat)))
        self.assertTrue(np_equal(F[0:3, 3:6], -np.eye(3)))
        # -- Third row --
        self.assertTrue(np_equal(F[6:9, 0:3], dot(-C(q_hat).T, skew(a_hat))))
        self.assertTrue(np_equal(F[6:9, 6:9], -2.0 * skew(w_G)))
        self.assertTrue(np_equal(F[6:9, 9:12], -C(q_hat).T))
        self.assertTrue(np_equal(F[6:9, 12:15], -skewsq(w_G)))
        # -- Fifth row --
        self.assertTrue(np_equal(F[12:15, 6:9], np.eye(3)))

        # Plot matrix
        # debug = True
        debug = False
        if debug:
            ax = plt.subplot(111)
            ax.matshow(F)
            plt.show()
Example #3
    def J(self, cam_q_CI, cam_p_IC, q_hat_IG, N):
        """Jacobian J matrix

        Parameters
        ----------
        cam_q_CI : np.array
            Rotation from IMU to camera frame
            in quaternion (x, y, z, w)
        cam_p_IC : np.array
            Position of camera in IMU frame
        q_hat_IG : np.array
            Rotation from global to IMU frame
        N : int
            Number of camera states

        Returns
        -------
        J : np.array - 6x(15+6N)
            Jacobian matrix J

        """
        C_CI = C(cam_q_CI)
        C_IG = C(q_hat_IG)
        J = zeros((6, 15 + 6 * N))

        # -- First row --
        J[0:3, 0:3] = C_CI
        # -- Second row --
        J[3:6, 0:3] = skew(dot(C_IG.T, cam_p_IC))
        J[3:6, 12:15] = I(3)

        return J
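
With N existing camera states, J has shape 6 x (15 + 6N); augment_state (Example #7) stacks it beneath an identity block to grow the covariance. A minimal shape check with hypothetical values, assuming an initialized imu_state with the same quaternion conventions as test_J (Example #1):

    import numpy as np

    cam_q_CI = np.array([0.0, 0.0, 0.0, 1.0])  # IMU-to-camera rotation (x, y, z, w)
    cam_p_IC = np.array([1.0, 1.0, 1.0])       # camera position in IMU frame
    q_hat_IG = np.array([0.0, 0.0, 0.0, 1.0])  # global-to-IMU rotation estimate
    N = 2                                      # hypothetical number of camera states
    J = imu_state.J(cam_q_CI, cam_p_IC, q_hat_IG, N)
    assert J.shape == (6, 15 + 6 * N)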
Example #4
    def test_calculate_residuals(self):
        # Camera states
        # -- Camera state 0
        p_G_C0 = np.array([0.0, 0.0, 0.0])
        rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C0G = euler2quat(rpy_C0G)
        C_C0G = C(q_C0G)
        # -- Camera state 1
        p_G_C1 = np.array([1.0, 1.0, 0.0])
        rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C1G = euler2quat(rpy_C1G)
        C_C1G = C(q_C1G)

        # Features
        landmark = np.array([0.0, 0.0, 10.0])
        kp1 = self.cam_model.project(landmark, C_C0G, p_G_C0)[0:2]
        kp2 = self.cam_model.project(landmark, C_C1G, p_G_C1)[0:2]

        # Setup feature track
        track_id = 0
        frame_id = 1
        data1 = KeyPoint(kp1, 21)
        data2 = KeyPoint(kp2, 21)
        track = FeatureTrack(track_id, frame_id, data1, data2)

        # Setup track cam states
        self.msckf.augment_state()
        self.msckf.min_track_length = 2
        self.msckf.cam_states[0].p_G = p_G_C0.reshape((3, 1))
        self.msckf.cam_states[0].q_CG = q_C0G.reshape((4, 1))
        self.msckf.cam_states[1].p_G = p_G_C1.reshape((3, 1))
        self.msckf.cam_states[1].q_CG = q_C1G.reshape((4, 1))

        # Test
        self.msckf.calculate_residuals([track])
Example #5
    def test_triangulate(self):
        # Camera states
        # -- Camera state 0
        p_G_C0 = np.array([0.0, 0.0, 0.0])
        rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C0G = euler2quat(rpy_C0G)
        C_C0G = C(q_C0G)
        # -- Camera state 1
        p_G_C1 = np.array([1.0, 1.0, 0.0])
        rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C1G = euler2quat(rpy_C1G)
        C_C1G = C(q_C1G)

        # Features
        landmark = np.array([0.0, 0.0, 10.0])
        kp1 = self.cam_model.project(landmark, C_C0G, p_G_C0)[0:2]
        kp2 = self.cam_model.project(landmark, C_C1G, p_G_C1)[0:2]

        # Calculate rotation and translation of first and last camera states
        # -- Obtain rotation and translation from camera 0 to camera 1
        C_C0C1 = dot(C_C0G, C_C1G.T)
        t_C0_C1C0 = dot(C_C0G, (p_G_C1 - p_G_C0))
        # -- Convert from pixel coordinates to image coordinates
        pt1 = self.cam_model.pixel2image(kp1)
        pt2 = self.cam_model.pixel2image(kp2)

        # Triangulate
        p_C0_C1C0, r = self.estimator.triangulate(pt1, pt2, C_C0C1, t_C0_C1C0)

        # Assert
        self.assertTrue(np.allclose(p_C0_C1C0.ravel(), landmark))
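
The triangulate method itself is not among these examples. A minimal linear least-squares sketch consistent with its call signature and this test's frame conventions (an assumption, not the actual implementation):

    import numpy as np

    def triangulate(pt1, pt2, C_C0C1, t_C0_C1C0):
        pt1 = np.asarray(pt1).ravel()
        pt2 = np.asarray(pt2).ravel()
        # Rays to the feature in each camera frame (image coordinates, unit depth)
        v1 = np.array([pt1[0], pt1[1], 1.0]).reshape((3, 1))
        v2 = np.array([pt2[0], pt2[1], 1.0]).reshape((3, 1))
        v2 = np.dot(C_C0C1, v2)  # rotate camera 1 ray into camera 0 frame
        # Solve d1 * v1 - d2 * v2 = t for the two depths by least squares
        A = np.hstack((v1, -v2))
        b = t_C0_C1C0.reshape((3, 1))
        depths, residual, _, _ = np.linalg.lstsq(A, b, rcond=None)
        p_C0_f = depths[0, 0] * v1  # scale the camera 0 ray by its depth
        return p_C0_f, residual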
Example #6
    def reprojection_error(self, x, *args):
        """Reprojection error

        Parameters
        ----------
        x : np.array
            Inverse depth parameters (alpha, beta, rho)
        *args :
            Tuple of (cam_model, track, track_cam_states): the CameraModel,
            the FeatureTrack and the list of CameraState where the feature
            track was observed

        Returns
        -------
        r : np.array
            Residual

        """
        cam_model, track, track_cam_states = args

        # Calculate residuals
        N = len(track_cam_states)
        r = zeros((2 * N, 1))
        C_C0G = C(track_cam_states[0].q_CG)
        p_G_C0 = track_cam_states[0].p_G

        alpha, beta, rho = x.ravel()

        for i in range(N):
            # Get camera current rotation and translation
            C_CiG = C(track_cam_states[i].q_CG)
            p_G_Ci = track_cam_states[i].p_G

            # Set camera 0 as origin, work out rotation and translation
            # of camera i relative to camera 0
            C_CiC0 = dot(C_CiG, C_C0G.T)
            t_Ci_CiC0 = dot(C_CiG, (p_G_C0 - p_G_Ci))

            # Project estimated feature location to image plane
            h = dot(C_CiC0, np.array([[alpha], [beta], [1]])) + rho * t_Ci_CiC0

            # Calculate reprojection error
            # -- Convert measurement to image coordinates
            z = cam_model.pixel2image(track.track[i].pt).reshape((2, 1))
            # -- Convert feature location to normalized coordinates
            z_hat = np.array([[h[0, 0] / h[2, 0]], [h[1, 0] / h[2, 0]]])
            # -- Reprojection error
            r[2 * i:(2 * (i + 1))] = z - z_hat

        return r.ravel()
Example #7
    def augment_state(self):
        """Augment state

        Augment state and covariance matrix with a copy of the current camera
        pose estimate when a new image is recorded

        """
        x_imu_size = self.imu_state.size
        x_cam_size = self.cam_states[0].size * self.N() if self.N() else 0

        # Camera pose Jacobian
        J = self.imu_state.J(self.ext_q_CI, self.ext_p_IC,
                             self.imu_state.q_IG, self.N())

        # Augment MSCKF covariance matrix (with new camera state)
        X = np.block([[I(x_imu_size + x_cam_size)], [J]])
        P = dot(X, dot(self.P(), X.T))
        self.imu_state.P = P[0:x_imu_size, 0:x_imu_size]
        self.P_cam = P[x_imu_size:, x_imu_size:]
        self.P_imu_cam = P[0:x_imu_size, x_imu_size:]

        # Add new camera state to sliding window by using current IMU pose
        # estimate to calculate camera pose
        # -- Create camera state in global frame
        imu_q_IG = self.imu_state.q_IG
        imu_p_G = self.imu_state.p_G
        cam_q_CG = dot(quatlcomp(self.ext_q_CI), imu_q_IG)
        cam_p_G = imu_p_G + dot(C(imu_q_IG).T, self.ext_p_IC)
        # -- Add camera state to sliding window
        cam_state = CameraState(self.counter_frame_id, cam_q_CG, cam_p_G)
        self.cam_states.append(cam_state)
        self.counter_frame_id += 1
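
The block matrix X stacks an identity over J, so the augmented covariance keeps every existing block and appends rows and columns for the new camera pose, correlated through J. A shape-only sketch with hypothetical sizes:

    import numpy as np

    x_imu_size, x_cam_size, N = 15, 6, 1    # hypothetical state sizes
    old_dim = x_imu_size + x_cam_size * N   # 21
    J = np.zeros((6, old_dim))              # stand-in for the camera pose Jacobian
    X = np.block([[np.eye(old_dim)], [J]])  # (old_dim + 6) x old_dim
    P_old = np.eye(old_dim)                 # stand-in for the current covariance
    P_new = np.dot(X, np.dot(P_old, X.T))
    assert P_new.shape == (old_dim + 6, old_dim + 6)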
Example #8
    def G(self, q_hat):
        """Input Jacobian G matrix

        Maps the input vector (IMU Gaussian noise) to the state vector (IMU
        error state), describing how the inputs affect the state.

        Parameters
        ----------
        q_hat : np.array
            Estimated quaternion (x, y, z, w)

        Returns
        -------
        G : np.array - 15x12
            Input Jacobian matrix G

        """
        # G matrix
        G = zeros((15, 12))
        # -- First row --
        G[0:3, 0:3] = -I(3)
        # -- Second row --
        G[3:6, 3:6] = I(3)
        # -- Third row --
        G[6:9, 6:9] = -C(q_hat).T
        # -- Fourth row --
        G[9:12, 9:12] = I(3)

        return G
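
In continuous time G injects the stacked IMU noise n = [n_g, n_bg, n_a, n_ba] into the error-state dynamics, and in update() (Example #15) it enters the covariance as dot(G, dot(Q, G.T)) * dt. A small sketch of that discrete process-noise term, assuming G is the 15x12 matrix returned above and Q is a diagonal 12x12 noise covariance:

    import numpy as np

    Q = np.eye(12) * 1e-4                # hypothetical IMU noise covariance
    dt = 0.005                           # hypothetical IMU period (s)
    Qd = np.dot(G, np.dot(Q, G.T)) * dt  # 15x15 discrete process noise
    assert Qd.shape == (15, 15)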
Example #9
    def test_sandbox(self):
        p_I_C = np.array([1.0, 2.0, 3.0])
        p_G_I = np.array([1.0, 0.0, 0.0])

        rpy_IG = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_IG = euler2quat(rpy_IG)
        C_IG = C(q_IG)

        print(p_G_I + np.dot(C_IG, p_I_C))
Example #10
    def test_residualize_track(self):
        # Camera states
        # -- Camera state 0
        p_G_C0 = np.array([0.0, 0.0, 0.0])
        rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C0G = euler2quat(rpy_C0G)
        C_C0G = C(q_C0G)
        # -- Camera state 1
        p_G_C1 = np.array([1.0, 1.0, 0.0])
        rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C1G = euler2quat(rpy_C1G)
        C_C1G = C(q_C1G)

        # Features
        landmark = np.array([0.0, 0.0, 10.0])
        kp1 = self.cam_model.project(landmark, C_C0G, p_G_C0)[0:2]
        kp2 = self.cam_model.project(landmark, C_C1G, p_G_C1)[0:2]

        # Setup feature track
        track_id = 0
        frame_id = 1
        data1 = KeyPoint(kp1, 21)
        data2 = KeyPoint(kp2, 21)
        track = FeatureTrack(track_id, frame_id, data1, data2)

        # Setup track cam states
        self.msckf.augment_state()
        self.msckf.min_track_length = 2
        self.msckf.cam_states[0].p_G = p_G_C0.reshape((3, 1))
        self.msckf.cam_states[0].q_CG = q_C0G.reshape((4, 1))
        self.msckf.cam_states[1].p_G = p_G_C1.reshape((3, 1))
        self.msckf.cam_states[1].q_CG = q_C1G.reshape((4, 1))

        print(self.msckf.cam_states[0])
        print(self.msckf.cam_states[1])

        # Test
        # self.msckf.enable_ns_trick = False
        H_o_j, r_o_j, R_o_j = self.msckf.residualize_track(track)

        # Plot matrix
        debug = False
        if debug:
            plt.matshow(H_o_j)
            plt.show()
Example #11
    def test_track_residuals(self):
        # Setup track cam states
        # -- Camera state 0
        self.msckf.imu_state.p_G = np.array([0.0, 0.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()
        # -- Camera state 1
        self.msckf.imu_state.p_G = np.array([1.0, 1.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()
        # -- Camera state 2
        self.msckf.imu_state.p_G = np.array([2.0, 2.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()
        # -- Camera state 3
        self.msckf.imu_state.p_G = np.array([3.0, 3.0, 0.0]).reshape((3, 1))
        self.msckf.augment_state()

        # Setup feature
        p_G_f = np.array([[10.0], [1.0], [0.0]])

        # Setup feature track
        C_C2G = C(self.msckf.cam_states[2].q_CG)
        C_C3G = C(self.msckf.cam_states[3].q_CG)
        p_G_C2 = self.msckf.cam_states[2].p_G
        p_G_C3 = self.msckf.cam_states[3].p_G
        kp1 = self.cam_model.project(p_G_f, C_C2G, p_G_C2.reshape((3, 1)))[0:2]
        kp2 = self.cam_model.project(p_G_f, C_C3G, p_G_C3.reshape((3, 1)))[0:2]
        track_id = 0
        frame_id = 3
        data0 = KeyPoint(kp1, 21)
        data1 = KeyPoint(kp2, 21)
        track = FeatureTrack(track_id, frame_id, data0, data1)

        # Test
        track_cam_states = self.msckf.track_cam_states(track)
        r_j = self.msckf.track_residuals(self.cam_model, track,
                                         track_cam_states, p_G_f)

        # Assert
        self.assertEqual(r_j.shape, (4, 1))
        self.assertTrue(np.allclose(r_j, np.zeros((4, 1))))
Example #12
    def initial_estimate(self, cam_model, track, track_cam_states):
        """Initial feature estimate

        Parameters
        ----------
        cam_model : CameraModel
            Camera model
        track : FeatureTrack
            Feature track
        track_cam_states : list of CameraState
            Camera states where feature track was observed

        Returns
        -------
        p_C0_f : np.array - 3x1
            Initial estimated feature position expressed in first camera frame
        residual : np.array
            Triangulation residual

        """
        # Calculate rotation and translation of the first two camera states
        # -- Get rotation and translation of camera 0 and camera 1
        C_C0G = C(track_cam_states[0].q_CG)
        C_C1G = C(track_cam_states[1].q_CG)
        p_G_C0 = track_cam_states[0].p_G
        p_G_C1 = track_cam_states[1].p_G
        # -- Calculate rotation and translation from camera 0 to camera 1
        C_C0C1 = dot(C_C0G, C_C1G.T)
        t_C0_C1C0 = dot(C_C0G, (p_G_C1 - p_G_C0))
        # -- Convert from pixel coordinates to image coordinates
        pt1 = cam_model.pixel2image(track.track[0].pt).reshape((2, 1))
        pt2 = cam_model.pixel2image(track.track[1].pt).reshape((2, 1))

        # Calculate initial estimate of 3D position
        p_C0_f, residual = self.triangulate(pt1, pt2, C_C0C1, t_C0_C1C0)

        return p_C0_f, residual
Example #13
    def F(self, w_hat, q_hat, a_hat, w_G):
        """Transition Jacobian F matrix

        Prediction or transition matrix in an EKF

        Parameters
        ----------
        w_hat : np.array
            Estimated angular velocity
        q_hat : np.array
            Estimated quaternion (x, y, z, w)
        a_hat : np.array
            Estimated acceleration
        w_G : np.array
            Earth's angular velocity (i.e. Earth's rotation)

        Returns
        -------
        F : np.array - 15x15
            Transition Jacobian matrix F

        """
        # F matrix
        F = zeros((15, 15))
        # -- First row --
        F[0:3, 0:3] = -skew(w_hat)
        F[0:3, 3:6] = -I(3)
        # -- Third row --
        F[6:9, 0:3] = -dot(C(q_hat).T, skew(a_hat))
        F[6:9, 6:9] = -2.0 * skew(w_G)
        F[6:9, 9:12] = -C(q_hat).T
        F[6:9, 12:15] = -skewsq(w_G)
        # -- Fifth row --
        F[12:15, 6:9] = I(3)

        return F
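
update() (Example #15) discretizes this F to first order as Phi = I + F * dt; for small dt that should closely track the exact matrix exponential. A quick check, assuming F is the 15x15 matrix returned above and scipy is available:

    import numpy as np
    from scipy.linalg import expm

    dt = 0.005                            # hypothetical IMU period (s)
    Phi_first_order = np.eye(15) + F * dt
    Phi_exact = expm(F * dt)
    print(np.max(np.abs(Phi_first_order - Phi_exact)))  # expect a tiny value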
Example #14
    def test_estimate(self):
        estimator = DatasetFeatureEstimator()

        # Pinhole Camera model
        image_width = 640
        image_height = 480
        fov = 60
        fx, fy = focal_length(image_width, image_height, fov)
        cx, cy = (image_width / 2.0, image_height / 2.0)
        K = camera_intrinsics(fx, fy, cx, cy)
        cam_model = PinholeCameraModel(image_width, image_height, K)

        # Camera states
        track_cam_states = []
        # -- Camera state 0
        p_G_C0 = np.array([0.0, 0.0, 0.0])
        rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C0G = euler2quat(rpy_C0G)
        C_C0G = C(q_C0G)
        track_cam_states.append(CameraState(0, q_C0G, p_G_C0))

        # -- Camera state 1
        p_G_C1 = np.array([1.0, 0.0, 0.0])
        rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
        q_C1G = euler2quat(rpy_C1G)
        C_C1G = C(q_C1G)
        track_cam_states.append(CameraState(1, q_C1G, p_G_C1))

        # Feature track
        p_G_f = np.array([[0.0], [0.0], [10.0]])
        kp0 = KeyPoint(cam_model.project(p_G_f, C_C0G, p_G_C0)[0:2], 0)
        kp1 = KeyPoint(cam_model.project(p_G_f, C_C1G, p_G_C1)[0:2], 0)
        track = FeatureTrack(0, 1, kp0, kp1, ground_truth=p_G_f)
        estimate = estimator.estimate(cam_model, track, track_cam_states)

        self.assertTrue(np.allclose(p_G_f.ravel(), estimate.ravel(), atol=0.1))
Example #15
    def update(self, a_m, w_m, dt):
        """IMU state update

        Parameters
        ----------
        a_m : np.array
            Accelerometer measurement
        w_m : np.array
            Gyroscope measurement
        dt : float
            Time difference (s)

        Returns
        -------
        P : np.array
            Updated covariance matrix
        Phi : np.array
            Discrete-time state transition matrix

        """
        # Calculate new accel and gyro estimates
        # a_hat = (a_m - self.b_a)
        # w_hat = (w_m - self.b_g - dot(C(self.q_IG), self.w_G))
        a_hat = a_m
        w_hat = w_m

        # Build the Jacobians F and G
        F = self.F(w_hat, self.q_IG, a_hat, self.w_G)
        G = self.G(self.q_IG)

        # Propagate IMU states
        # -- Orientation
        self.q_IG += dot(0.5 * Omega(w_hat), self.q_IG) * dt
        # self.q_IG = quatnormalize(self.q_IG)
        # -- Velocity
        self.v_G += (dot(C(self.q_IG).T, a_hat) -
                     2 * dot(skew(self.w_G), self.v_G) -
                     dot(skewsq(self.w_G), self.p_G)) * dt
        # self.v_G = self.v_G + (dot(C(self.q_IG).T, a_hat) - 2 * dot(skew(self.w_G), self.v_G) - dot(skewsq(self.w_G), self.p_G) + self.g_G) * dt  # NOQA
        # -- Position
        self.p_G += self.v_G * dt

        # Update covariance
        Phi = I(self.size) + F * dt
        self.P = dot(Phi, dot(self.P, Phi.T)) + dot(G, dot(self.Q, G.T)) * dt
        self.P = enforce_psd(self.P)

        return self.P, Phi
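
A minimal propagation loop, assuming imu_state is an initialized IMUState and measurements is an iterable of (a_m, w_m) pairs of 3-vectors sampled every dt seconds:

    dt = 0.005  # hypothetical IMU period (s)
    for a_m, w_m in measurements:
        # Propagate the nominal state and covariance one IMU step
        P, Phi = imu_state.update(a_m, w_m, dt)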
Example #16
    def test_G(self):
        q_hat = np.array([0.0, 0.0, 0.0, 1.0]).reshape((4, 1))
        G = self.imu_state.G(q_hat)

        # -- First row --
        self.assertTrue(np_equal(G[0:3, 0:3], -np.eye(3)))
        # -- Second row --
        self.assertTrue(np_equal(G[3:6, 3:6], np.eye(3)))
        # -- Third row --
        self.assertTrue(np_equal(G[6:9, 6:9], -C(q_hat).T))
        # -- Fourth row --
        self.assertTrue(np_equal(G[9:12, 9:12], np.eye(3)))

        # Plot matrix
        # debug = True
        debug = False
        if debug:
            ax = plt.subplot(111)
            ax.matshow(G)
            plt.show()
Example #17
    def track_residuals(self, cam_model, track, track_cam_states, p_f_G):
        """Calculate track residual

        Parameters
        ----------
        cam_model : CameraModel
            Camera model
        track : FeatureTrack
            Feature track
        track_cam_states : list of CameraState
            Camera states where feature track was observed
        p_f_G : np.array - 3x1
            Estimated feature position in global frame

        Returns
        -------
        r : np.array - 2Nx1
            Residual over camera states where feature was tracked, where N is
            the length of the feature track

        """
        N = len(track_cam_states)
        r_j = np.zeros((2 * N, 1))

        for i in range(N):
            # Transform feature from global frame to i-th camera frame
            C_CG = C(track_cam_states[i].q_CG)
            p_C_f = dot(C_CG, (p_f_G - track_cam_states[i].p_G))
            cu = p_C_f[0, 0] / p_C_f[2, 0]
            cv = p_C_f[1, 0] / p_C_f[2, 0]
            z_hat = np.array([cu, cv])

            # Convert measurement to image coordinates
            z = cam_model.pixel2image(track.track[i].pt).reshape((1, 2))

            # Calculate reprojection error and add it to the residual vector
            rs = 2 * i
            re = 2 * i + 2
            r_j[rs:re, 0] = (z - z_hat).ravel()

        return r_j
Example #18
    def estimate(self, cam_model, track, track_cam_states, plot=False):
        """Return the track's ground truth feature position, computing the
        reprojection residuals along the way for inspection and plotting"""
        # Get ground truth
        p_G_f = track.ground_truth

        # Convert ground truth expressed in global frame
        # to be expressed in camera 0
        C_C0G = C(track_cam_states[0].q_CG)
        p_G_C0 = track_cam_states[0].p_G
        p_C0_f = dot(C_C0G, (p_G_f - p_G_C0))

        # Create inverse depth params (these are to be optimized)
        alpha = p_C0_f[0, 0] / p_C0_f[2, 0]
        beta = p_C0_f[1, 0] / p_C0_f[2, 0]
        rho = 1.0 / p_C0_f[2, 0]

        # Setup residual calculation
        N = len(track_cam_states)
        r = np.zeros((2 * N, 1))

        # Calculate residuals
        estimates = []
        for i in range(N):
            # Get camera current rotation and translation
            C_CiG = C(track_cam_states[i].q_CG)
            p_G_Ci = track_cam_states[i].p_G

            # Set camera 0 as origin, work out rotation and translation
            # of camera i relative to camera 0
            C_CiC0 = dot(C_CiG, C_C0G.T)
            t_Ci_CiC0 = dot(C_CiG, (p_G_C0 - p_G_Ci))

            # Project estimated feature location to image plane
            h = dot(C_CiC0, np.array([[alpha], [beta], [1]])) + rho * t_Ci_CiC0
            estimates.append(h)
            # -- Convert feature location to normalized pixel coordinates
            z_hat = np.array([h[0] / h[2], h[1] / h[2]])

            # Calculate reprojection error
            # -- Convert measurement to normalized pixel coordinates
            z = cam_model.pixel2image(track.track[i].pt).reshape((2, 1))
            # -- Reprojection error
            reprojection_error = z - z_hat
            r[2 * i:(2 * (i + 1))] = reprojection_error

        # Plot
        # plot=True
        if plot:
            estimates = np.array(estimates)
            self.plot(track, track_cam_states, estimates)

        # Convert estimated inverse depth params back to feature position in
        # the global frame; see Eq. 38 in Mourikis2007 (A Multi-State
        # Constraint Kalman Filter for Vision-aided Inertial Navigation)
        # z = 1 / rho
        # X = np.array([[alpha], [beta], [1.0]])
        # p_G_f = z * dot(C_C0G.T, X) + p_G_C0
        # p_G_f[0] += np.random.normal(0.0, 0.01)
        # p_G_f[1] += np.random.normal(0.0, 0.01)
        # p_G_f[2] += np.random.normal(0.0, 0.01)

        return p_G_f
Example #19
    def jacobian(self, x, *args):
        """Jacobian

        Parameters
        ----------
        x : np.array
            Inverse depth parameters (alpha, beta, rho)
        *args :
            Tuple of (cam_model, track, track_cam_states): the CameraModel,
            the FeatureTrack and the list of CameraState where the feature
            track was observed

        Returns
        -------
        J : np.array
            Jacobian

        """
        cam_model, track, track_cam_states = args

        N = len(track_cam_states)
        J = zeros((2 * N, 3))

        C_C0G = C(track_cam_states[0].q_CG)
        p_G_C0 = track_cam_states[0].p_G
        alpha, beta, rho = x.ravel()

        # Form the Jacobian
        for i in range(N):
            # Get camera current rotation and translation
            C_CiG = C(track_cam_states[i].q_CG)
            p_G_Ci = track_cam_states[i].p_G

            # Set camera 0 as origin, work out rotation and translation
            # of camera i relative to camera 0
            C_CiC0 = dot(C_CiG, C_C0G.T)
            t_Ci_CiC0 = dot(C_CiG, (p_G_C0 - p_G_Ci))

            # Project estimated feature location to image plane
            h = dot(C_CiC0, np.array([[alpha], [beta], [1]])) + rho * t_Ci_CiC0

            drdalpha = np.array([
                -C_CiC0[0, 0] / h[2, 0] +
                (h[0, 0] / h[2, 0]**2) * C_CiC0[2, 0],  # noqa
                -C_CiC0[1, 0] / h[2, 0] +
                (h[1, 0] / h[2, 0]**2) * C_CiC0[2, 0]  # noqa
            ])
            drdbeta = np.array([
                -C_CiC0[0, 1] / h[2, 0] +
                (h[0, 0] / h[2, 0]**2) * C_CiC0[2, 1],  # noqa
                -C_CiC0[1, 1] / h[2, 0] +
                (h[1, 0] / h[2, 0]**2) * C_CiC0[2, 1]  # noqa
            ])
            drdrho = np.array([
                -t_Ci_CiC0[0, 0] / h[2, 0] +
                (h[0, 0] / h[2, 0]**2) * t_Ci_CiC0[2, 0],  # noqa
                -t_Ci_CiC0[1, 0] / h[2, 0] +
                (h[1, 0] / h[2, 0]**2) * t_Ci_CiC0[2, 0]  # noqa
            ])
            J[2 * i:(2 * (i + 1)), 0] = drdalpha.ravel()
            J[2 * i:(2 * (i + 1)), 1] = drdbeta.ravel()
            J[2 * i:(2 * (i + 1)), 2] = drdrho.ravel()

        return J
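
One way to sanity-check this analytical Jacobian is against central finite differences of reprojection_error (Example #6). A minimal sketch, assuming estimator, cam_model, track and track_cam_states are set up as in the surrounding tests:

    import numpy as np

    def check_jacobian(estimator, theta, args, step=1e-6):
        # Central-difference approximation of d(reprojection_error)/d(theta)
        r0 = estimator.reprojection_error(theta, *args)
        J_numeric = np.zeros((r0.size, 3))
        for k in range(3):
            d = np.zeros(3)
            d[k] = step
            r_plus = estimator.reprojection_error(theta + d, *args)
            r_minus = estimator.reprojection_error(theta - d, *args)
            J_numeric[:, k] = (r_plus - r_minus) / (2.0 * step)
        J_analytic = estimator.jacobian(theta, *args)
        return np.allclose(J_analytic, J_numeric, atol=1e-4)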
Example #20
    def H(self, track, track_cam_states, p_G_f):
        """Form the Jacobian measurement matrix

        - $H^{(j)}_x$ of j-th feature track with respect to state $X$
        - $H^{(j)}_f$ of j-th feature track with respect to feature position

        Parameters
        ----------
        track : FeatureTrack
            Feature track of length M
        track_cam_states : list of CameraState
            N Camera states
        p_G_f : np.array
            Feature position in global frame

        Returns
        -------
        H_f_j : np.matrix
            Measurement Jacobian w.r.t feature position (size: 2M x 3)
        H_x_j : np.matrix
            Measurement Jacobian w.r.t state (size: 2M x (15 + 6N))

        """
        x_imu_size = self.imu_state.size      # Size of imu state
        x_cam_size = self.cam_states[0].size  # Size of cam state

        N = self.N()    # Number of camera states
        M = track.tracked_length()  # Length of feature track

        # Measurement jacobian w.r.t feature
        H_f_j = zeros((2 * M, 3))

        # Measurement jacobian w.r.t state
        H_x_j = zeros((2 * M, x_imu_size + x_cam_size * N))

        # Pose index
        pose_idx = track.frame_start - self.cam_states[0].frame_id
        assert track_cam_states[0].frame_id == track.frame_start
        assert track_cam_states[-1].frame_id == track.frame_end

        # Form measurement jacobians
        for i in range(M):
            # Feature position in camera frame
            C_CG = C(track_cam_states[i].q_CG)
            p_G_C = track_cam_states[i].p_G
            p_C_f = dot(C_CG, (p_G_f - p_G_C))
            X, Y, Z = p_C_f.ravel()

            # dh / dg
            dhdg = (1.0 / Z) * np.array([[1.0, 0.0, -X / Z],
                                         [0.0, 1.0, -Y / Z]])

            # Row start and end index
            rs = 2 * i
            re = 2 * i + 2

            # Column start and end index
            cs_dhdq = x_imu_size + (x_cam_size * pose_idx)
            ce_dhdq = x_imu_size + (x_cam_size * pose_idx) + 3

            cs_dhdp = x_imu_size + (x_cam_size * pose_idx) + 3
            ce_dhdp = x_imu_size + (x_cam_size * pose_idx) + 6

            # H_f_j measurement jacobian w.r.t feature
            H_f_j[rs:re, :] = dot(dhdg, C_CG)

            # H_x_j measurement jacobian w.r.t state
            H_x_j[rs:re, cs_dhdq:ce_dhdq] = dot(dhdg, skew(p_C_f))
            H_x_j[rs:re, cs_dhdp:ce_dhdp] = dot(-dhdg, C_CG)

            # Update pose_idx
            pose_idx += 1

        return H_f_j, H_x_j
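
residualize_track (exercised in Example #10) then projects H_x_j and the track residual onto the left null space of H_f_j so the feature-position error drops out of the update; this is the standard step from Mourikis2007 and presumably the "ns trick" toggled in that test. A minimal sketch, assuming scipy is available:

    import numpy as np
    from scipy.linalg import null_space

    def nullspace_project(H_x_j, H_f_j, r_j):
        # A spans the left null space of H_f_j (A.T @ H_f_j == 0), so
        # premultiplying by A.T removes the feature-error term
        A = null_space(H_f_j.T)     # 2M x (2M - 3)
        H_o_j = np.dot(A.T, H_x_j)  # projected measurement Jacobian
        r_o_j = np.dot(A.T, r_j)    # projected residual
        return H_o_j, r_o_j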
Example #21
    def estimate(self, cam_model, track, track_cam_states, debug=False):
        """Estimate feature 3D location by optimizing over inverse depth
        parameterization using Gauss Newton Optimization

        Parameters
        ----------
        cam_model : CameraModel
            Camera model
        track : FeatureTrack
            Feature track
        track_cam_states : list of CameraState
            Camera states where feature track was observed
        debug :
            Debug mode (default: False)

        Returns
        -------
        p_G_f : np.array - 3x1
            Estimated feature position in global frame

        """
        # Calculate initial estimate of 3D position
        p_C0_f, residual = self.initial_estimate(cam_model, track,
                                                 track_cam_states)

        # Short-circuit with the ground truth if it is available
        if track.ground_truth is not None:
            return track.ground_truth

        # Convert ground truth expressed in global frame
        # to be expressed in camera 0
        # C_C0G = C(track_cam_states[0].q_CG)
        # p_G_C0 = track_cam_states[0].p_G
        # p_C0_f = dot(C_C0G, (p_G_f - p_G_C0))

        # print("true: ", p_C0_f.ravel())

        # Create inverse depth params (these are to be optimized)
        alpha = p_C0_f[0, 0] / p_C0_f[2, 0]
        beta = p_C0_f[1, 0] / p_C0_f[2, 0]
        rho = 1.0 / p_C0_f[2, 0]
        theta_k = np.array([alpha, beta, rho])

        # z = 1 / rho
        # X = np.array([[alpha], [beta], [1.0]])
        # C_C0G = C(track_cam_states[0].q_CG)
        # p_G_C0 = track_cam_states[0].p_G
        # init = z * dot(C_C0G.T, X) + p_G_C0

        # Optimize feature location
        args = (cam_model, track, track_cam_states)
        result = least_squares(self.reprojection_error,
                               theta_k,
                               args=args,
                               jac=self.jacobian,
                               verbose=1,
                               method="lm")

        # if result.cost > 1e-4:
        #     return None

        # Calculate feature position in global frame
        alpha, beta, rho = result.x.ravel()
        z = 1 / rho
        X = np.array([[alpha], [beta], [1.0]])
        C_C0G = C(track_cam_states[0].q_CG)
        p_G_C0 = track_cam_states[0].p_G
        p_G_f = z * dot(C_C0G.T, X) + p_G_C0

        # print("ground truth: ", track.ground_truth.ravel())
        # print("cost: ", result.cost)
        # print("initial: ", init.ravel())
        # print("final: ", p_G_f.ravel())

        # p_C_f = dot(C_C0G, (p_G_f - p_G_C0))
        # if p_C_f[2, 0] < 2.0:
        #     return None
        # if p_C_f[2, 0] > 200.0:
        #     return None

        return p_G_f
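
The back-conversion at the end follows Eq. 38 of Mourikis2007: with z = 1 / rho, the feature is z * dot(C_C0G.T, [alpha, beta, 1]) + p_G_C0. A round-trip check with an identity camera-0 rotation and hypothetical numbers:

    import numpy as np

    p_C0_f = np.array([[1.0], [2.0], [10.0]])  # hypothetical feature in camera 0
    alpha = p_C0_f[0, 0] / p_C0_f[2, 0]
    beta = p_C0_f[1, 0] / p_C0_f[2, 0]
    rho = 1.0 / p_C0_f[2, 0]
    p_back = (1.0 / rho) * np.array([[alpha], [beta], [1.0]])
    assert np.allclose(p_back, p_C0_f)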
Example #22
    def test_estimate2(self):
        # Load RAW KITTI dataset
        data = RawSequence(RAW_DATASET, "2011_09_26", "0001")
        # data = RawSequence(RAW_DATASET, "2011_09_26", "0046")
        # data = RawSequence(RAW_DATASET, "2011_09_26", "0005")
        K = data.calib_cam2cam["P_rect_00"].reshape((3, 4))[0:3, 0:3]
        cam_model = PinholeCameraModel(1242, 375, K)

        # Initialize feature tracker
        img = cv2.imread(data.image_00_files[0])
        tracker = FeatureTracker()
        tracker.update(img)

        # Setup plot
        features = None
        features_plot = None

        pos_data = data.get_local_position(0)

        debug = False
        if debug:
            fig = plt.figure()
            plt.ion()
            ax = fig.add_subplot(111)
            pos_plot = ax.plot(pos_data[0], pos_data[1],
                               marker=".", color="blue")[0]
            # ax.set_xlim([-60.0, 5.0])
            # ax.set_ylim([-60.0, 5.0])
            ax.set_xlim([0.0, 50.0])
            ax.set_ylim([-100.0, 0.0])
            fig.canvas.draw()
            plt.show(block=False)

        # Setup feature tracks
        tracks = []
        for i in range(1, 10):
            # Track features
            img = cv2.imread(data.image_00_files[i])
            tracker.update(img, True)
            if cv2.waitKey(1) == 113:
                exit(0)
            tracks = tracker.remove_lost_tracks()

            # Loop feature tracks
            p_G_f = None
            for track in tracks:
                if track.tracked_length() < 8:
                    continue

                # Setup feature track camera states
                track_cam_states = []
                for j in range(track.tracked_length()):
                    frame_id = track.frame_start + j

                    imu_q_IG = euler2quat(data.get_attitude(frame_id))
                    imu_p_G = data.get_local_position(frame_id)

                    ext_q_CI = np.array([0.5, -0.5, 0.5, -0.5])
                    cam_q_CG = dot(quatlcomp(ext_q_CI), imu_q_IG)
                    cam_p_G = imu_p_G

                    cam_state = CameraState(frame_id, cam_q_CG, cam_p_G)
                    track_cam_states.append(cam_state)

                # Estimate feature track
                p_G_f = self.estimator.estimate(cam_model,
                                                track,
                                                track_cam_states)
                if p_G_f is not None:
                    C_CG = C(track_cam_states[-1].q_CG)
                    p_G_C = track_cam_states[-1].p_G
                    p_C_f = dot(C_CG, (p_G_f - p_G_C))

                    print("p_G_f: ", p_G_f.ravel())
                    print("p_C_f: ", p_C_f.ravel())
                print()

            # Plot
            pos = data.get_local_position(i)
            pos_data = np.hstack((pos_data, pos))

            if debug:
                if features is None and p_G_f is not None:
                    features = p_G_f
                    features_plot = ax.plot(p_G_f[0], p_G_f[1],
                                            marker="x", color="red", ls='')[0]
                elif p_G_f is not None:
                    features = np.hstack((features, p_G_f))
                    features_plot.set_xdata(features[0, :])
                    features_plot.set_ydata(features[1, :])

                    pos_plot.set_xdata(pos_data[0, :])
                    pos_plot.set_ydata(pos_data[1, :])

                ax.relim()
                ax.autoscale_view(True, True, True)
                fig.canvas.draw()
Example #23
    def test_estimate(self):
        nb_features = 100
        bounds = {
            "x": {"min": 5.0, "max": 10.0},
            "y": {"min": -1.0, "max": 1.0},
            "z": {"min": -1.0, "max": 1.0}
        }
        features = rand3dfeatures(nb_features, bounds)

        dt = 0.1
        p_G = np.array([0.0, 0.0, 0.0])
        v_G = np.array([0.1, 0.0, 0.0])
        q_CG = np.array([0.5, -0.5, 0.5, -0.5])

        # Setup camera states
        track_cam_states = []
        for i in range(10):
            p_G = p_G + v_G * dt
            track_cam_states.append(CameraState(i, q_CG, p_G))

        # Feature Track
        track_length = 10
        start = 0
        end = track_length

        for i in range(10):
            feature_idx = random.randint(0, features.shape[1] - 1)
            feature = T_camera_global * features[:, feature_idx]
            print("feature in global frame:",
                  (T_global_camera * feature).ravel())

            R_C0G = dot(R_global_camera, C(track_cam_states[0].q_CG))
            R_C1G = dot(R_global_camera, C(track_cam_states[1].q_CG))
            p_C_C0 = T_camera_global * track_cam_states[0].p_G
            p_C_C1 = T_camera_global * track_cam_states[1].p_G

            kp1 = self.cam_model.project(feature, R_C0G, p_C_C0)
            kp2 = self.cam_model.project(feature, R_C1G, p_C_C1)
            kp1 = KeyPoint(kp1.ravel()[:2], 21)
            kp2 = KeyPoint(kp2.ravel()[:2], 21)
            track = FeatureTrack(start, end, kp1, kp2)
            track.ground_truth = T_global_camera * feature

            for j in range(2, track_length):
                R_CG = dot(R_global_camera, C(track_cam_states[j].q_CG))
                p_C_Ci = T_camera_global * track_cam_states[j].p_G
                kp = self.cam_model.project(feature, R_CG, p_C_Ci)
                kp = KeyPoint(kp.ravel()[:2], 21)
                # kp[0] += np.random.normal(0, 0.1)
                # kp[1] += np.random.normal(0, 0.1)
                track.update(j, kp)

            # Estimate feature
            p_G_f = self.estimator.estimate(self.cam_model,
                                            track,
                                            track_cam_states)
            print("estimation: ", p_G_f.ravel())
            print()

            # Debug
            # debug = False
            # debug = True
            # if debug:
            #     C_CG = C(track_cam_states[-1].q_CG)
            #     p_G_C = track_cam_states[-1].p_G
            #     p_C_f = dot(C_CG, (p_G_f - p_G_C))

            # Assert
            feature_G = T_global_camera * feature
            self.assertTrue(abs(p_G_f[0, 0] - feature_G[0]) < 0.1)
            self.assertTrue(abs(p_G_f[1, 0] - feature_G[1]) < 0.1)
            self.assertTrue(abs(p_G_f[2, 0] - feature_G[2]) < 0.1)