Example #1
    def boxplus(self, x, dx):
        # assert  x.shape == (xZ+5*self.len_features, 1) and dx.shape == (dxZ+3*self.len_features, 1)

        out = np.zeros((xZ + 5 * self.len_features, 1))

        # Add position and velocity vector states
        out[xPOS:xPOS + 3] = x[xPOS:xPOS + 3] + dx[dxPOS:dxPOS + 3]
        out[xVEL:xVEL + 3] = x[xVEL:xVEL + 3] + dx[dxVEL:dxVEL + 3]

        # Add attitude quaternion state on the manifold
        out[xATT:xATT + 4] = (Quaternion(x[xATT:xATT + 4]) +
                              dx[dxATT:dxATT + 3]).elements

        # add bias and drag term vector states
        out[xB_A:xB_A + 3] = x[xB_A:xB_A + 3] + dx[dxB_A:dxB_A + 3]
        out[xB_G:xB_G + 3] = x[xB_G:xB_G + 3] + dx[dxB_G:dxB_G + 3]
        out[xMU] = x[xMU] + dx[dxMU]

        # add Feature quaternion states
        for i in range(self.len_features):
            xFEAT = xZ + i * 5
            xRHO = xZ + i * 5 + 4
            dxFEAT = dxZ + 3 * i
            dxRHO = dxZ + 3 * i + 2
            dqzeta = dx[dxFEAT:
                        dxRHO, :]  # 2-vector which is the derivative of qzeta
            qzeta = Quaternion(x[xFEAT:xFEAT + 4, :])  # 4-vector quaternion

            # Feature Quaternion States (use manifold)
            out[xFEAT:xRHO, :] = q_feat_boxplus(qzeta, dqzeta).elements

            # Inverse Depth State
            out[xRHO, :] = x[xRHO] + dx[dxRHO]
        return out
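# NOTE: the index constants used throughout these examples (xPOS, xVEL, xATT,
# xB_A, xB_G, xMU, xZ and their dx* delta-state counterparts) are module-level
# and not shown on this page. One layout consistent with the comments below
# ("17 main states + 5N feature states", "16 main delta states") would be the
# following sketch; the b_acc/b_gyro ordering is an assumption:
#
#   xPOS, xVEL, xATT, xB_A, xB_G, xMU, xZ = 0, 3, 6, 10, 13, 16, 17
#   dxPOS, dxVEL, dxATT, dxB_A, dxB_G, dxMU, dxZ = 0, 3, 6, 9, 12, 15, 16
#   uA, uG = 0, 3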
def plot_3d_trajectory(ground_truth,
                       feat_time=None,
                       qzetas=None,
                       depths=None,
                       ids=None,
                       p_b_c=np.zeros((3, 1)),
                       q_b_c=Quaternion.Identity(),
                       fighandle=None):
    pos_time = ground_truth[:, 0]
    position = ground_truth[:, 1:4]
    orientation = ground_truth[:, 4:8]
    if fighandle is not None:
        ax = fighandle
    else:
        plt.figure(2, figsize=(14, 10))
        ax = plt.subplot(111, projection='3d')
    plt.plot(position[:1, 0], position[:1, 1], position[:1, 2], 'kx')
    plt.plot(position[:, 0], position[:, 1], position[:, 2], 'c-')
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    e_x = 0.1 * np.array([[1., 0, 0]]).T
    e_y = 0.1 * np.array([[0, 1., 0]]).T
    e_z = 0.1 * np.array([[0, 0, 1.]]).T
    if qzetas is not None:
        id_indices = np.unique(np.hstack(ids))
        colors = get_colors(len(id_indices), plt.cm.jet)
    for i in range(len(position) // 25):
        j = i * 25
        q_i_b = Quaternion(orientation[j, :, None])
        body_pos = position[j, :, None]
        x_end = body_pos + q_i_b.rot(e_x)
        y_end = body_pos + q_i_b.rot(e_y)
        z_end = body_pos + q_i_b.rot(e_z)
        plt.plot([body_pos[0, 0], x_end[0, 0]], [body_pos[1, 0], x_end[1, 0]],
                 [body_pos[2, 0], x_end[2, 0]], 'r-')
        plt.plot([body_pos[0, 0], y_end[0, 0]], [body_pos[1, 0], y_end[1, 0]],
                 [body_pos[2, 0], y_end[2, 0]], 'g-')
        plt.plot([body_pos[0, 0], z_end[0, 0]], [body_pos[1, 0], z_end[1, 0]],
                 [body_pos[2, 0], z_end[2, 0]], 'b-')
        if feat_time is not None:
            feat_idx = np.argmin(feat_time < pos_time[j])
            camera_pos = body_pos + q_i_b.invrot(p_b_c)
            for qz, d, id in zip(qzetas[feat_idx], depths[feat_idx],
                                 ids[feat_idx]):
                if np.isfinite(qz).all() and np.isfinite(d):
                    q_c_z = Quaternion(qz[:, None])
                    zeta_end = camera_pos + q_i_b.rot(
                        q_b_c.rot(q_c_z.rot(10. * e_z * d)))
                    plt.plot([camera_pos[0, 0], zeta_end[0, 0]],
                             [camera_pos[1, 0], zeta_end[1, 0]],
                             [camera_pos[2, 0], zeta_end[2, 0]],
                             '--',
                             color=colors[np.where(id_indices == id)[0][0]],
                             linewidth=1.0)
Example #3
    def get_zetas(self):
        zetas = np.zeros((3, self.len_features))
        for i in range(self.len_features):
            qzeta = self.x[xZ + 5 * i:xZ + 5 * i + 4, :]  # 4-vector quaternion
            zetas[:, i, None] = Quaternion(qzeta).rot(
                self.khat
            )  # 3-vector pointed at the feature in the camera frame
        return zetas
Example #4
def calculate_velocity_from_position(t, position, orientation):
    # Calculate body-fixed velocity by differentiating position and rotating
    # into the body frame
    b, a = scipy.signal.butter(8, 0.03)  # Create a Butterworth Filter
    # differentiate Position
    delta_x = np.diff(position, axis=0)
    delta_t = np.diff(t)
    unfiltered_inertial_velocity = np.vstack((np.zeros((1, 3)), delta_x / delta_t[:, None]))
    # Filter
    v_inertial = scipy.signal.filtfilt(b, a, unfiltered_inertial_velocity, axis=0)
    # Rotate into Body Frame
    vel_data = []
    for i in range(len(t)):
        q_I_b = Quaternion(orientation[i, :, None])
        vel_data.append(q_I_b.rot(v_inertial[i, None].T).T)

    vel_data = np.array(vel_data).squeeze()
    return vel_data
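# Hypothetical smoke test for calculate_velocity_from_position(): a body
# moving at a constant 1 m/s in x with identity attitude should report a
# body-frame velocity of roughly [1, 0, 0] once the filter transient decays.
def _check_velocity_from_position():
    t = np.arange(0., 10., 0.01)
    position = np.hstack((t[:, None], np.zeros((len(t), 2))))  # x = t
    orientation = np.tile([1., 0., 0., 0.], (len(t), 1))       # identity quaternions
    vel = calculate_velocity_from_position(t, position, orientation)
    print vel[len(t) // 2]  # ~[1, 0, 0]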
Example #5
    def h_feat(self, x, **kwargs):
        # assert x.shape == (xZ + 5 * self.len_features, 1) and isinstance(i, int)
        i = self.global_to_local_feature_id[kwargs['i']]
        dxZETA_i = dxZ + i * 3
        q_zeta = Quaternion(x[xZ + i * 5:xZ + i * 5 + 4])

        zeta = q_zeta.rot(self.khat)
        sk_zeta = skew(zeta)
        ezT_zeta = (self.khat.T.dot(zeta)).squeeze()
        T_z = T_zeta(q_zeta)

        h = self.cam_F.dot(zeta) / ezT_zeta + self.cam_center

        dhdx = np.zeros((2, dxZ + 3 * self.len_features))
        dhdx[:, dxZETA_i:dxZETA_i +
             2] = -self.cam_F.dot((sk_zeta.dot(T_z)) / ezT_zeta -
                                  zeta.dot(self.khat.T).dot(sk_zeta).dot(T_z) /
                                  (ezT_zeta * ezT_zeta))

        return h, dhdx
Example #6
    def h_pixel_vel(self, x, i, u):
        # assert x.shape == (xZ + 5 * self.len_features, 1) and isinstance(i, int) and u.shape == (6, 1)

        vel = x[xVEL:xVEL + 3]
        omega = u[uG:uG + 3] - x[xB_G:xB_G + 3]

        # Camera Dynamics
        vel_c_i = self.q_b_c.invrot(vel + skew(omega).dot(self.p_b_c))
        omega_c_i = self.q_b_c.invrot(omega)

        q_c_z = Quaternion(x[xZ + i * 5:xZ + i * 5 + 4])
        rho = x[xZ + i * 5 + 4]
        zeta = q_c_z.rot(self.khat)

        sk_vel = skew(vel_c_i)
        sk_ez = skew(self.khat)
        sk_zeta = skew(zeta)
        R_b_c = self.q_b_c.R

        # TODO: Need to convert to camera dynamics

        h = -self.focal_len * I_2x3.dot(sk_ez).dot(rho *
                                                   (sk_zeta.dot(vel_c_i)) +
                                                   omega_c_i)

        ZETA_i = dxZ + 3 * i
        RHO_i = dxZ + 3 * i + 2
        dhdx = np.zeros((2, dxZ + 3 * self.len_features))
        dhdx[:, dxVEL:dxVEL +
             3] = -self.focal_len * rho * I_2x3.dot(sk_ez).dot(sk_zeta)
        dhdx[:, ZETA_i:ZETA_i + 2] = -self.focal_len * rho * I_2x3.dot(
            sk_ez).dot(sk_vel).dot(sk_zeta).dot(T_zeta(q_c_z))
        dhdx[:, RHO_i, None] = -self.focal_len * I_2x3.dot(sk_ez).dot(
            sk_zeta).dot(vel_c_i)
        dhdx[:, dxB_G:dxB_G + 3] = self.focal_len * I_2x3.dot(sk_ez).dot(
            R_b_c - rho * sk_zeta.dot(R_b_c).dot(skew(self.p_b_c)))
        # dhdx[:, dxB_G:dxB_G + 3] = self.focal_len * I_2x3.dot(sk_ez).dot(I_zz)

        return h, dhdx
def test():
    landmarks = np.random.uniform(-100, 100, (3, 10))
    truth = []
    position = np.zeros((3, 1))
    orientation = Quaternion.Identity()
    for i in range(1000):
        position += np.random.normal(0.0, 0.025, (3, 1))
        orientation += np.random.normal(0.0, 0.025, (3, 1))

        truth.append(
            np.hstack((np.array([[i]]), position.T, orientation.elements.T)))
    truth = np.array(truth).squeeze()

    feature_time, zetas, depths, ids = add_landmark(truth, landmarks)
def generate_data():
    dt = 0.0033
    t = np.arange(dt, 120.1, dt)

    g = np.array([[0, 0, 9.80665]]).T

    q0 = Quaternion(np.array([[1, 0.0, 0, 0]]).T)
    q0.normalize()

    q = np.zeros((len(t), 4))
    q[0, :, None] = q0.elements

    frequencies = np.array([[1.0, 1.5, -1.0]]).T
    amplitudes = np.array([[1.0, 1.0, 1.0]]).T

    omega = amplitudes * np.sin(frequencies * t)

    acc = np.zeros([3, len(t)])

    for i in tqdm(range(len(t))):
        if i == 0:
            continue
        quat = Quaternion(q[i - 1, :, None])

        q[i, :, None] = (quat + omega[:, i, None] * dt).elements

        acc[:, i, None] = -quat.rot(g)

    data = dict()
    data['truth_NED'] = dict()
    data['truth_NED']['pos'] = np.zeros((len(t), 3))
    data['truth_NED']['vel'] = np.zeros((len(t), 3))
    data['truth_NED']['att'] = q
    data['truth_NED']['t'] = t

    data['imu_data'] = dict()
    data['imu_data']['t'] = t
    data['imu_data']['acc'] = acc.T
    data['imu_data']['gyro'] = omega.T

    landmarks = np.array([[0, 0, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]])

    landmarks = np.random.uniform(-25, 25, (2, 3))
    #[0, 9, 1],
    #[2, 3, 5]

    data['features'] = dict()
    data['features']['t'] = data['truth_NED']['t']
    data['features']['zeta'], data['features']['depth'] = add_landmark(
        data['truth_NED']['pos'], data['truth_NED']['att'], landmarks)

    cPickle.dump(data, open('generated_data.pkl', 'wb'))
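# Companion read-back for the pickle written above (Python 2 cPickle, matching
# the example) -- a quick sanity check on the generated dictionary layout:
def _check_generated_data():
    data = cPickle.load(open('generated_data.pkl', 'rb'))
    assert data['imu_data']['acc'].shape == data['imu_data']['gyro'].shape
    assert data['truth_NED']['att'].shape[1] == 4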
def add_landmark(truth, landmarks, p_b_c, q_b_c, F, lambda_0):
    assert truth.shape[1] > 7 and landmarks.shape[1] == 3

    ids = []
    depths = []
    lambdas = []
    q_zetas = []
    time = truth[:, 0]

    khat = np.array([[0, 0, 1.]]).T
    all_ids = np.array([i for i in range(len(landmarks))])

    for i in range(len(truth)):
        q = Quaternion(truth[i, 4:8, None])
        delta_pose = landmarks[:, :3] - (truth[i, 1:4] + q.invrot(p_b_c).T)
        dist = norm(delta_pose, axis=1)
        zetas = q_b_c.invrot(q.invrot((delta_pose / dist[:, None]).T))

        # Remove Features Behind the camera
        valid_ids = all_ids[zetas[2, :] > 0.2]
        frame_lambdas = []
        frame_depths = []
        frame_ids = []
        frame_qzetas = []
        for id in valid_ids:
            # Simulate pixel measurements
            l = 1.0 / khat.T.dot(zetas[:, id]) * F.dot(zetas[:, id,
                                                             None]) + lambda_0
            if 0 < l[0, 0] < 640 and 0 < l[1, 0] < 480:
                frame_lambdas.append(l[:, 0])
                frame_depths.append(dist[id])
                frame_ids.append(id)
                frame_qzetas.append(
                    Quaternion.from_two_unit_vectors(khat,
                                                     zetas[:, id,
                                                           None]).elements[:,
                                                                           0])
        frame_lambdas = np.array(frame_lambdas)
        frame_depths = np.array(frame_depths)
        frame_ids = np.array(frame_ids)
        frame_qzetas = np.array(frame_qzetas)

        ids.append(frame_ids)
        depths.append(frame_depths)
        lambdas.append(frame_lambdas)
        q_zetas.append(frame_qzetas)

    return time, ids, lambdas, depths, q_zetas
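# Hypothetical call matching the signature above (and the way load_data()
# invokes it later on this page); the intrinsics here are placeholders, not
# the calibration used elsewhere in these examples.
def _demo_add_landmark():
    truth = np.hstack((np.arange(100.)[:, None] * 0.01,       # t
                       np.zeros((100, 3)),                    # position
                       np.tile([1., 0., 0., 0.], (100, 1))))  # identity attitude
    landmarks = np.random.uniform(-5., 5., (20, 3))
    F = np.array([[250., 0., 0.], [0., 250., 0.]])
    lambda_0 = np.array([[320.], [240.]])
    return add_landmark(truth, landmarks, np.zeros((3, 1)),
                        Quaternion.Identity(), F, lambda_0)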
Example #10
    def init_feature(self, l, id, depth=np.nan):
        # assert l.shape == (2, 1) and depth.shape == (1, 1)

        self.len_features += 1
        # self.feature_ids.append(self.next_feature_id)
        self.global_to_local_feature_id[id] = self.next_feature_id
        self.next_feature_id += 1

        # Adjust lambdas to be with respect to the center of the image
        l_centered = l - self.cam_center

        # Calculate Quaternion to feature
        f = self.cam_F[0, 0]
        zeta = np.array([[l_centered[0, 0], l_centered[1, 0], f]]).T
        zeta /= norm(zeta)
        q_zeta = Quaternion.from_two_unit_vectors(self.khat, zeta).elements

        if np.isnan(depth):
            # Best guess of depth without other information
            if self.len_features > 1:  # other features exist (this one was already counted above)
                depth = np.average(1.0 / self.x[xZ + 4::5])
            else:
                depth = self.default_depth
        self.x = np.vstack(
            (self.x, q_zeta, 1. / depth))  # add 5 states to the state vector

        # Add three states to the process noise matrix
        self.Qx = scipy.linalg.block_diag(self.Qx, self.Qx_feat)
        self.P = scipy.linalg.block_diag(self.P, self.P0_feat)

        # Adjust the matrix workspace allocation to fit this new feature
        self.A = np.zeros(
            (dxZ + 3 * self.len_features, dxZ + 3 * self.len_features))
        self.G = np.zeros((dxZ + 3 * self.len_features, 6))
        self.I_big = np.eye(dxZ + 3 * self.len_features)
        self.dx = np.zeros((dxZ + 3 * self.len_features, 1))

        return self.next_feature_id - 1
Example #11
    def update(self, z, measurement_type, R, passive=False, **kwargs):
        # assert measurement_type in self.measurement_functions.keys(), "Unknown Measurement Type"

        passive_update = passive

        # If we haven't seen this feature before, then initialize it
        if measurement_type == 'feat':
            if kwargs['i'] not in self.global_to_local_feature_id.keys():
                self.init_feature(
                    z,
                    id=kwargs['i'],
                    depth=(kwargs['depth'] if 'depth' in kwargs else np.nan))

        zhat, H = self.measurement_functions[measurement_type](self.x,
                                                               **kwargs)

        # Calculate residual in the proper manner
        if measurement_type == 'qzeta':
            residual = q_feat_boxminus(Quaternion(z), Quaternion(zhat))
        elif measurement_type == 'att':
            residual = Quaternion(z) - Quaternion(zhat)
            if (abs(residual) > 1).any():
                residual = Quaternion(z) - Quaternion(zhat)
        else:
            residual = z - zhat

        # Perform state and covariance update
        if not passive_update:
            try:
                K = self.P.dot(H.T).dot(
                    scipy.linalg.inv(R + H.dot(self.P).dot(H.T)))
                self.P = (self.I_big - K.dot(H)).dot(self.P)
                self.x = self.boxplus(self.x, K.dot(residual))
            except:
                print "Nan detected in", measurement_type, "update"

        return residual, zhat
Example #12
def load_data(filename,
              start=0,
              end=np.inf,
              sim_features=False,
              show_image=False,
              plot_trajectory=True):
    cam_center = np.array([[316.680559, 230.660661]]).T
    cam_F = np.array([[533.013144, 0.0, 0.0], [0.0, 533.503964, 0.0]])

    if not os.path.isfile('data/bag_tmp.pkl'):
        print "loading rosbag", filename
        # First, load IMU data
        bag = rosbag.Bag(filename)
        imu_data = []
        truth_pose_data = []
        image_data = []
        depth_data = []
        image_time = []
        depth_time = []

        topic_list = [
            '/imu/data', '/vrpn_client_node/Leo/pose', '/vrpn/Leo/pose',
            '/vrpn/RGBD_Camera/pose', '/baro/data', '/sonar/data',
            '/is_flying', '/gps/data', '/mag/data', '/camera/depth/image_rect',
            '/camera/rgb/image_rect_mono/compressed',
            '/camera/rgb/image_rect_color/compressed'
        ]

        for topic, msg, t in tqdm(bag.read_messages(topics=topic_list),
                                  total=bag.get_message_count(topic_list)):
            if topic == '/imu/data':
                imu_meas = [
                    msg.header.stamp.to_sec(), msg.angular_velocity.x,
                    msg.angular_velocity.y, msg.angular_velocity.z,
                    msg.linear_acceleration.x, msg.linear_acceleration.y,
                    msg.linear_acceleration.z
                ]
                imu_data.append(imu_meas)

            if topic == '/vrpn_client_node/Leo/pose' or topic == '/vrpn/Leo/pose' or topic == '/vrpn/RGBD_Camera/pose':
                truth_meas = [
                    msg.header.stamp.to_sec(), msg.pose.position.z,
                    -msg.pose.position.x, -msg.pose.position.y,
                    -msg.pose.orientation.w, -msg.pose.orientation.z,
                    msg.pose.orientation.x, msg.pose.orientation.y
                ]
                truth_pose_data.append(truth_meas)

            if topic == '/camera/rgb/image_rect_mono/compressed' or topic == '/camera/rgb/image_rect_color/compressed':
                np_arr = np.fromstring(msg.data, np.uint8)
                image = cv2.imdecode(np_arr, cv2.IMREAD_GRAYSCALE)
                image_time.append(msg.header.stamp.to_sec())
                image_data.append(image)

            if topic == '/camera/depth/image_rect':
                img = np.fromstring(msg.data,
                                    np.float32).reshape(msg.height, msg.width)

                depth_time.append(msg.header.stamp.to_sec())
                depth_data.append(img)

        tmp_dict = dict()
        tmp_dict['imu_data'] = np.array(imu_data)
        tmp_dict['truth_pose_data'] = np.array(truth_pose_data)
        tmp_dict['image_data'] = np.array(image_data)
        tmp_dict['depth_data'] = np.array(depth_data)
        tmp_dict['image_time'] = np.array(image_time)
        tmp_dict['depth_time'] = np.array(depth_time)
        cPickle.dump(tmp_dict, open('data/bag_tmp.pkl', 'wb'), protocol=2)
    else:
        tmp_dict = cPickle.load(open('data/bag_tmp.pkl', 'rb'))

    imu_data = tmp_dict['imu_data']
    truth_pose_data = tmp_dict['truth_pose_data']
    image_data = tmp_dict['image_data']
    depth_data = tmp_dict['depth_data']
    image_time = tmp_dict['image_time']
    depth_time = tmp_dict['depth_time']

    # assert np.abs(truth_pose_data[0, 0] - imu_data[0, 0]) < 1e5, 'truth and imu timestamps are vastly different: {} (truth) vs. {} (imu)'.format(truth_pose_data[0, 0], imu_data[0, 0])

    # Remove Bad Truth Measurements
    good_indexes = np.hstack((True, np.diff(truth_pose_data[:, 0]) > 1e-3))
    truth_pose_data = truth_pose_data[good_indexes]

    vel_data = calculate_velocity_from_position(truth_pose_data[:, 0],
                                                truth_pose_data[:, 1:4],
                                                truth_pose_data[:, 4:8])

    ground_truth = np.hstack((truth_pose_data, vel_data))

    # Adjust timestamp
    imu_t0 = imu_data[0, 0] + 1
    gt_t0 = ground_truth[0, 0]
    imu_data[:, 0] -= imu_t0
    ground_truth[:, 0] -= gt_t0
    image_time -= imu_t0
    depth_time -= imu_t0

    # Chop Data to start and end
    imu_data = imu_data[(imu_data[:, 0] > start) & (imu_data[:, 0] < end), :]
    ground_truth = ground_truth[(ground_truth[:, 0] > start) &
                                (ground_truth[:, 0] < end), :]
    image_data = image_data[(image_time > start) & (image_time < end)]
    depth_data = depth_data[(depth_time > start) & (depth_time < end)]
    image_time = image_time[(image_time > start) & (image_time < end)]
    depth_time = depth_time[(depth_time > start) & (depth_time < end)]

    # Simulate camera-to-body transform
    q_b_c = Quaternion.from_R(np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))
    # q_b_c = Quaternion.Identity()
    # p_b_c = np.array([[0.16, -0.05, 0.1]]).T
    p_b_c = np.zeros((3, 1))

    if sim_features:
        # Simulate Landmark Measurements
        # landmarks = np.random.uniform(-25, 25, (2,3))
        # landmarks = np.array([[1, 0, 1, 0, np.inf],
        #                       [0, 1, 1, 0, np.inf],
        #                       [0, 0, 1, 0, 3],
        #                       [1, 1, 1, 0, 3],
        #                       [-1, 0, 1, 10, 25],
        #                       [0, -1, 1, 10, 25],
        #                       [-1, -1, 1, 10, np.inf],
        #                       [1, -1, 1, 20, np.inf],
        #                       [-1, 1, 1, 20, np.inf]])
        N = 500
        last_t = imu_data[-1, 0]
        landmarks = np.hstack([
            np.random.uniform(-4, 4, (N, 1)),
            np.random.uniform(-4, 4, (N, 1)),
            np.random.uniform(-4, 4, (N, 1))
        ])

        feat_time, ids, lambdas, depths, q_zetas = add_landmark(
            ground_truth, landmarks, p_b_c, q_b_c, cam_F, cam_center)
        play_trajectory_with_sim_features(image_time, image_data, feat_time,
                                          ids, lambdas)

    else:
        tracker = klt_tracker.KLT_tracker(3, show_image=show_image)
        lambdas, depths, ids, feat_time, q_zetas = [], [], [], [], []

        _, image_height, image_width = image_data.shape
        _, depth_height, depth_width = depth_data.shape

        assert image_height == depth_height and image_width == depth_width

        for i, image in enumerate(image_data):
            frame_lambdas, frame_ids = tracker.load_image(image, image_time[i])

            # KLT tracker doesn't consider image bounds :(
            frame_lambdas = np.clip(frame_lambdas[:, 0], [0, 0], [639, 479])

            frame_depths = []
            frame_qzetas = []
            nearest_depth = np.abs(depth_time - image_time[i]).argmin()
            for x, y in frame_lambdas:
                d = depth_data[nearest_depth][int(y), int(x)]
                frame_depths.append(d)
                zeta = np.array([[
                    x - cam_center[0, 0],
                    (y - cam_center[1, 0]) * (cam_F[1, 1] / cam_F[0, 0]),
                    cam_F[0, 0]
                ]]).T
                zeta /= norm(zeta)
                frame_qzetas.append(
                    Quaternion.from_two_unit_vectors(e_z, zeta).elements[:, 0])

            depths.append(np.array(frame_depths)[:, None])
            lambdas.append(frame_lambdas)
            ids.append(frame_ids)
            feat_time.append(image_time[i])
            q_zetas.append(np.array(frame_qzetas))
        tracker.close()

    # self.undistort, P = data_loader.make_undistort_funtion(intrinsics=self.data['cam0_sensor']['intrinsics'],
    #                                                     resolution=self.data['cam0_sensor']['resolution'],
    #                                                     distortion_coefficients=self.data['cam0_sensor']['distortion_coefficients'])

    # self.inverse_projection = np.linalg.inv(P)

    # lambdas, ids = self.tracker.load_image(image)
    #
    # if lambdas is not None and len(lambdas) > 0:
    #     lambdas = np.pad(lambdas[:, 0], [(0, 0), (0, 1)], 'constant', constant_values=0)

    if plot_trajectory:
        plot_3d_trajectory(ground_truth,
                           feat_time=feat_time,
                           qzetas=q_zetas,
                           depths=depths,
                           ids=ids,
                           p_b_c=p_b_c,
                           q_b_c=q_b_c)

    plt.ioff()
    out_dict = dict()
    out_dict['imu'] = imu_data
    out_dict['truth'] = ground_truth
    out_dict['feat_time'] = feat_time
    out_dict['lambdas'] = lambdas
    out_dict['depths'] = depths
    out_dict['ids'] = ids
    out_dict['p_b_c'] = p_b_c
    out_dict['q_b_c'] = q_b_c
    out_dict['q_zetas'] = q_zetas
    # out_dict['image'] = image_data
    out_dict['image_t'] = image_time

    # out_dict['depth'] = depth_data
    out_dict['depth_t'] = depth_time
    out_dict['cam_center'] = cam_center
    out_dict['cam_F'] = cam_F

    return out_dict
import csv
import numpy as np
import glob, os, sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
from tqdm import tqdm
from pyquat import Quaternion
from add_landmark import add_landmark
import yaml
import matplotlib.pyplot as plt

# Rotation from NWU to NED
R_NWU_NED = np.array([[1., 0, 0], [0, -1., 0], [0, 0, -1.]])
q_NWU_NED = Quaternion.from_R(R_NWU_NED)

R_DWN_NED = np.array([[0., 0., 1.], [0., -1., 0.], [1., 0., 0.]])
q_DWN_NED = Quaternion.from_R(R_DWN_NED)

# Ground truth is the state of the IMU frame with respect to the world frame.
# The world frame appears to be DWN, judging from inspection of the pose trajectory.
# The IMU frame is different from the body frame of the vehicle.

# Rotation from the IMU frame to the body frame
R_IMU_B = np.array([[0.33638, -0.01749, 0.94156], [0.02078, 0.99972, 0.01114],
                    [-0.9415, 0.01582, 0.33665]])
q_IMU_B = Quaternion.from_R(R_IMU_B)

R_IMU_NED = R_DWN_NED.dot(R_IMU_B.T)


def load_from_file(filename):
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from pyquat import Quaternion
from math_helper import norm, skew

e = 1e-6

## Scratch remove axis of rotation from quaternion
u = np.array([[0, 0, 1.]]).T
# u = np.random.random((3,1))
u /= norm(u)

psi_hist, yaw_hist, s_hist, v_hist, qz_hist, qx_hist, qy_hist = [], [], [], [], [], [], []
for i in tqdm(range(10000)):
    # qm0 = Quaternion.from_euler(0.0, 10.0, 2*np.pi*np.random.random() - np.pi)
    # qm0 = Quaternion.from_euler(0.0, np.pi*np.random.random() - np.pi/2.0, 0.0)
    qm0 = Quaternion.from_euler(2*np.pi*np.random.random() - np.pi, np.pi * np.random.random() - np.pi / 2.0, 0.0)
    # qm0 = Quaternion.from_euler(270.0 * np.pi / 180.0, 85.0 * np.pi / 180.0, 90.0 * np.pi / 180.0)
    # qm0 = Quaternion.random()

    w = qm0.rot(u)
    th = u.T.dot(w)
    ve = skew(u).dot(w)
    qp0 = Quaternion.exp(th * ve)

    epsilon = np.eye(3) * e

    t = u.T.dot(qm0.rot(u))
    v = skew(u).dot(qm0.rot(u))

    tv0 = u.T.dot(qm0.rot(u)) * (skew(u).dot(qm0.rot(u)))
    a_dtvdq = -v.dot(u.T.dot(qm0.R.T).dot(skew(u))) - t*skew(u).dot(qm0.R.T.dot(skew(u)))
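    # A finite-difference companion to the analytic Jacobian a_dtvdq above --
    # a sketch assuming pyquat's `+` is the boxplus perturbation used elsewhere
    # on this page (e.g. `quat + omega * dt`); `epsilon` is the (3, 3)
    # perturbation matrix defined above.
    d_dtvdq = np.zeros((3, 3))
    for k in range(3):
        qm = qm0 + epsilon[:, k, None]
        tv = u.T.dot(qm.rot(u)) * (skew(u).dot(qm.rot(u)))
        d_dtvdq[:, k, None] = (tv - tv0) / e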
Example #15
                                              passive=np.isnan(depth),
                                              i=id)
            h.store(t,
                    id,
                    depth_hat=depth_hat,
                    depth=depth,
                    depth_res=depth_res)
            h.store(t,
                    id,
                    lambda_res=lambda_res,
                    lambda_hat=lambda_hat,
                    lmda=l)
            h.store(t, id, qzeta=qz, qzeta_hat=ekf.get_qzeta(id))
            h.store(t,
                    id,
                    zeta=Quaternion(qz).rot(e_z),
                    zeta_hat=ekf.get_zeta(id))
            dxRHO_i = viekf.dxZ + 3 * ekf.global_to_local_feature_id[id] + 2
            h.store(t, id, Pfeat=ekf.P[dxRHO_i, dxRHO_i, None, None])
        h.store(t, ids=ids)

# plot
if True:
    # convert all our linked lists to contiguous numpy arrays and initialize the plot parameters
    h.tonumpy()
    init_plots(start, end)
    print "plotting"

    plot_side_by_side('x_pos',
                      viekf.xPOS,
                      viekf.xPOS + 3,
Example #16
from pyquat import Quaternion
from math_helper import norm, skew
import numpy as np

# for i in range(100):
q = Quaternion.random()
# q = Quaternion.from_axis_angle(np.array([[0, 0, 1]]).T, np.pi/2.)
yaw_true = q.yaw
v = np.array([[0, 0, 1]]).T
beta = q.elements[1:]
beta /= norm(beta)
alpha = 2. * np.arccos(q.elements[0, 0])
yaw_steven = 2. * np.arctan(beta.T.dot(v) * np.tan(alpha / 2.))

w = q.rot(v)
s = v.T.dot(w)
delta = skew(v).dot(w)
qhat = Quaternion.exp(s * delta)
qstar = q * qhat.inverse
yaw_superjax = qstar.yaw

print "superjax", (yaw_superjax)
print "steven", (yaw_steven)
print "true", (yaw_true)
# assert abs(yaw_true - yaw_test) < 1e-8, "wrong: true = %f, test = %f" % (yaw_true, yaw_test)
Example #17
def import_log(log_stamp):
    log_dir = os.path.dirname(os.path.realpath(__file__)) + "/../logs/"
    prop_file = open(log_dir + str(log_stamp) + "/prop.txt")
    perf_file = open(log_dir + str(log_stamp) + "/perf.txt")
    meas_file = open(log_dir + str(log_stamp) + "/meas.txt")
    conf_file = open(log_dir + str(log_stamp) + "/conf.txt")
    input_file = open(log_dir + str(log_stamp) + "/input.txt")
    xdot_file = open(log_dir + str(log_stamp) + "/xdot.txt")

    h = History()
    len_prop_file = 0
    for line in prop_file:
        line_arr = np.array([float(item) for item in line.split()])
        if len_prop_file == 0: len_prop_file = len(line_arr)
        if len(line_arr) < len_prop_file: continue
        num_features = (len(line_arr) - 34) // 8
        X = 1
        COV = 1 + 17 + 5 * num_features
        t = line_arr[0]
        h.store(t,
                xhat=line_arr[1:18],
                cov=np.diag(line_arr[COV:]),
                num_features=num_features)

    for i, line in enumerate(perf_file):
        if i == 0: continue
        line_arr = np.array([float(item) for item in line.split()])
        if len(line_arr) == 12:
            t = line_arr[0]
            h.store(t,
                    prop_time=line_arr[1],
                    acc_time=line_arr[2],
                    pos_time=line_arr[5],
                    feat_time=line_arr[8],
                    depth_time=line_arr[10])

    ids = []
    for line in meas_file:
        try:
            meas_type = line.split()[0]
            line_arr = np.array([float(item) for item in line.split()[2:]])
            t = float(line.split()[1])
        except:
            continue

        if meas_type == 'ACC':
            if len(line_arr) < 6: continue
            h.store(t,
                    acc=line_arr[0:2],
                    acc_hat=line_arr[2:4],
                    acc_active=line_arr[5])
        elif meas_type == 'ATT':
            if len(line_arr) < 10: continue
            h.store(t,
                    att=line_arr[0:4],
                    att_hat=line_arr[4:8],
                    att_active=line_arr[9])
        elif meas_type == 'GLOBAL_ATT':
            if len(line_arr) < 8: continue
            h.store(t, global_att=line_arr[0:4], global_att_hat=line_arr[4:8])
        elif meas_type == 'POS':
            if len(line_arr) < 8: continue
            h.store(t,
                    pos=line_arr[0:3],
                    pos_hat=line_arr[3:6],
                    pos_active=line_arr[7])
        elif meas_type == 'GLOBAL_POS':
            if len(line_arr) < 6: continue
            h.store(t, global_pos=line_arr[0:3], global_pos_hat=line_arr[3:6])
        elif meas_type == 'FEAT':
            if len(line_arr) < 7: continue
            id = line_arr[6]
            h.store(t,
                    id,
                    feat_hat=line_arr[0:2],
                    feat=line_arr[2:4],
                    feat_cov=np.diag(line_arr[4:6]))
            if id not in ids:
                ids.append(id)
        elif meas_type == 'DEPTH':
            if len(line_arr) < 4: continue
            # Invert the covariance measurement
            p = 1.0 / line_arr[0]
            s = line_arr[2]
            cov = 1. / (p + s) - 1. / p
            h.store(t,
                    line_arr[3],
                    depth=line_arr[1],
                    depth_hat=line_arr[0],
                    depth_cov=[[cov]])
        elif meas_type == 'ALT':
            if len(line_arr) < 3: continue
            h.store(t, alt=line_arr[0], alt_hat=line_arr[1])
        else:
            print("unsupported measurement type ", meas_type)

    for line in input_file:
        line_arr = np.array([float(item) for item in line.split()])
        if len(line_arr) < 6: continue
        h.store(line_arr[0], u_acc=line_arr[1:4], u_gyro=line_arr[4:])

    for line in xdot_file:
        line_arr = np.array([float(item) for item in line.split()])
        if len(line_arr) < 18: continue
        h.store(line_arr[0], dt=line_arr[1], xdot=line_arr[2:18])

    h.tonumpy()

    # Calculate true body-fixed velocity by differentiating position and rotating
    # into the body frame
    delta_t = np.diff(h.t.global_pos)
    good_ids = delta_t > 0.001  # only take truth measurements with a reasonable time difference
    delta_t = delta_t[good_ids]
    h.t.vel = h.t.global_pos[np.hstack((good_ids, False))]
    delta_x = np.diff(h.global_pos, axis=0)
    delta_x = delta_x[good_ids]
    unfiltered_inertial_velocity = np.vstack(
        delta_x / delta_t[:, None])  # Differentiate Truth
    b_vel, a_vel = scipy.signal.butter(3, 0.50)  # Smooth Differentiated Truth
    v_inertial = scipy.signal.filtfilt(b_vel,
                                       a_vel,
                                       unfiltered_inertial_velocity,
                                       axis=0)

    # Rotate into Body Frame
    vel_data = []
    try:
        att = h.global_att[np.hstack((good_ids))]
    except:
        att = h.global_att[np.hstack((good_ids, False))]
    for i in range(len(h.t.vel)):
        q_I_b = Quaternion(att[i, :, None])
        vel_data.append(q_I_b.invrot(v_inertial[i, None].T).T)

    h.vel = np.array(vel_data).squeeze()

    # Calculate Euler Angles from attitudes
    # Convert global attitude to euler angles
    true_euler, est_euler = np.zeros((len(h.global_att), 3)), np.zeros(
        (len(h.global_att_hat), 3))
    for i, true_quat in enumerate(h.global_att):
        true_euler[i, :, None] = Quaternion(true_quat[:, None]).euler
    for i, est_quat in enumerate(h.global_att_hat):
        est_euler[i, :, None] = (Quaternion(est_quat[:, None]).euler)
    h.global_euler = true_euler
    h.global_euler_hat = est_euler

    # Convert relative attitude to euler angles
    true_euler, est_euler = np.zeros((len(h.att), 3)), np.zeros(
        (len(h.xhat), 3))
    for i, true_quat in enumerate(h.att):
        true_euler[i, :, None] = Quaternion(true_quat[:, None]).euler
    for i, est_quat in enumerate(h.xhat[:, 6:10]):
        est_euler[i, :, None] = (Quaternion(est_quat[:, None]).euler)
    h.euler = true_euler
    h.euler_hat = est_euler

    h.ids = ids

    return h
T_IMU_c = np.array([[ 0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],
                    [ 0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],
                    [-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],
                    [ 0.0, 0.0, 0.0, 1.0]])
R_IMU_c = T_IMU_c[:3,:3]
p_b_c_IMU = T_IMU_c[:3,3,None]

T_IMU_vicon = np.array([[ 0.33638, -0.01749,  0.94156,  0.06901],
         	  			[-0.02078, -0.99972, -0.01114, -0.02781],
         	  			[ 0.94150, -0.01582, -0.33665, -0.12395],
              			[ 0.0,      0.0,      0.0,      1.0]])
R_IMU_vicon = T_IMU_vicon[:3,:3]

# R_vicon_b = np.array([[1, 0, 0],
# 					  [0, -1, 0],
# 					  [0, 0, -1]]).dot(np.array([[-1, 0, 0],[0, 1, 0], [0, 0, -1]]))
q_vicon_b = Quaternion(np.array([[0, 0, 0, -1.0]]).T)
R_vicon_b = np.array([[1, 0, 0],
					  [0, -1, 0],
					  [0, 0, -1]])
q_vicon_b = Quaternion(np.array([[0, 0, 0, -1.0]]).T) * Quaternion(np.array([[0, 1.0, 0, 0]]).T)
q0 = Quaternion(np.array([[0, 0, 0, 1]]).T)
print ("q0 = ", q_vicon_b * q0)
print ("####################################")

R_b_IMU =(R_vicon_b.dot(R_IMU_vicon))


R_b_c = R_b_IMU.dot(R_IMU_c)
p_b_c_b = R_b_IMU.T.dot(p_b_c_IMU)

Example #19
    def dynamics(self, x, u):
        # assert x.shape == (xZ+5*self.len_features, 1) and u.shape == (6,1)

        # Reset Matrix Workspace
        self.dx.fill(0.0)
        self.A.fill(0.0)
        self.G.fill(0.0)

        vel = x[xVEL:xVEL + 3]
        q_I_b = Quaternion(x[xATT:xATT + 4])

        omega = u[uG:uG + 3] - x[xB_G:xB_G + 3]
        acc = u[uA:uA + 3] - x[xB_A:xB_A + 3]
        acc_z = np.array([[0, 0, acc[2, 0]]]).T
        mu = x[xMU, 0]

        gravity_B = q_I_b.invrot(self.gravity)
        vel_I = q_I_b.invrot(vel)
        vel_xy = I_2x3.T.dot(I_2x3).dot(vel)

        # CALCULATE STATE DYNAMICS
        self.dx[dxPOS:dxPOS + 3] = vel_I
        if self.use_drag_term:
            self.dx[dxVEL:dxVEL + 3] = acc_z + gravity_B - mu * vel_xy
        else:
            self.dx[dxVEL:dxVEL + 3] = acc + gravity_B
        self.dx[dxATT:dxATT + 3] = omega

        ###################################
        # STATE JACOBIAN
        self.A[dxPOS:dxPOS + 3, dxVEL:dxVEL + 3] = q_I_b.R
        self.A[dxPOS:dxPOS + 3, dxATT:dxATT + 3] = skew(vel_I)
        if self.use_drag_term:
            self.A[dxVEL:dxVEL + 3, dxVEL:dxVEL + 3] = -mu * I_2x3.T.dot(I_2x3)
            self.A[dxVEL:dxVEL + 3,
                   dxB_A:dxB_A + 3] = -self.khat.dot(self.khat.T)
            self.A[dxVEL:dxVEL + 3, dxMU, None] = -vel_xy
        else:
            self.A[dxVEL:dxVEL + 3, dxB_A:dxB_A + 3] = -I_3x3
        self.A[dxVEL:dxVEL + 3, dxATT:dxATT + 3] = skew(gravity_B)
        self.A[dxATT:dxATT + 3, dxB_G:dxB_G + 3] = -I_3x3

        #################################
        ## INPUT JACOBIAN
        if self.use_drag_term:
            self.G[dxVEL:dxVEL + 3, uA:uA + 3] = self.khat.dot(self.khat.T)
        else:
            self.G[dxVEL:dxVEL + 3, uA:uA + 3] = I_3x3
        self.G[dxATT:dxATT + 3, uG:uG + 3] = I_3x3

        # Camera Dynamics
        omega_c_i = self.q_b_c.invrot(omega)
        vel_c_i = self.q_b_c.invrot(vel - skew(omega).dot(self.p_b_c))

        for i in range(self.len_features):
            xZETA_i = xZ + i * 5
            xRHO_i = xZ + 5 * i + 4
            dxZETA_i = dxZ + i * 3
            dxRHO_i = dxZ + i * 3 + 2

            q_zeta = Quaternion(x[xZETA_i:xZETA_i + 4, :])
            rho = x[xRHO_i, 0]
            zeta = q_zeta.rot(self.khat)
            T_z = T_zeta(q_zeta)
            skew_zeta = skew(zeta)
            skew_vel_c = skew(vel_c_i)
            skew_p_b_c = skew(self.p_b_c)
            R_b_c = self.q_b_c.R
            rho2 = rho * rho

            #################################
            ## FEATURE DYNAMICS
            self.dx[dxZETA_i:dxZETA_i +
                    2, :] = -T_z.T.dot(omega_c_i +
                                       rho * skew_zeta.dot(vel_c_i))
            self.dx[dxRHO_i, :] = rho2 * zeta.T.dot(vel_c_i)

            #################################
            ## FEATURE STATE JACOBIAN
            self.A[dxZETA_i:dxZETA_i + 2,
                   dxVEL:dxVEL + 3] = -rho * T_z.T.dot(skew_zeta).dot(R_b_c)
            self.A[dxZETA_i:dxZETA_i + 2, dxB_G:dxB_G +
                   3] = T_z.T.dot(rho * skew_zeta.dot(R_b_c).dot(skew_p_b_c) +
                                  R_b_c)
            self.A[dxZETA_i:dxZETA_i + 2, dxZETA_i:dxZETA_i + 2] = -T_z.T.dot(
                skew(rho * skew_zeta.dot(vel_c_i) + omega_c_i) +
                (rho * skew_vel_c.dot(skew_zeta))).dot(T_z)
            self.A[dxZETA_i:dxZETA_i + 2, dxRHO_i,
                   None] = -T_z.T.dot(skew_zeta).dot(vel_c_i)
            self.A[dxRHO_i, dxVEL:dxVEL + 3] = rho2 * zeta.T.dot(R_b_c)
            self.A[dxRHO_i,
                   dxB_G:dxB_G + 3] = -rho2 * zeta.T.dot(R_b_c).dot(skew_p_b_c)
            self.A[dxRHO_i, dxZETA_i:dxZETA_i +
                   2] = -rho2 * vel_c_i.T.dot(skew_zeta).dot(T_z)
            self.A[dxRHO_i, dxRHO_i] = 2 * rho * zeta.T.dot(vel_c_i).squeeze()

            #################################
            ## FEATURE INPUT JACOBIAN
            self.G[dxZETA_i:dxZETA_i + 2, uG:uG +
                   3] = -T_z.T.dot(R_b_c +
                                   rho * skew_zeta.dot(R_b_c).dot(skew_p_b_c))
            self.G[dxRHO_i, uG:] = rho2 * zeta.T.dot(R_b_c).dot(skew_p_b_c)

        return self.dx, self.A, self.G
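# A generic finite-difference check of the analytic state Jacobian returned by
# dynamics() -- a sketch, assuming numpy as np, the VI_EKF class from Example
# #25, and the boxplus() defined on this page. Note that dynamics() returns
# views of internal workspaces, so they are copied before the second call.
def _check_dynamics_jacobian(ekf, u, eps=1e-6):
    dx0, A, _ = ekf.dynamics(ekf.x, u)
    dx0, A = dx0.copy(), A.copy()
    A_num = np.zeros_like(A)
    for i in range(A.shape[0]):
        delta = np.zeros((A.shape[0], 1))
        delta[i] = eps
        dx1, _, _ = ekf.dynamics(ekf.boxplus(ekf.x, delta), u)
        A_num[:, i, None] = (dx1 - dx0) / eps
    return abs(A - A_num).max()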
Example #20
import numpy as np
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Vector3
import scipy.signal
import sys
from tqdm import tqdm
from pyquat import Quaternion

bags = [
    'V1_01_easy', 'V1_02_medium', 'V1_03_difficult', 'V2_01_easy',
    'V2_02_medium', 'V2_03_difficult', 'MH_01_easy', 'MH_02_easy',
    'MH_03_medium', 'MH_04_difficult', 'MH_05_difficult'
]

print("Fix ETH Bag")

q_NED_NWU = Quaternion(np.array([[0., 1., 0., 0.]]).T)
q_IMU_NWU = Quaternion.from_R(
    np.array([[0.33638, -0.01749, 0.94156], [-0.02078, -0.99972, -0.01114],
              [0.94150, -0.01582, -0.33665]]))
q_IMU_NED = q_IMU_NWU * q_NED_NWU

T_IMU_C = np.array(
    [[0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],
     [0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],
     [-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],
     [0.0, 0.0, 0.0, 1.0]])
q_IMU_C = Quaternion.from_R(T_IMU_C[:3, :3])
p_IMU_C_IMU = T_IMU_C[:3, 3, None]

print("q_IMU_C = ", q_IMU_C)
print("p_IMU_C_IMU", p_IMU_C_IMU.T)
Example #21
    def __init__(self, x0, multirotor=True):
        # assert x0.shape == (xZ, 1)

        # 17 main states + 5N feature states
        # pos, vel, att, b_gyro, b_acc, mu, q_feat, rho_feat, q_feat, rho_feat ...
        self.x = x0
        self.u = np.zeros((6, 1))

        # Process noise matrix for the 16 main delta states
        self.Qx = np.diag([
            0.001,
            0.001,
            0.001,  # pos
            0.1,
            0.1,
            0.1,  # vel
            0.005,
            0.005,
            0.005,  # att
            1e-7,
            1e-7,
            1e-7,  # b_acc
            1e-8,
            1e-8,
            1e-8,  # b_omega
            0.0
        ])  # mu

        # process noise matrix for the features (assumed all the same) 3x3
        self.Qx_feat = np.diag([0.001, 0.001, 0.01])  # x, y, and 1/depth

        # Process noise assumed from inputs (mechanized sensors)
        self.Qu = np.diag([
            0.05,
            0.05,
            0.05,  # y_acc
            0.001,
            0.001,
            0.001
        ])  # y_omega

        # State covariances.  Size is (16 + 3N) x (16 + 3N) where N is the number of
        # features currently being tracked
        self.P = np.diag([
            0.0001,
            0.0001,
            0.0001,  # pos
            0.01,
            0.01,
            0.01,  # vel
            0.001,
            0.001,
            0.001,  # att
            1e-2,
            1e-2,
            1e-3,  # b_acc
            1e-3,
            1e-3,
            1e-3,  # b_omega
            1e-7
        ])  # mu

        # Initial Covariance estimate for new features
        self.P0_feat = np.diag([0.01, 0.01, 0.1])  # x, y, and 1/depth

        # gravity vector (NED)
        self.gravity = np.array([[0, 0, 9.80665]]).T

        # Unit vectors in the x, y, and z directions (used a lot for projection functions)
        self.ihat = np.array([[1, 0, 0]]).T
        self.jhat = np.array([[0, 1, 0]]).T
        self.khat = np.array([[0, 0, 1]]).T

        # The number of features currently being tracked
        self.len_features = 0

        # The next feature id to be assigned to a feature
        self.next_feature_id = 0

        # Set of initialized feature ids
        self.initialized_features = set()
        self.global_to_local_feature_id = {}

        # Body-to-Camera transform
        self.q_b_c = Quaternion(np.array([[1, 0, 0, 0]
                                          ]).T)  # Rotation from body to camera
        self.p_b_c = np.array(
            [[0, 0, 0]]).T  # translation from body to camera (in body frame)

        self.measurement_functions = dict()
        self.measurement_functions['acc'] = self.h_acc
        self.measurement_functions['alt'] = self.h_alt
        self.measurement_functions['att'] = self.h_att
        self.measurement_functions['pos'] = self.h_pos
        self.measurement_functions['vel'] = self.h_vel
        self.measurement_functions['qzeta'] = self.h_qzeta
        self.measurement_functions['feat'] = self.h_feat
        self.measurement_functions['pixel_vel'] = self.h_pixel_vel
        self.measurement_functions['depth'] = self.h_depth
        self.measurement_functions['inv_depth'] = self.h_inv_depth

        # Matrix Workspace
        self.A = np.zeros((dxZ, dxZ))
        self.G = np.zeros((dxZ, 6))
        self.I_big = np.eye(dxZ)
        self.dx = np.zeros((dxZ, 1))

        self.use_drag_term = True
        self.default_depth = np.array([[1.5]])

        self.last_propagate = None
        self.cam_center = np.array([[320.0, 240.0]]).T
        self.cam_F = np.diag([250.0, 250.0]).dot(I_2x3)  # (2, 3), as set_camera_intrinsics expects
        self.use_drag_term = multirotor
Example #22
    def get_zeta(self, i):
        ft_id = self.global_to_local_feature_id[i]
        qzeta = self.x[xZ + 5 * ft_id:xZ + 5 * ft_id +
                       4, :]  # 4-vector quaternion
        return Quaternion(qzeta).rot(
            self.khat)  # 3-vector pointed at the feature in the camera frame
Example #23
def q_feat_boxplus(q, dq):
    # assert isinstance(q, Quaternion) and dq.shape == (2,1)
    return Quaternion.exp(T_zeta(q).dot(dq)) * q
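
# q_feat_boxminus is used throughout these examples (update(), run_tests())
# but never shown. Below is a sketch of a counterpart consistent with the
# boxplus above, assuming e_z, norm, skew and T_zeta are the module-level
# helpers used elsewhere on this page: the 2-vector that rotates q2's bearing
# onto q1's bearing, expressed in q2's tangent basis.
def q_feat_boxminus(q1, q2):
    zeta1 = q1.rot(e_z)
    zeta2 = q2.rot(e_z)
    if norm(zeta1 - zeta2) > 1e-16:
        axis = skew(zeta2).dot(zeta1)
        axis /= norm(axis)
        angle = np.arccos(np.clip(zeta2.T.dot(zeta1), -1.0, 1.0))
        return T_zeta(q2).T.dot(angle * axis)
    return np.zeros((2, 1))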
Example #24
def run_tests():
    # run some math helper tests

    # Test vectorized quat from two unit vectors
    v1 = np.random.uniform(-1, 1, (3, 100))
    v2 = np.random.uniform(-1, 1, (3, 1))
    v3 = np.random.uniform(-1, 1, (3, 1))
    v1 /= norm(v1, axis=0)
    v2 /= norm(v2)
    v3 /= norm(v3)
    # On a single vector
    assert norm(
        Quaternion(q_array_from_two_unit_vectors(v3, v2)).rot(v3) - v2) < 1e-8
    # on a bunch of vectors
    quat_array = q_array_from_two_unit_vectors(v2, v1)
    for q, v in zip(quat_array.T, v1.T):
        assert norm(Quaternion(q[:, None]).rot(v2) - v[:, None]) < 1e-8

    # Test T_zeta
    q2 = q_array_from_two_unit_vectors(e_z, v2)
    assert norm(T_zeta(Quaternion(q2)).T.dot(v2)) < 1e-8

    # Check derivative of T_zeta - This was giving me trouble
    d_dTdq = np.zeros((2, 2))
    q = Quaternion(np.random.uniform(-1, 1, (4, 1)))
    q.arr[3] = 0.0
    q.normalize()
    x0 = T_zeta(q).T.dot(v2)
    epsilon = 1e-6
    I = np.eye(2) * epsilon
    for i in range(2):
        qplus = q_feat_boxplus(q, I[:, i, None])
        xprime = T_zeta(qplus).T.dot(v2)
        d_dTdq[i, :, None] = (xprime - x0) / epsilon
    a_dTdq = -T_zeta(q).T.dot(skew(v2).dot(T_zeta(q)))
    assert (abs(a_dTdq - d_dTdq) < 1e-6).all()

    # Check Derivative  dqzeta/dqzeta <- this was also giving me trouble
    for j in range(1000):
        d_dqdq = np.zeros((2, 2))
        if j == 0:
            q = Quaternion.Identity()
        else:
            q = Quaternion(np.random.uniform(-1, 1, (4, 1)))
            q.arr[3] = 0.0
            q.normalize()
        for i in range(2):
            d_dqdq[i, :, None] = q_feat_boxminus(
                q_feat_boxplus(q, I[:, i, None]), q) / epsilon
        a_dqdq = T_zeta(q).T.dot(T_zeta(q))
        assert (abs(a_dqdq - d_dqdq) < 1e-1).all()

    # Check Manifold Consistency
    for i in range(1000):
        omega = np.random.uniform(-1, 1, (3, 1))
        omega2 = np.random.uniform(-1, 1, (3, 1))
        omega[2] = 0.0
        omega2[2] = 0.0
        x = Quaternion.exp(omega)
        y = Quaternion.exp(omega2)
        dx = np.random.normal(0.0, 0.5, (2, 1))

        # Check x [+] 0 == x
        assert norm(q_feat_boxplus(x, np.zeros((2, 1))) - x) < 1e-8

        # Check x [+] (y [-] x) == y (compare the rotated zetas, because there are infinitely
        # many quaternions which return the same zeta.)  We don't have the framework to handle
        # forcing the quaternion to actually be the same
        assert norm((q_feat_boxplus(x, q_feat_boxminus(y, x))).rot(e_z) -
                    y.rot(e_z)) < 1e-8

        # Check (x [+] dx) [-] x == dx
        assert norm(q_feat_boxminus(q_feat_boxplus(x, dx), x) - dx) < 1e-8
    # assert norm( q_feat_boxminus(q_feat_boxplus(qzeta, dqzeta), qzeta) - dqzeta) < 1e-8

    print "math helper test: [PASSED]"
Example #25
class VI_EKF():
    def __init__(self, x0, multirotor=True):
        # assert x0.shape == (xZ, 1)

        # 17 main states + 5N feature states
        # pos, vel, att, b_gyro, b_acc, mu, q_feat, rho_feat, q_feat, rho_feat ...
        self.x = x0
        self.u = np.zeros((6, 1))

        # Process noise matrix for the 16 main delta states
        self.Qx = np.diag([
            0.001,
            0.001,
            0.001,  # pos
            0.1,
            0.1,
            0.1,  # vel
            0.005,
            0.005,
            0.005,  # att
            1e-7,
            1e-7,
            1e-7,  # b_acc
            1e-8,
            1e-8,
            1e-8,  # b_omega
            0.0
        ])  # mu

        # process noise matrix for the features (assumed all the same) 3x3
        self.Qx_feat = np.diag([0.001, 0.001, 0.01])  # x, y, and 1/depth

        # Process noise assumed from inputs (mechanized sensors)
        self.Qu = np.diag([
            0.05,
            0.05,
            0.05,  # y_acc
            0.001,
            0.001,
            0.001
        ])  # y_omega

        # State covariances.  Size is (16 + 3N) x (16 + 3N) where N is the number of
        # features currently being tracked
        self.P = np.diag([
            0.0001,
            0.0001,
            0.0001,  # pos
            0.01,
            0.01,
            0.01,  # vel
            0.001,
            0.001,
            0.001,  # att
            1e-2,
            1e-2,
            1e-3,  # b_acc
            1e-3,
            1e-3,
            1e-3,  # b_omega
            1e-7
        ])  # mu

        # Initial Covariance estimate for new features
        self.P0_feat = np.diag([0.01, 0.01, 0.1])  # x, y, and 1/depth

        # gravity vector (NED)
        self.gravity = np.array([[0, 0, 9.80665]]).T

        # Unit vectors in the x, y, and z directions (used a lot for projection functions)
        self.ihat = np.array([[1, 0, 0]]).T
        self.jhat = np.array([[0, 1, 0]]).T
        self.khat = np.array([[0, 0, 1]]).T

        # The number of features currently being tracked
        self.len_features = 0

        # The next feature id to be assigned to a feature
        self.next_feature_id = 0

        # Set of initialized feature ids
        self.initialized_features = set()
        self.global_to_local_feature_id = {}

        # Body-to-Camera transform
        self.q_b_c = Quaternion(np.array([[1, 0, 0, 0]
                                          ]).T)  # Rotation from body to camera
        self.p_b_c = np.array(
            [[0, 0, 0]]).T  # translation from body to camera (in body frame)

        self.measurement_functions = dict()
        self.measurement_functions['acc'] = self.h_acc
        self.measurement_functions['alt'] = self.h_alt
        self.measurement_functions['att'] = self.h_att
        self.measurement_functions['pos'] = self.h_pos
        self.measurement_functions['vel'] = self.h_vel
        self.measurement_functions['qzeta'] = self.h_qzeta
        self.measurement_functions['feat'] = self.h_feat
        self.measurement_functions['pixel_vel'] = self.h_pixel_vel
        self.measurement_functions['depth'] = self.h_depth
        self.measurement_functions['inv_depth'] = self.h_inv_depth

        # Matrix Workspace
        self.A = np.zeros((dxZ, dxZ))
        self.G = np.zeros((dxZ, 6))
        self.I_big = np.eye(dxZ)
        self.dx = np.zeros((dxZ, 1))

        self.use_drag_term = True
        self.default_depth = np.array([[1.5]])

        self.last_propagate = None
        self.cam_center = np.array([[320.0, 240.0]]).T
        self.cam_F = np.diag([250.0, 250.0]).dot(I_2x3)  # (2, 3), as set_camera_intrinsics expects
        self.use_drag_term = multirotor

    def set_camera_intrinsics(self, center, F):
        assert center.shape == (2, 1) and F.shape == (2, 3)
        self.cam_center = center
        self.cam_F = F

    def set_camera_to_IMU(self, translation, rotation):
        # assert translation.shape == (3,1) and isinstance(rotation, Quaternion)
        self.p_b_c = translation
        self.q_b_c = rotation

    # Returns the depth to all features
    def get_depths(self):
        return 1. / self.x[xZ + 4::5]

    # Returns the estimated bearing vector to all features
    def get_zetas(self):
        zetas = np.zeros((3, self.len_features))
        for i in range(self.len_features):
            qzeta = self.x[xZ + 5 * i:xZ + 5 * i + 4, :]  # 4-vector quaternion
            zetas[:, i, None] = Quaternion(qzeta).rot(
                self.khat
            )  # 3-vector pointed at the feature in the camera frame
        return zetas

    # Returns the estimated bearing vector to a single feature with id i
    def get_zeta(self, i):
        ft_id = self.global_to_local_feature_id[i]
        qzeta = self.x[xZ + 5 * ft_id:xZ + 5 * ft_id +
                       4, :]  # 4-vector quaternion
        return Quaternion(qzeta).rot(
            self.khat)  # 3-vector pointed at the feature in the camera frame

    # Returns all quaternions which rotate the camera z axis to the bearing vectors directed at the tracked features
    def get_qzetas(self):
        qzetas = np.zeros((self.len_features, 4))
        for i in range(self.len_features):
            qzetas[i, :, None] = self.x[xZ + 5 * i:xZ + 5 * i +
                                        4]  # 4-vector quaternion
        return qzetas

    # Returns all quaternions which rotate the camera z axis to the bearing vectors directed at the tracked features
    def get_qzeta(self, i):
        ft_id = self.global_to_local_feature_id[i]
        return self.x[xZ + 5 * ft_id:xZ + 5 * ft_id +
                      4, :]  # 4-vector quaternion

    def get_camera_state(self):
        vel = self.x[xVEL:xVEL + 3]
        omega = self.u[uG:uG + 3] - self.x[xB_G:xB_G + 3]
        vel_c_i = self.q_b_c.invrot(vel + skew(omega).dot(self.p_b_c))
        omega_c_i = self.q_b_c.invrot(omega)
        return vel_c_i, omega_c_i

    # Adds the state with the delta state on the manifold
    def boxplus(self, x, dx):
        # assert  x.shape == (xZ+5*self.len_features, 1) and dx.shape == (dxZ+3*self.len_features, 1)

        out = np.zeros((xZ + 5 * self.len_features, 1))

        # Add position and velocity vector states
        out[xPOS:xPOS + 3] = x[xPOS:xPOS + 3] + dx[dxPOS:dxPOS + 3]
        out[xVEL:xVEL + 3] = x[xVEL:xVEL + 3] + dx[dxVEL:dxVEL + 3]

        # Add attitude quaternion state on the manifold
        out[xATT:xATT + 4] = (Quaternion(x[xATT:xATT + 4]) +
                              dx[dxATT:dxATT + 3]).elements

        # add bias and drag term vector states
        out[xB_A:xB_A + 3] = x[xB_A:xB_A + 3] + dx[dxB_A:dxB_A + 3]
        out[xB_G:xB_G + 3] = x[xB_G:xB_G + 3] + dx[dxB_G:dxB_G + 3]
        out[xMU] = x[xMU] + dx[dxMU]

        # add Feature quaternion states
        for i in range(self.len_features):
            xFEAT = xZ + i * 5
            xRHO = xZ + i * 5 + 4
            dxFEAT = dxZ + 3 * i
            dxRHO = dxZ + 3 * i + 2
            dqzeta = dx[dxFEAT:
                        dxRHO, :]  # 2-vector which is the derivative of qzeta
            qzeta = Quaternion(x[xFEAT:xFEAT + 4, :])  # 4-vector quaternion

            # Feature Quaternion States (use manifold)
            out[xFEAT:xRHO, :] = q_feat_boxplus(qzeta, dqzeta).elements

            # Inverse Depth State
            out[xRHO, :] = x[xRHO] + dx[dxRHO]
        return out

    # propagates all states, features and covariances
    def propagate(self, y_acc, y_gyro, t):
        # assert y_acc.shape == (3, 1) and y_gyro.shape == (3, 1) and isinstance(t, float)

        if self.last_propagate is not None:
            # calculate dt from t
            dt = t - self.last_propagate
            self.u[uA:uA + 3] = y_acc
            self.u[uG:uG + 3] = y_gyro

            # Propagate
            xdot, A, G = self.dynamics(self.x, self.u)
            Pdot = A.dot(self.P) + self.P.dot(A.T) + G.dot(self.Qu).dot(
                G.T) + self.Qx
            self.x = self.boxplus(self.x, xdot * dt)
            self.P += Pdot * dt

        # Update last_propagate
        self.last_propagate = t

        return self.x.copy(), self.P.copy()
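
    # Hypothetical propagation loop for this filter -- a sketch assuming the
    # 17-state layout from these examples (unit attitude quaternion at
    # xATT == 6) and a stationary IMU (accelerometer measures -gravity):
    #
    #   x0 = np.zeros((17, 1)); x0[6] = 1.0
    #   ekf = VI_EKF(x0)
    #   for k in range(100):
    #       y_acc = np.array([[0., 0., -9.80665]]).T
    #       y_gyro = np.zeros((3, 1))
    #       x, P = ekf.propagate(y_acc, y_gyro, k * 0.004)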

    def update(self, z, measurement_type, R, passive=False, **kwargs):
        # assert measurement_type in self.measurement_functions.keys(), "Unknown Measurement Type"

        passive_update = passive

        # If we haven't seen this feature before, then initialize it
        if measurement_type == 'feat':
            if kwargs['i'] not in self.global_to_local_feature_id.keys():
                self.init_feature(
                    z,
                    id=kwargs['i'],
                    depth=(kwargs['depth'] if 'depth' in kwargs else np.nan))

        zhat, H = self.measurement_functions[measurement_type](self.x,
                                                               **kwargs)

        # Calculate residual in the proper manner
        if measurement_type == 'qzeta':
            residual = q_feat_boxminus(Quaternion(z), Quaternion(zhat))
        elif measurement_type == 'att':
            residual = Quaternion(z) - Quaternion(zhat)
            if (abs(residual) > 1).any():
                residual = Quaternion(z) - Quaternion(zhat)
        else:
            residual = z - zhat

        # Perform state and covariance update
        if not passive_update:
            try:
                K = self.P.dot(H.T).dot(
                    scipy.linalg.inv(R + H.dot(self.P).dot(H.T)))
                self.P = (self.I_big - K.dot(H)).dot(self.P)
                self.x = self.boxplus(self.x, K.dot(residual))
            except:
                print "Nan detected in", measurement_type, "update"

        return residual, zhat
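
    # Usage sketch (hypothetical values): a passive update computes the residual
    # without modifying the state, which is handy for gating or debugging:
    #
    #   z_att = Quaternion.Identity().elements               # (4, 1) measured attitude
    #   res, zhat = ekf.update(z_att, 'att', np.eye(3) * 0.01, passive=True)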

    # Used for overriding the IMU biases in simulation.  Not to be used in real life
    def set_imu_bias(self, b_g, b_a):
        # assert b_g.shape == (3,1) and b_a.shape == (3,1)
        self.x[xB_G:xB_G + 3] = b_g
        self.x[xB_A:xB_A + 3] = b_a

    # Used to initialize a new feature.  Returns the feature id associated with this feature
    def init_feature(self, l, id, depth=np.nan):
        # assert l.shape == (2, 1) and depth.shape == (1, 1)

        self.len_features += 1
        self.feature_ids.append(id)  # global ids kept in local order; clear_feature relies on this
        self.global_to_local_feature_id[id] = self.len_features - 1  # local index of the new feature
        self.next_feature_id += 1

        # Adjust lambdas to be with respect to the center of the image
        l_centered = l - self.cam_center

        # Calculate Quaternion to feature
        f = self.cam_F[0, 0]
        zeta = np.array([[l_centered[0, 0], l_centered[1, 0], f]]).T
        zeta /= norm(zeta)
        q_zeta = Quaternion.from_two_unit_vectors(self.khat, zeta).elements

        if np.isnan(depth):
            # Best guess of depth without other information: average the other
            # features' depths (the new states have not been added to x yet)
            if self.len_features > 1:
                depth = np.average(1.0 / self.x[xZ + 4::5])
            else:
                depth = self.default_depth
        self.x = np.vstack(
            (self.x, q_zeta, 1. / depth))  # add 5 states to the state vector

        # Add a 3x3 block to the process noise and covariance matrices for the new feature
        self.Qx = scipy.linalg.block_diag(self.Qx, self.Qx_feat)
        self.P = scipy.linalg.block_diag(self.P, self.P0_feat)

        # Adjust the matrix workspace allocation to fit this new feature
        self.A = np.zeros(
            (dxZ + 3 * self.len_features, dxZ + 3 * self.len_features))
        self.G = np.zeros((dxZ + 3 * self.len_features, 6))
        self.I_big = np.eye(dxZ + 3 * self.len_features)
        self.dx = np.zeros((dxZ + 3 * self.len_features, 1))

        return self.next_feature_id - 1
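
    # Usage sketch (hypothetical pixel value): the pixel is recentered, lifted to a
    # unit bearing vector through the pinhole model, and stored as the quaternion
    # rotating the camera z-axis onto that bearing:
    #
    #   new_id = ekf.init_feature(np.array([[320.], [240.]]), id=7)  # depth defaults to the filter's guess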

    # Used to remove a feature from the EKF.  Removes the feature from the feature id list and
    # clears the associated rows and columns from the covariance.  The covariance matrix shrinks
    # by 3 rows and 3 columns, and the state vector by 5 elements
    def clear_feature(self, id):
        local_feature_id = self.global_to_local_feature_id[id]
        xZETA_i = xZ + 5 * local_feature_id
        dxZETA_i = dxZ + 3 * local_feature_id
        del self.feature_ids[local_feature_id]
        self.len_features -= 1
        self.global_to_local_feature_id = {
            self.feature_ids[i]: i
            for i in range(len(self.feature_ids))
        }

        # Create masks to remove portions of state and covariance
        xmask = np.ones_like(self.x, dtype=bool).squeeze()
        dxmask = np.ones_like(self.dx, dtype=bool).squeeze()
        xmask[xZETA_i:xZETA_i + 5] = False
        dxmask[dxZETA_i:dxZETA_i + 3] = False

        # Matrix Workspace Modifications
        self.x = self.x[xmask, ...]
        self.P = self.P[dxmask, :][:, dxmask]
        self.dx = self.dx[dxmask, ...]
        self.A = np.zeros_like(self.P)
        self.G = np.zeros((len(self.dx), 6))  # 6 input columns (accel + gyro), matching init_feature
        self.I_big = np.eye(len(self.dx))
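
    # Design note: clearing a feature shifts the local indices of every feature after
    # it, so callers should always resolve indices through global_to_local_feature_id
    # rather than caching local indices across updates.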

    def keep_only_features(self, features):
        features_to_clear = self.initialized_features.difference(set(features))
        for f in features_to_clear:
            self.clear_feature(f)

    # Computes the derivative of state x given input u, and the Jacobians of the dynamics
    # with respect to x and u.  The returned derivative lives in the error-state (delta)
    # space, so it is smaller than the state itself and must be applied with boxplus
    def dynamics(self, x, u):
        # assert x.shape == (xZ+5*self.len_features, 1) and u.shape == (6,1)

        # Reset Matrix Workspace
        self.dx.fill(0.0)
        self.A.fill(0.0)
        self.G.fill(0.0)

        vel = x[xVEL:xVEL + 3]
        q_I_b = Quaternion(x[xATT:xATT + 4])

        omega = u[uG:uG + 3] - x[xB_G:xB_G + 3]
        acc = u[uA:uA + 3] - x[xB_A:xB_A + 3]
        acc_z = np.array([[0, 0, acc[2, 0]]]).T
        mu = x[xMU, 0]

        gravity_B = q_I_b.invrot(self.gravity)
        vel_I = q_I_b.invrot(vel)
        vel_xy = I_2x3.T.dot(I_2x3).dot(vel)

        # CALCULATE STATE DYNAMICS
        self.dx[dxPOS:dxPOS + 3] = vel_I
        if self.use_drag_term:
            self.dx[dxVEL:dxVEL + 3] = acc_z + gravity_B - mu * vel_xy
        else:
            self.dx[dxVEL:dxVEL + 3] = acc + gravity_B
        self.dx[dxATT:dxATT + 3] = omega

        ###################################
        # STATE JACOBIAN
        self.A[dxPOS:dxPOS + 3, dxVEL:dxVEL + 3] = q_I_b.R
        self.A[dxPOS:dxPOS + 3, dxATT:dxATT + 3] = skew(vel_I)
        if self.use_drag_term:
            self.A[dxVEL:dxVEL + 3, dxVEL:dxVEL + 3] = -mu * I_2x3.T.dot(I_2x3)
            self.A[dxVEL:dxVEL + 3,
                   dxB_A:dxB_A + 3] = -self.khat.dot(self.khat.T)
            self.A[dxVEL:dxVEL + 3, dxMU, None] = -vel_xy
        else:
            self.A[dxVEL:dxVEL + 3, dxB_A:dxB_A + 3] = -I_3x3
        self.A[dxVEL:dxVEL + 3, dxATT:dxATT + 3] = skew(gravity_B)
        self.A[dxATT:dxATT + 3, dxB_G:dxB_G + 3] = -I_3x3

        #################################
        ## INPUT JACOBIAN
        if self.use_drag_term:
            self.G[dxVEL:dxVEL + 3, uA:uA + 3] = self.khat.dot(self.khat.T)
        else:
            self.G[dxVEL:dxVEL + 3, uA:uA + 3] = I_3x3
        self.G[dxATT:dxATT + 3, uG:uG + 3] = I_3x3

        # Camera Dynamics
        omega_c_i = self.q_b_c.invrot(omega)
        vel_c_i = self.q_b_c.invrot(vel - skew(omega).dot(self.p_b_c))

        for i in range(self.len_features):
            xZETA_i = xZ + i * 5
            xRHO_i = xZ + 5 * i + 4
            dxZETA_i = dxZ + i * 3
            dxRHO_i = dxZ + i * 3 + 2

            q_zeta = Quaternion(x[xZETA_i:xZETA_i + 4, :])
            rho = x[xRHO_i, 0]
            zeta = q_zeta.rot(self.khat)
            T_z = T_zeta(q_zeta)
            skew_zeta = skew(zeta)
            skew_vel_c = skew(vel_c_i)
            skew_p_b_c = skew(self.p_b_c)
            R_b_c = self.q_b_c.R
            rho2 = rho * rho

            #################################
            ## FEATURE DYNAMICS
            self.dx[dxZETA_i:dxZETA_i + 2, :] = -T_z.T.dot(
                omega_c_i + rho * skew_zeta.dot(vel_c_i))
            self.dx[dxRHO_i, :] = rho2 * zeta.T.dot(vel_c_i)

            #################################
            ## FEATURE STATE JACOBIAN
            self.A[dxZETA_i:dxZETA_i + 2,
                   dxVEL:dxVEL + 3] = -rho * T_z.T.dot(skew_zeta).dot(R_b_c)
            self.A[dxZETA_i:dxZETA_i + 2, dxB_G:dxB_G + 3] = T_z.T.dot(
                rho * skew_zeta.dot(R_b_c).dot(skew_p_b_c) + R_b_c)
            self.A[dxZETA_i:dxZETA_i + 2, dxZETA_i:dxZETA_i + 2] = -T_z.T.dot(
                skew(rho * skew_zeta.dot(vel_c_i) + omega_c_i) +
                (rho * skew_vel_c.dot(skew_zeta))).dot(T_z)
            self.A[dxZETA_i:dxZETA_i + 2, dxRHO_i,
                   None] = -T_z.T.dot(skew_zeta).dot(vel_c_i)
            self.A[dxRHO_i, dxVEL:dxVEL + 3] = rho2 * zeta.T.dot(R_b_c)
            self.A[dxRHO_i,
                   dxB_G:dxB_G + 3] = -rho2 * zeta.T.dot(R_b_c).dot(skew_p_b_c)
            self.A[dxRHO_i, dxZETA_i:dxZETA_i +
                   2] = -rho2 * vel_c_i.T.dot(skew_zeta).dot(T_z)
            self.A[dxRHO_i, dxRHO_i] = 2 * rho * zeta.T.dot(vel_c_i).squeeze()

            #################################
            ## FEATURE INPUT JACOBIAN
            self.G[dxZETA_i:dxZETA_i + 2, uG:uG + 3] = -T_z.T.dot(
                R_b_c + rho * skew_zeta.dot(R_b_c).dot(skew_p_b_c))
            self.G[dxRHO_i, uG:] = rho2 * zeta.T.dot(R_b_c).dot(skew_p_b_c)

        return self.dx, self.A, self.G
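
    # Sketch of a finite-difference spot check for the state Jacobian (illustrative
    # only; the step size and perturbed index are hypothetical):
    #
    #   xdot0, A0, _ = ekf.dynamics(ekf.x, ekf.u)
    #   xdot0, A0 = xdot0.copy(), A0.copy()    # dynamics() reuses its workspaces in place
    #   dx = np.zeros((dxZ + 3 * ekf.len_features, 1)); dx[dxVEL] = 1e-6
    #   xdot1 = ekf.dynamics(ekf.boxplus(ekf.x, dx), ekf.u)[0]
    #   col = (xdot1 - xdot0) / 1e-6           # compare against A0[:, dxVEL, None]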

    # Accelerometer model
    # Returns estimated measurement (2 x 1) and Jacobian (2 x 16+3N)
    def h_acc(self, x):
        # assert x.shape==(xZ+5*self.len_features,1)

        vel = x[xVEL:xVEL + 3]
        b_a = x[xB_A:xB_A + 3]
        mu = x[xMU, 0]

        h = I_2x3.dot(-mu * vel + b_a)

        dhdx = np.zeros((2, dxZ + 3 * self.len_features))
        dhdx[:, dxVEL:dxVEL + 3] = -mu * I_2x3
        dhdx[:, dxB_A:dxB_A + 3] = I_2x3
        dhdx[:, dxMU, None] = -I_2x3.dot(vel)

        return h, dhdx
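
    # Worked check of the drag model: the body x-y accelerometer axes see mainly
    # rotor drag, h = I_2x3.dot(-mu * vel + b_a).  With vel = [1, 0, 0].T m/s,
    # mu = 0.1, and zero bias, h = [[-0.1], [0.]].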

    # Altimeter model
    # Returns estimated measurement (1x1) and Jacobian (1 x 16+3N)
    def h_alt(self, x):
        # assert x.shape == (xZ + 5 * self.len_features, 1)

        h = -x[xPOS + 2, :, None]

        dhdx = np.zeros((1, dxZ + 3 * self.len_features))
        dhdx[0, dxPOS + 2] = -1.0

        return h, dhdx

    # Attitude Model
    # Returns the estimated attitude measurement (4x1) and Jacobian (3 x 16+3N)
    def h_att(self, x, **kwargs):
        # assert x.shape == (xZ + 5 * self.len_features, 1)

        h = x[xATT:xATT + 4]

        dhdx = np.zeros((3, dxZ + 3 * self.len_features))
        dhdx[:, dxATT:dxATT + 3] = I_3x3

        return h, dhdx
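
    # Note: for 'att' measurements the residual is formed on the manifold in
    # update() (quaternion boxminus), so the Jacobian is simply identity in the
    # 3-dimensional tangent space even though h itself is a 4-vector.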

    # Position Model
    # Returns the estimated Position measurement (3x1) and Jacobian (3 x 16+3N)
    def h_pos(self, x):
        # assert x.shape == (xZ + 5 * self.len_features, 1)

        h = x[xPOS:xPOS + 3]

        dhdx = np.zeros((3, dxZ + 3 * self.len_features))
        dhdx[:, dxPOS:dxPOS + 3] = I_3x3

        return h, dhdx

    # Velocity Model
    # Returns the estimated Velocity measurement (3x1) and Jacobian (3 x 16+3N)
    def h_vel(self, x):
        # assert x.shape == (xZ + 5 * self.len_features, 1)

        h = x[xVEL:xVEL + 3]

        dhdx = np.zeros((3, dxZ + 3 * self.len_features))
        dhdx[:, dxVEL:dxVEL + 3] = I_3x3

        return h, dhdx

    # qzeta model for feature index i
    # Returns estimated qzeta (4x1) and Jacobian (2 x 16+3N); the residual is 2-dimensional on the manifold
    def h_qzeta(self, x, **kwargs):
        # assert x.shape == (xZ + 5 * self.len_features, 1) and isinstance(i, int)
        i = self.global_to_local_feature_id[kwargs['i']]
        dxZETA_i = dxZ + i * 3
        q_c_z = x[xZ + i * 5:xZ + i * 5 + 4]

        h = q_c_z

        dhdx = np.zeros((2, dxZ + 3 * self.len_features))
        dhdx[:, dxZETA_i:dxZETA_i + 2] = I_2x2

        return h, dhdx

    # Feature model for feature index i
    # Returns estimated pixel measurement (2x1) and Jacobian (2 x 16+3N)
    def h_feat(self, x, **kwargs):
        # assert x.shape == (xZ + 5 * self.len_features, 1) and isinstance(i, int)
        i = self.global_to_local_feature_id[kwargs['i']]
        dxZETA_i = dxZ + i * 3
        q_zeta = Quaternion(x[xZ + i * 5:xZ + i * 5 + 4])

        zeta = q_zeta.rot(self.khat)
        sk_zeta = skew(zeta)
        ezT_zeta = (self.khat.T.dot(zeta)).squeeze()
        T_z = T_zeta(q_zeta)

        h = self.cam_F.dot(zeta) / ezT_zeta + self.cam_center

        dhdx = np.zeros((2, dxZ + 3 * self.len_features))
        dhdx[:, dxZETA_i:dxZETA_i + 2] = -self.cam_F.dot(
            sk_zeta.dot(T_z) / ezT_zeta -
            zeta.dot(self.khat.T).dot(sk_zeta).dot(T_z) / (ezT_zeta * ezT_zeta))

        return h, dhdx
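
    # Worked check (assuming cam_F is the 2x3 matrix [[fx, 0, 0], [0, fy, 0]]):
    # a feature on the optical axis has zeta == khat, so ezT_zeta == 1,
    # cam_F.dot(zeta) == 0, and h reduces to cam_center as expected.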

    # Feature depth measurement
    # Returns estimated measurement (1x1) and Jacobian (1 x 16+3N)
    def h_depth(self, x, i):
        # assert x.shape == (xZ + 5 * self.len_features, 1) and isinstance(i, int) and i in self.feature_ids
        local_id = self.global_to_local_feature_id[i]
        rho = x[xZ + local_id * 5 + 4, 0]

        h = np.array([[1.0 / rho]])

        dhdx = np.zeros((1, dxZ + 3 * self.len_features))
        dhdx[0, dxZ + 3 * local_id + 2, None] = -1 / (rho * rho)

        return h, dhdx
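
    # Worked check: h = 1/rho, so dh/drho = -1/rho**2, which is the single nonzero
    # Jacobian entry; e.g. rho = 0.5 (2 m depth) gives dh/drho = -4.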

    # Feature inverse depth measurement
    # Returns estimated measurement (1x1) and Jacobian (1 x 16+3N)
    def h_inv_depth(self, x, i):
        # assert x.shape == (xZ + 5 * self.len_features, 1) and isinstance(i, int)
        h = x[xZ + i * 5 + 4, None]

        dhdx = np.zeros((1, dxZ + 3 * self.len_features))
        dhdx[0, dxZ + 3 * i + 2] = 1

        return h, dhdx

    # Feature pixel velocity measurement
    # Returns estimated measurement (2x1) and Jacobian (2 x 16+3N)
    def h_pixel_vel(self, x, i, u):
        # assert x.shape == (xZ + 5 * self.len_features, 1) and isinstance(i, int) and u.shape == (6, 1)

        vel = x[xVEL:xVEL + 3]
        omega = u[uG:uG + 3] - x[xB_G:xB_G + 3]

        # Camera Dynamics
        vel_c_i = self.q_b_c.invrot(vel + skew(omega).dot(self.p_b_c))
        omega_c_i = self.q_b_c.invrot(omega)

        q_c_z = Quaternion(x[xZ + i * 5:xZ + i * 5 + 4])
        rho = x[xZ + i * 5 + 4]
        zeta = q_c_z.rot(self.khat)

        sk_vel = skew(vel_c_i)
        sk_ez = skew(self.khat)
        sk_zeta = skew(zeta)
        R_b_c = self.q_b_c.R

        # TODO: Need to convert to camera dynamics

        h = -self.focal_len * I_2x3.dot(sk_ez).dot(
            rho * sk_zeta.dot(vel_c_i) + omega_c_i)

        ZETA_i = dxZ + 3 * i
        RHO_i = dxZ + 3 * i + 2
        dhdx = np.zeros((2, dxZ + 3 * self.len_features))
        dhdx[:, dxVEL:dxVEL +
             3] = -self.focal_len * rho * I_2x3.dot(sk_ez).dot(sk_zeta)
        dhdx[:, ZETA_i:ZETA_i + 2] = -self.focal_len * rho * I_2x3.dot(
            sk_ez).dot(sk_vel).dot(sk_zeta).dot(T_zeta(q_c_z))
        dhdx[:, RHO_i, None] = -self.focal_len * I_2x3.dot(sk_ez).dot(
            sk_zeta).dot(vel_c_i)
        dhdx[:, dxB_G:dxB_G + 3] = self.focal_len * I_2x3.dot(sk_ez).dot(
            R_b_c - rho * sk_zeta.dot(R_b_c).dot(skew(self.p_b_c)))
        # dhdx[:, dxB_G:dxB_G + 3] = self.focal_len * I_2x3.dot(sk_ez).dot(I_zz)

        return h, dhdx
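
    # Usage sketch, assuming this model is registered in measurement_functions
    # under a key such as 'pixel_vel' (the registration is not shown here):
    #
    #   z = np.array([[2.5], [-1.0]])       # hypothetical pixel velocity (px/s)
    #   res, zhat = ekf.update(z, 'pixel_vel', np.eye(2) * 4.0, i=0, u=ekf.u)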