Example #1
 def get_kalman_error(self, use_groundtruth=True):
     """ Return error of the Kalman in filter in the map frame 
         use_groundtruth: if False compare Kalman filter performance with radar images GPS
     """
     if hasattr(self.reader, "groundtruth") and use_groundtruth:
         error_pos = np.array([
             self.kalman.mapdata.attitude.apply(
                 self.get_position(ts) -
                 self.reader.get_groundtruth_pos(ts))
             for ts in self.get_timestamps(0, np.inf)
         ])
         error_att = np.array([
             rotation_proj(self.reader.get_groundtruth_att(ts),
                           self.get_attitude(ts)).as_euler('zxy')[0]
             for ts in self.get_timestamps(0, np.inf)
         ])
     else:
         error_pos = np.array([
             self.kalman.mapdata.attitude.apply(
                 self.get_position(ts) - self.reader.get_gps_pos(ts))
             for ts in self.get_timestamps(0, np.inf)
         ])
         error_att = np.array([
             rotation_proj(self.reader.get_gps_att(ts),
                           self.get_attitude(ts)).as_euler('zxy')[0]
             for ts in self.get_timestamps(0, np.inf)
         ])
     return error_pos, error_att
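
# A minimal, self-contained sketch of the error computation above;
# map_attitude, estimated and reference are hypothetical stand-ins for
# self.kalman.mapdata.attitude, the filter output and the groundtruth.
import numpy as np
from scipy.spatial.transform import Rotation as rot

map_attitude = rot.from_euler('z', 0.3)
estimated = np.array([[1.0, 2.0, 0.0], [1.5, 2.5, 0.0]])
reference = np.array([[0.9, 2.1, 0.0], [1.4, 2.6, 0.0]])

# Rotate world-frame differences into the map frame, one row per timestamp.
error_pos = map_attitude.apply(estimated - reference)
print(np.linalg.norm(error_pos, axis=1))  # per-timestamp error magnitude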
Example #2
    def plot_attitude(self,
                      cv2=False,
                      cv2_corrected=False,
                      projection="ENU",
                      car_position=None):
        """ Plot the orientation in the map frame given by the GPS and after fusion 
            cv2: if True plot CV2 measurements integrated alone
            cv2_corrected: if True plot CV2 measurements with bias and altitude correction
            projection: projection in given 2D plane
            car_position: if True compute the position of the car, if False use the position of the top-left corner; if not given, choose the most appropriate
        """
        (pos0, att0), _ = get_origin(self, projection, car_position)

        plt.figure()
        reader = define_reader(self)
        q = rot.from_dcm([[0, -1, 0], [-1, 0, 0], [0, 0, -1]])
        if hasattr(reader, "groundtruth"):
            plt.plot(reader.get_timestamps(),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in reader.get_groundtruth_att()
                     ]),
                     'black',
                     label="Groundtruth")
        plt.plot(reader.get_timestamps(),
                 np.unwrap([
                     rotation_proj(att0, q * r).as_euler('zxy')[0]
                     for r in reader.get_gps_att()
                 ]),
                 'green',
                 label="GPS")
        if cv2:
            plt.plot(reader.get_timestamps(),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in self.get_measured_attitudes()
                     ]),
                     'red',
                     label="CV2")
        if cv2_corrected:
            plt.plot(reader.get_timestamps(),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in self.get_measured_attitudes(cv2_corrected)
                     ]),
                     'r--',
                     label="CV2 corrected")
        if not is_reader(self) and len(self.get_attitudes()) != 0:
            plt.plot(self.get_timestamps(0, np.inf),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in self.get_attitudes()
                     ]),
                     'cornflowerblue',
                     label="Output")

        plt.legend()
        plt.title("Yaw")
        plt.xlabel("Times (s)")
        plt.ylabel("Yaw (rad)")
Example #3
    def plot_attitude(self, gps_only=False, cv2_corrected=False):
        """ Plot the orientation in the map frame given by the GPS and after fusion 
            gps_only: if True plot only GPS data from dataset
            cv2_corrected: if True add a line with bias correction on CV2 measurements
        """
        plt.figure()
        reader = define_reader(self)
        pos0, att0 = get_plot_origin(self)
        q = rot.from_dcm([[0, -1, 0], [-1, 0, 0], [0, 0, -1]])

        if hasattr(reader, "groundtruth"):
            plt.plot(reader.get_timestamps(),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in reader.get_groundtruth_att()
                     ]),
                     'black',
                     label="Groundtruth")
        plt.plot(reader.get_timestamps(),
                 np.unwrap([
                     rotation_proj(att0, q * r).as_euler('zxy')[0]
                     for r in reader.get_gps_att()
                 ]),
                 'green',
                 label="GPS")
        if not gps_only:
            plt.plot(reader.get_timestamps(),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in self.get_measured_attitudes()
                     ]),
                     'red',
                     label="CV2")
        if cv2_corrected and not gps_only:
            plt.plot(reader.get_timestamps(),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in self.get_measured_attitudes(cv2_corrected)
                     ]),
                     'r--',
                     label="CV2 corrected")
        if hasattr(self, "get_attitude") and len(self.get_attitude()) != 0:
            plt.plot(self.get_timestamps(0, np.inf),
                     np.unwrap([
                         rotation_proj(att0, q * r).as_euler('zxy')[0]
                         for r in self.get_attitude()
                     ]),
                     'cornflowerblue',
                     label="Output")

        plt.legend()
        plt.title("Yaw")
        plt.xlabel("Times (s)")
        plt.ylabel("Yaw (rad)")
Example #4
    def extract_from_map(self, gps_pos, attitude, shape, scale=1):
        """ Return an image from the map for a given position and attitude and with a given shape """
        data_temp = RadarData(
            0,
            np.ones((int(np.ceil(shape[0] / scale)),
                     int(np.ceil(shape[1] / scale)))), gps_pos, attitude,
            self.precision)
        img1, cov_img1, new_origin = self.build_partial_map(data_temp)

        P_start = self.attitude.apply(new_origin -
                                      gps_pos)[0:2] / self.precision
        R = rotation_proj(self.attitude, attitude).inv().as_dcm()[:2, :2]
        M2 = np.concatenate((R, R.dot(np.array([[P_start[0]], [P_start[1]]]))),
                            axis=1)

        M2 = scale * np.eye(2).dot(M2)
        img2 = cv2.warpAffine(img1,
                              M2, (shape[1], shape[0]),
                              flags=cv2.INTER_LINEAR,
                              borderValue=0)
        cov_img2 = cv2.warpAffine(cov_img1,
                                  M2, (shape[1], shape[0]),
                                  flags=cv2.INTER_LINEAR,
                                  borderValue=0)
        return img2, cov_img2
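
# A self-contained sketch of the [R | R.t] affine extraction used above;
# theta, t and scale are hypothetical stand-ins for rotation_proj(...),
# P_start and the scale argument.
import numpy as np
import cv2

src = np.random.rand(200, 200).astype(np.float32)
theta, t, scale = 0.1, np.array([5.0, -3.0]), 1.0
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta), np.cos(theta)]])
M = scale * np.concatenate((R, R.dot(t.reshape(2, 1))), axis=1)

# cv2.warpAffine takes dsize as (width, height), hence (shape[1], shape[0]).
out = cv2.warpAffine(src, M, (100, 150), flags=cv2.INTER_LINEAR, borderValue=0)
print(out.shape)  # (150, 100)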
    def update(self, new_data):
        """ Update the state X thanks to GPS information """
        pos2D = self.mapdata.attitude.apply(new_data.gps_pos -
                                            self.mapdata.gps_pos)[0:2]
        att2D = rotation_proj(self.mapdata.attitude,
                              new_data.attitude).as_euler("zxy")[0]
        Y = np.append(pos2D, att2D)
        Yhat = np.append(self.pos2D, self.att2D)

        if self.bias_estimation:
            H = np.block([np.eye(3), np.zeros((3, 3))])
        else:
            H = np.eye(3)
        S = H.dot(self.P).dot(H.T) + self.R
        K = self.P.dot(H.T).dot(np.linalg.inv(S))
        #Z = stat_test(Y, Yhat, S, 0.99)*(Y - Yhat)
        Z = (Y - Yhat)
        e = K.dot(Z)

        self.pos2D = self.pos2D + e[0:2]
        self.att2D = self.att2D + e[2]
        if self.bias_estimation:
            self.bias = self.bias + e[3:6]

        self.P = (np.eye(len(self.P)) - K.dot(H)).dot(self.P)
        self.innovation = (Z, S)
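
# The update above is a standard Kalman measurement step. A minimal linear
# version with the same structure (3-state [x, y, yaw], direct observation
# H = I, made-up numbers) for reference:
import numpy as np

x = np.array([0.0, 0.0, 0.0])    # state estimate (pos2D, att2D)
P = np.eye(3)                    # state covariance
R = 0.1 * np.eye(3)              # measurement noise
H = np.eye(3)                    # observation model
y = np.array([0.2, -0.1, 0.05])  # GPS-derived measurement

S = H.dot(P).dot(H.T) + R             # innovation covariance
K = P.dot(H.T).dot(np.linalg.inv(S))  # Kalman gain
x = x + K.dot(y - H.dot(x))           # corrected state
P = (np.eye(3) - K.dot(H)).dot(P)     # corrected covariance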
    def predict(self, new_data):
        """ Based on image transformation measurement, predict new state X """
        trans, rotation = new_data.image_transformation_from(self.last_data)

        R = np.array([[np.cos(self.att2D), -np.sin(self.att2D)],
                      [np.sin(self.att2D),
                       np.cos(self.att2D)]])
        if not np.any(np.isnan(trans)):
            self.pos2D = self.pos2D + R.dot(trans[0:2] - self.bias[0:2])
            self.att2D = self.att2D + rotation.as_euler(
                'zxy')[0] - self.bias[2]
        else:
            self.pos2D = self.mapdata.attitude.apply(new_data.gps_pos -
                                                     self.mapdata.gps_pos)[0:2]
            self.att2D = rotation_proj(self.mapdata.attitude,
                                       new_data.attitude).as_euler("zxy")[0]

        F = np.array([[
            1, 0, -np.sin(self.att2D) * (trans[0] - self.bias[0]) -
            np.cos(self.att2D) * (trans[1] - self.bias[1])
        ],
                      [
                          0, 1,
                          np.cos(self.att2D) * (trans[0] - self.bias[0]) -
                          np.sin(self.att2D) * (trans[1] - self.bias[1])
                      ], [0, 0, 1]])
        M = np.block([[R, np.zeros((2, 1))], [np.zeros(2), 1]])
        if self.bias_estimation:
            F = np.block(
                [[F, np.block([[-R, np.zeros((2, 1))], [np.zeros(2), -1]])],
                 [np.zeros((3, 3)), np.eye(3)]])
            M = np.block([[M], [np.zeros((3, 3))]])
        self.P = F.dot(self.P).dot(F.T) + M.dot(self.Q).dot(M.T)
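
# The nominal motion model above, in isolation: rotate the body-frame
# odometry (trans and rot_z stand in for the cv2-derived measurement) into
# the map frame and accumulate. Made-up values:
import numpy as np

pos2D, att2D = np.array([1.0, 2.0]), 0.3
trans, rot_z = np.array([0.5, 0.0]), 0.02

R = np.array([[np.cos(att2D), -np.sin(att2D)],
              [np.sin(att2D), np.cos(att2D)]])
pos2D = pos2D + R.dot(trans)  # translation expressed in the map frame
att2D = att2D + rot_z         # yaw increment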
    def predict_image(self, gps_pos, attitude, shape=None):
        """ Give the prediction of an observation in a different position based on actual radar image """
        exp_rot_matrix = rotation_proj(attitude,
                                       self.attitude).as_dcm()[:2, :2]
        exp_trans = attitude.apply(gps_pos -
                                   self.gps_pos)[0:2] / self.precision

        if shape is None:
            shape = (np.shape(self.img)[1], np.shape(self.img)[0])
        else:
            shape = (shape[1], shape[0])

        warp_matrix = np.concatenate(
            (exp_rot_matrix, np.array([[-exp_trans[0]], [-exp_trans[1]]])),
            axis=1)
        predict_img = cv2.warpAffine(self.img,
                                     warp_matrix,
                                     shape,
                                     flags=cv2.INTER_LINEAR,
                                     borderValue=0)

        mask = cv2.warpAffine(np.ones(np.shape(self.img)),
                              warp_matrix,
                              shape,
                              flags=cv2.INTER_LINEAR,
                              borderValue=0)
        diff = mask - np.ones((shape[1], shape[0]))
        diff[diff != -1] = 0
        diff[diff == -1] = np.nan
        prediction = diff + predict_img

        return prediction
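
# The ones-mask trick above, in isolation: warp a ones-image with the same
# matrix, then mark every pixel that fell outside the source as NaN
# (hypothetical image and a pure-translation warp matrix):
import numpy as np
import cv2

img = np.random.rand(50, 50).astype(np.float32)
M = np.array([[1.0, 0.0, 20.0], [0.0, 1.0, 0.0]])
warped = cv2.warpAffine(img, M, (50, 50), borderValue=0)
mask = cv2.warpAffine(np.ones_like(img), M, (50, 50), borderValue=0)

diff = mask - 1.0
diff[diff != -1] = 0        # pixels covered by the source contribute 0
diff[diff == -1] = np.nan   # uncovered pixels become NaN
prediction = diff + warped  # NaN wherever the source had no data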
Example #8
 def get_kalman_error(self, t_ini=None, t_final=None, use_groundtruth=True):
     """ Return error of the Kalman in filter in the map frame 
         use_groundtruth: if False compare Kalman filter performance with radar images GPS
     """
     times = self.get_timestamps(t_ini, t_final)
     if t_ini is not None and t_final is None:
         if hasattr(self.reader, "groundtruth") and use_groundtruth:
             error_pos = self.kalman.mapdata.attitude.apply(
                 self.get_positions(times) -
                 self.reader.get_groundtruth_pos(times))[0:2]
             error_att = rotation_proj(
                 self.reader.get_groundtruth_att(times),
                 self.get_attitudes(times)).as_euler('zxy')[0]
         else:
             error_pos = self.kalman.mapdata.attitude.apply(
                 self.get_positions(times) -
                 self.reader.get_gps_pos(times))[0:2]
             error_att = rotation_proj(
                 self.reader.get_gps_att(times),
                 self.get_attitudes(times)).as_euler('zxy')[0]
     else:
         if hasattr(self.reader, "groundtruth") and use_groundtruth:
             error_pos = np.array([
                 self.kalman.mapdata.attitude.apply(
                     self.get_positions(ts) -
                     self.reader.get_groundtruth_pos(ts))[0:2]
                 for ts in times
             ])
             error_att = np.array([
                 rotation_proj(self.reader.get_groundtruth_att(ts),
                               self.get_attitudes(ts)).as_euler('zxy')[0]
                 for ts in times
             ])
         else:
             error_pos = np.array([
                 self.kalman.mapdata.attitude.apply(
                     self.get_positions(ts) -
                     self.reader.get_gps_pos(ts))[0:2] for ts in times
             ])
             error_att = np.array([
                 rotation_proj(self.reader.get_gps_att(ts),
                               self.get_attitudes(ts)).as_euler('zxy')[0]
                 for ts in times
             ])
     return error_pos, error_att
Example #9
    def predict(self, new_data):
        """ Based on GPS pos of the new data, predict new state X """
        pos2D = self.mapdata.attitude.apply(new_data.gps_pos -
                                            self.mapdata.gps_pos)[0:2]
        att2D = rotation_proj(self.mapdata.attitude,
                              new_data.attitude).as_euler('zxy')[0]

        F1 = np.array([[
            1, 0, -self.trans[0] * np.sin(self.prev_att2D) -
            self.trans[1] * np.cos(self.prev_att2D),
            np.cos(self.prev_att2D), -np.sin(self.prev_att2D), 0
        ],
                       [
                           0, 1, self.trans[0] * np.cos(self.prev_att2D) -
                           self.trans[1] * np.sin(self.prev_att2D),
                           np.sin(self.prev_att2D),
                           np.cos(self.prev_att2D), 0
                       ], [0, 0, 1, 0, 0, 1]])
        #F2 = np.array([[0, 0, -(pos2D-self.prev_pos2D)[0]*np.sin(self.prev_att2D+self.rot)+(pos2D-self.prev_pos2D)[0]*np.cos(self.prev_att2D+self.rot), 0, 0, -(pos2D-self.prev_pos2D)[0]*np.sin(self.prev_att2D+self.rot)+(pos2D-self.prev_pos2D)[0]*np.cos(self.prev_att2D+self.rot)],
        #               [0, 0, -(pos2D-self.prev_pos2D)[1]*np.cos(self.prev_att2D+self.rot)-(pos2D-self.prev_pos2D)[1]*np.sin(self.prev_att2D+self.rot), 0, 0, -(pos2D-self.prev_pos2D)[1]*np.cos(self.prev_att2D+self.rot)-(pos2D-self.prev_pos2D)[1]*np.sin(self.prev_att2D+self.rot)],
        #               [0, 0, -1, 0, 0, 0]])
        F2 = np.array([[
            -np.cos(self.prev_att2D + self.rot),
            -np.sin(self.prev_att2D + self.rot),
            -(pos2D - self.prev_pos2D)[0] * np.sin(self.prev_att2D + self.rot)
            +
            (pos2D - self.prev_pos2D)[0] * np.cos(self.prev_att2D + self.rot),
            -np.cos(self.rot), -np.sin(self.rot),
            -(pos2D - self.prev_pos2D)[0] * np.sin(self.prev_att2D + self.rot)
            +
            (pos2D - self.prev_pos2D)[0] * np.cos(self.prev_att2D + self.rot) +
            self.trans[0] * np.sin(self.rot) - self.trans[1] * np.cos(self.rot)
        ],
                       [
                           np.sin(self.prev_att2D + self.rot),
                           -np.cos(self.prev_att2D + self.rot),
                           -(pos2D - self.prev_pos2D)[1] *
                           np.cos(self.prev_att2D + self.rot) -
                           (pos2D - self.prev_pos2D)[1] *
                           np.sin(self.prev_att2D + self.rot),
                           np.sin(self.rot), -np.cos(self.rot),
                           -(pos2D - self.prev_pos2D)[1] *
                           np.cos(self.prev_att2D + self.rot) -
                           (pos2D - self.prev_pos2D)[1] *
                           np.sin(self.prev_att2D + self.rot) +
                           self.trans[0] * np.cos(self.rot) +
                           self.trans[1] * np.sin(self.rot)
                       ], [0, 0, -1, 0, 0, -1]])
        F = np.block([[F1], [F2 - F1]])

        self.prev_pos2D = self.prev_pos2D + R(-self.prev_att2D).dot(self.trans)
        self.prev_att2D = self.prev_att2D + self.rot

        self.trans = R(self.prev_att2D).dot(pos2D - self.prev_pos2D)
        self.rot = att2D - self.prev_att2D

        self.P = F.dot(self.P).dot(F.T) + self.Q
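
# R(...) is not defined in this snippet; given how it is called,
# R(-self.prev_att2D).dot(self.trans), it is presumably a plain 2D rotation
# matrix, something like:
import numpy as np

def R(angle):
    return np.array([[np.cos(angle), -np.sin(angle)],
                     [np.sin(angle), np.cos(angle)]])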
 def get_gps_measurements(self):
     """ Return GPS measurements between two consecutive images """
     if self.gps_rot is None or self.gps_trans is None:
         print("Retrieving GPS measurements... ")
         times = self.get_timestamps(0, np.inf)
         self.gps_trans = np.zeros((len(times) - 1, 2))
         self.gps_rot = np.zeros(len(times) - 1)
         for i in range(1, len(times)):
             self.gps_rot[i - 1] = rotation_proj(
                 self.get_gps_att(times[i - 1]),
                 self.get_gps_att(times[i])).as_euler('zxy')[0]
             self.gps_trans[i - 1] = self.heatmaps[times[i - 1]].earth2rbd(
                 self.get_gps_pos(times[i]) -
                 self.get_gps_pos(times[i - 1]))[0:2]
     return self.gps_trans, self.gps_rot
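
# rotation_proj is project code; for yaw-only attitudes it presumably
# behaves like the relative rotation between the two, which can be sketched
# with scipy (hypothetical angles):
import numpy as np
from scipy.spatial.transform import Rotation as rot

att_prev = rot.from_euler('z', 0.30)
att_next = rot.from_euler('z', 0.35)
relative = att_prev.inv() * att_next
print(relative.as_euler('zxy')[0])  # ~0.05 rad of relative yaw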
Example #11
 def image_position_from(self, otherdata):
     """ Return the actual position and attitude based on radar images comparison """
     translation, rotation = self.image_transformation_from(otherdata)
     
     if not np.any(np.isnan(translation)):     
         gps_pos = otherdata.gps_pos + otherdata.earth2rbd(translation,True)
         attitude = rotation.inv()*otherdata.attitude
     else:
         print("No cv2 measurement, use GPS instead")
         trans = otherdata.earth2rbd(self.gps_pos-otherdata.gps_pos)
         trans[2] = 0
         gps_pos = otherdata.gps_pos + otherdata.earth2rbd(trans, True)
         attitude = rotation_proj(otherdata.attitude, self.attitude).inv()*otherdata.attitude
     return gps_pos, attitude
Example #12
    def add_data(self, otherdata):
        """ Fusionning a new radardata with part of the map """
        if self.gps_pos is None:
            self.init_map(otherdata)

        img1, cov_img1, new_origin = self.build_partial_map(otherdata)
        shape = np.shape(img1)

        v2 = self.attitude.apply(otherdata.gps_pos -
                                 new_origin)[0:2] / self.precision
        M2 = np.concatenate(
            (rotation_proj(self.attitude, otherdata.attitude).as_dcm()[:2, :2],
             np.array([[v2[0]], [v2[1]]])),
            axis=1)
        img2 = cv2.warpAffine(otherdata.img,
                              M2, (shape[1], shape[0]),
                              flags=cv2.INTER_LINEAR,
                              borderValue=0)
        mask = cv2.warpAffine(np.ones(np.shape(otherdata.img)),
                              M2, (shape[1], shape[0]),
                              flags=cv2.INTER_LINEAR,
                              borderValue=0)
        diff = mask - np.ones(shape)
        diff[diff != 0] = np.nan
        img2 = diff + img2

        cov_img2 = cv2.warpAffine(self.img_cov *
                                  np.ones(np.shape(otherdata.img)),
                                  M2, (shape[1], shape[0]),
                                  flags=cv2.INTER_LINEAR,
                                  borderValue=0)
        cov_img2 = diff + cov_img2

        img, cov_img = merge_img(img1, img2, cov_img1, cov_img2)
        self.update_map(img, cov_img, new_origin)
        return img1, img2, v2
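
# merge_img is project code and its implementation is not shown here; one
# plausible sketch of the technique is an inverse-covariance weighted
# average over the overlap (a guess, not the actual implementation):
import numpy as np

def merge_sketch(img1, img2, cov1, cov2):
    w1, w2 = 1.0 / cov1, 1.0 / cov2
    img = np.where(np.isnan(img2), img1,
                   np.where(np.isnan(img1), img2,
                            (w1 * img1 + w2 * img2) / (w1 + w2)))
    cov = np.where(np.isnan(img2), cov1,
                   np.where(np.isnan(img1), cov2, 1.0 / (w1 + w2)))
    return img, cov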
Example #13
    def build_partial_map(self, otherdata):
        """ Build partial map of the chunks that are needed to contain the new data """
        hdf5 = h5py.File('maps/' + self.map_name + '.h5', 'a')
        try:
            map_hdf5 = hdf5["map"]
            cov_map_hdf5 = hdf5["covariance"]

            q = rotation_proj(self.attitude, otherdata.attitude)
            P5 = self.attitude.apply(otherdata.gps_pos - self.gps_pos)[0:2]
            P6 = P5 + q.apply(np.array([otherdata.width(), 0, 0]))[0:2]
            P7 = P5 + q.apply(
                np.array([otherdata.width(),
                          otherdata.height(), 0]))[0:2]
            P8 = P5 + q.apply(np.array([0, otherdata.height(), 0]))[0:2]

            P9 = np.array([
                min(P5[0], P6[0], P7[0], P8[0]),
                min(P5[1], P6[1], P7[1], P8[1])
            ])
            P10 = np.array([
                max(P5[0], P6[0], P7[0], P8[0]),
                max(P5[1], P6[1], P7[1], P8[1])
            ])

            chunk_1 = np.flip(
                np.floor((P9 / self.precision) / self.chunk_size).astype(int))
            chunk_2 = np.flip(
                np.floor((P10 / self.precision) / self.chunk_size).astype(int))

            img = np.nan * np.ones(
                (self.chunk_size *
                 (1 + chunk_2[0] - chunk_1[0]), self.chunk_size *
                 (1 + chunk_2[1] - chunk_1[1])))
            cov_img = np.nan * np.ones(
                (self.chunk_size *
                 (1 + chunk_2[0] - chunk_1[0]), self.chunk_size *
                 (1 + chunk_2[1] - chunk_1[1])))

            for i in range(chunk_1[0], chunk_2[0] + 1):
                for j in range(chunk_1[1], chunk_2[1] + 1):
                    if not str(i) + "/" + str(j) in map_hdf5:
                        map_hdf5.create_dataset(
                            str(i) + "/" + str(j),
                            data=np.nan * np.ones(
                                (self.chunk_size, self.chunk_size)),
                            shape=(self.chunk_size, self.chunk_size))
                        cov_map_hdf5.create_dataset(
                            str(i) + "/" + str(j),
                            data=np.nan * np.ones(
                                (self.chunk_size, self.chunk_size)),
                            shape=(self.chunk_size, self.chunk_size))
                    img[(i - chunk_1[0]) *
                        self.chunk_size:(i - chunk_1[0] + 1) * self.chunk_size,
                        (j - chunk_1[1]) *
                        self.chunk_size:(j - chunk_1[1] + 1) *
                        self.chunk_size] = map_hdf5[str(i) + "/" + str(j)]
                    cov_img[(i - chunk_1[0]) *
                            self.chunk_size:(i - chunk_1[0] + 1) *
                            self.chunk_size, (j - chunk_1[1]) *
                            self.chunk_size:(j - chunk_1[1] + 1) *
                            self.chunk_size] = cov_map_hdf5[str(i) + "/" +
                                                            str(j)]
            gps_pos = self.gps_pos + self.attitude.inv().apply(
                np.array([
                    chunk_1[1] * self.chunk_size * self.precision,
                    chunk_1[0] * self.chunk_size * self.precision, 0
                ]))
        finally:
            hdf5.close()
        return img, cov_img, gps_pos
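
# The chunk indexing above, in isolation: a metric position maps to a chunk
# index by converting to pixel units and flooring (hypothetical precision
# and chunk_size):
import numpy as np

precision, chunk_size = 0.1, 256  # metres per pixel, pixels per chunk
point = np.array([-30.0, 75.2])   # metric position in the map frame

chunk = np.floor((point / precision) / chunk_size).astype(int)
print(chunk)  # [-2  2]; negative chunk indices are valid labels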