Example #1
def get_plot_origin(obj):
    """ Return the origin position and orientation of the frame used for plotting """
    reader = define_reader(obj)
    if hasattr(reader, "groundtruth"):
        pos0 = rbd_translate(reader.get_groundtruth_pos(0),
                             reader.get_groundtruth_att(0),
                             reader.tracklog_translation)
        coord0 = ecef2lla(pos0)
        att0 = ecef2enu(coord0[1], coord0[0])
    else:
        pos0 = rbd_translate(reader.get_gps_pos(0), reader.get_gps_att(0),
                             reader.tracklog_translation)
        coord0 = ecef2lla(pos0)
        att0 = ecef2enu(coord0[1], coord0[0])
    return pos0, att0
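
Example #1 derives the plotting frame from the first pose: the ECEF position is converted to geodetic coordinates with ecef2lla, and ecef2enu returns the rotation that maps ECEF offsets to East-North-Up axes around that point. Those helpers are not shown in these examples, so the following is only a minimal sketch of what an ENU rotation built from longitude and latitude (in radians) could look like; the function name, argument order, and the use of the newer SciPy Rotation.from_matrix (the snippets use the older from_dcm) are assumptions.

import numpy as np
from scipy.spatial.transform import Rotation

def ecef2enu_sketch(lon, lat):
    # Standard direction-cosine matrix sending ECEF offset vectors to East-North-Up components
    sl, cl = np.sin(lon), np.cos(lon)
    sp, cp = np.sin(lat), np.cos(lat)
    dcm = np.array([[-sl,       cl,      0.0],
                    [-sp * cl, -sp * sl, cp],
                    [cp * cl,   cp * sl, sp]])
    return Rotation.from_matrix(dcm)

# att0.apply(pos - pos0) then yields East/North/Up offsets from the origin,
# which is how the plotting helpers in the following examples use the returned orientation.
att0 = ecef2enu_sketch(lon=0.1, lat=0.8)
print(att0.apply([1.0, 0.0, 0.0]))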
Example #2
def add_altitude_line(obj, gps_pos, attitudes, label, color):
    """ Add a line in figure of altitude from a 2D plane"""
    reader = define_reader(obj)
    map_origin, map_orientation = get_plot_origin(obj)
    pos = rbd_translate(gps_pos, attitudes, reader.tracklog_translation)
    trajectory = map_orientation.apply(pos - map_origin)
    plt.plot(obj.get_timestamps(), trajectory[:, 2], color, label=label)
Example #3
        def process_images(t):
            center = -self.kalman.mapdata.precision * np.array(
                [0.5 * shape[1], 0.5 * shape[0], 0])
            gps_pos = projection(
                self.kalman.mapdata.gps_pos, self.kalman.mapdata.attitude,
                rbd_translate(self.get_positions(t), self.get_attitudes(t),
                              self.reader.tracklog_translation))
            img, _ = self.kalman.mapdata.extract_from_map(
                gps_pos + self.kalman.mapdata.attitude.apply(center, True),
                self.kalman.mapdata.attitude, shape)

            data = deepcopy(self.reader.get_radardata(t))
            data.gps_pos, data.attitude = self.get_positions(
                t), self.get_attitudes(t)
            img_border = 255 * np.ones(np.shape(data.img))
            img_border[border:-border,
                       border:-border] = data.img[border:-border,
                                                  border:-border]
            data.img = img_border
            img_overlay = np.nan_to_num(
                data.predict_image(
                    gps_pos + self.kalman.mapdata.attitude.apply(center, True),
                    self.kalman.mapdata.attitude, shape))
            error = np.min(
                [np.linalg.norm(self.get_kalman_error(t)[0][0:2]), bar_scale])
            return increase_saturation(
                np.nan_to_num(img)), increase_saturation(img_overlay), error
Example #4
def add_altitude_line(origin, gps_pos, attitudes, t, tracklog_translation,
                      label, color):
    """ Add a altitude line in current plot projected in the 2D plane described by the origin"""
    map_origin, map_orientation = origin
    gps_pos = rbd_translate(gps_pos, attitudes, tracklog_translation)
    trajectory = map_orientation.apply(gps_pos - map_origin)
    plt.plot(t, trajectory[:, 2], color, label=label)
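
Both altitude helpers first pass the positions through rbd_translate with a tracklog_translation offset before projecting them. The implementation of rbd_translate is not shown anywhere in these examples; the sketch below only illustrates the usual lever-arm idea such a helper implements (shift each fix by a fixed body-frame offset rotated into the world frame by the matching attitude). The function body, and in particular the direction in which the attitude is applied, is an assumption.

import numpy as np
from scipy.spatial.transform import Rotation

def rbd_translate_sketch(gps_pos, attitudes, translation):
    # Hypothetical lever-arm correction: offset each position by a constant
    # body-frame vector expressed in the world frame through the attitude.
    gps_pos = np.atleast_2d(np.asarray(gps_pos, dtype=float))
    return np.array([pos + att.apply(translation)
                     for pos, att in zip(gps_pos, attitudes)])

positions = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
attitudes = [Rotation.from_euler('z', 0, degrees=True),
             Rotation.from_euler('z', 90, degrees=True)]
print(rbd_translate_sketch(positions, attitudes, np.array([0.5, 0.0, 0.0])))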
Example #5
def add_trajectory_line(obj, gps_pos, attitudes, label, color, arrow):
    """ Add a trajectory in an oriented 2D map """
    reader = define_reader(obj)
    map_origin, map_orientation = get_plot_origin(obj)
    pos = rbd_translate(gps_pos, attitudes, reader.tracklog_translation)
    trajectory = map_orientation.apply(pos - map_origin)
    plt.plot(trajectory[:, 0],
             trajectory[:, 1],
             color,
             label=label,
             picker=True)
    if arrow:
        arrows = np.array([
            map_orientation.apply(data.earth2rbd([0, -1, 0], True))
            for data in reader.get_radardata()
        ])
        for i in range(0, len(arrows), 5):
            plt.arrow(trajectory[i, 0], trajectory[i, 1], arrows[i, 0],
                      arrows[i, 1])
Example #6
def add_trajectory_line(origin, gps_pos, attitudes, tracklog_translation,
                        label, color, arrow):
    """ Add a trajectory in current plot projected in the 2D plane described by the origin"""
    map_origin, map_orientation = origin
    gps_pos = rbd_translate(gps_pos, attitudes, tracklog_translation)
    trajectory = map_orientation.apply(gps_pos - map_origin)
    plt.plot(trajectory[:, 0],
             trajectory[:, 1],
             color,
             label=label,
             picker=True)
    if arrow:
        arrows = np.array([
            map_orientation.apply(att.apply([0, -1, 0], True))
            for att in attitudes
        ])
        for i in range(0, len(arrows), 5):
            plt.arrow(trajectory[i, 0], trajectory[i, 1], arrows[i, 0],
                      arrows[i, 1])
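
The two add_trajectory_line variants share the same matplotlib pattern: plot the projected x/y track, then draw a heading arrow every fifth sample with plt.arrow. A self-contained toy version of that pattern, with synthetic poses standing in for the radar data:

import numpy as np
import matplotlib.pyplot as plt

# Toy circular trajectory with unit heading vectors (tangents).
theta = np.linspace(0, np.pi, 50)
trajectory = np.column_stack([np.cos(theta), np.sin(theta)])
headings = np.column_stack([-np.sin(theta), np.cos(theta)])

plt.plot(trajectory[:, 0], trajectory[:, 1], 'g', label="trajectory", picker=True)
for i in range(0, len(trajectory), 5):  # one arrow every 5 samples, as in the snippets
    plt.arrow(trajectory[i, 0], trajectory[i, 1],
              0.1 * headings[i, 0], 0.1 * headings[i, 1], head_width=0.02)
plt.legend()
plt.axis("equal")
plt.show()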
Example #7
def get_origin(obj, projection, car_position):
    """ Return origin of frame according to given projection convention """
    if not (projection == "ENU" or projection == "Map"):
        raise Exception(
            "Only projection in 'Map' frame or 'ENU' frame are available")
    reader = define_reader(obj)
    if projection == "ENU":
        if car_position is None:
            car_position = True
        if is_reader(obj):
            if hasattr(reader, "groundtruth"):
                pos0 = rbd_translate(
                    reader.get_groundtruth_pos(0),
                    reader.get_groundtruth_att(0),
                    car_position * reader.tracklog_translation)
                coord0 = ecef2lla(pos0)
                att0 = ecef2enu(coord0[1], coord0[0])
            else:
                pos0 = rbd_translate(
                    reader.get_gps_pos(0), reader.get_gps_att(0),
                    car_position * reader.tracklog_translation)
                coord0 = ecef2lla(pos0)
                att0 = ecef2enu(coord0[1], coord0[0])
        else:
            pos0 = rbd_translate(obj.get_positions(0), obj.get_attitudes(0),
                                 car_position * reader.tracklog_translation)
            coord0 = ecef2lla(pos0)
            att0 = ecef2enu(coord0[1], coord0[0])
    elif projection == "Map":
        if car_position is None:
            car_position = False
        q = rot.from_dcm([[0, -1, 0], [-1, 0, 0], [0, 0, -1]])
        if is_reader(obj):
            if hasattr(reader, "groundtruth"):
                pos0 = rbd_translate(
                    reader.get_groundtruth_pos(0),
                    reader.get_groundtruth_att(0),
                    car_position * reader.tracklog_translation)
                att0 = q * reader.get_groundtruth_att(0)
            else:
                pos0 = rbd_translate(
                    reader.get_gps_pos(0), reader.get_gps_att(0),
                    car_position * reader.tracklog_translation)
                att0 = q * reader.get_gps_att(0)
        else:
            pos0 = rbd_translate(obj.kalman.mapdata.gps_pos,
                                 obj.kalman.mapdata.attitude,
                                 car_position * reader.tracklog_translation)
            att0 = q * obj.kalman.mapdata.attitude
    return (pos0, att0), car_position
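
In the 'Map' branch, q = rot.from_dcm([[0, -1, 0], [-1, 0, 0], [0, 0, -1]]) is a fixed proper rotation that maps x to -y, y to -x, and z to -z, presumably to align the plotted frame with the map image convention. A quick check of what that matrix does to the basis vectors (written with the newer from_matrix name for the same constructor):

import numpy as np
from scipy.spatial.transform import Rotation

q = Rotation.from_matrix([[0, -1, 0],
                          [-1, 0, 0],
                          [0, 0, -1]])
print(np.round(q.apply(np.eye(3))))
# x -> (0, -1, 0), y -> (-1, 0, 0), z -> (0, 0, -1)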
Example #8
        def update(t):
            center = -self.kalman.mapdata.precision * np.array(
                [0.5 * shape[1], 0.5 * shape[0], 0])
            gps_pos = projection(
                self.kalman.mapdata.gps_pos, self.kalman.mapdata.attitude,
                rbd_translate(self.get_position(t), self.get_attitude(t),
                              self.reader.tracklog_translation))
            img, _ = self.kalman.mapdata.extract_from_map(
                gps_pos + self.kalman.mapdata.attitude.apply(center, True),
                self.kalman.mapdata.attitude, shape)

            data = deepcopy(self.reader.get_radardata(t))
            data.gps_pos, data.gps_att = self.get_position(
                t), self.get_attitude(t)
            img_border = 255 * np.ones(np.shape(data.img))
            img_border[border:-border,
                       border:-border] = data.img[border:-border,
                                                  border:-border]
            data.img = img_border
            img_overlay = np.nan_to_num(
                data.predict_image(
                    gps_pos + self.kalman.mapdata.attitude.apply(center, True),
                    self.kalman.mapdata.attitude, shape))
            overlay_red = np.zeros(
                (np.shape(img_overlay)[0], np.shape(img_overlay)[1], 4))
            overlay_red[:, :, 0] = img_overlay
            overlay_red[:, :, 3] = (img_overlay != 0) * overlay_alpha * 255
            return [
                plt.imshow(increase_saturation(np.nan_to_num(img)),
                           cmap='gray',
                           vmin=0,
                           vmax=255,
                           zorder=1),
                plt.imshow(increase_saturation(overlay_red.astype(np.uint8)),
                           alpha=0.5,
                           zorder=2,
                           interpolation=None),
                plt.text(0.6, 0.5, str(round(t, 2)))
            ]
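
update(t) overlays the predicted radar image on the map patch by packing it into the red channel of an RGBA array whose alpha is non-zero only where the overlay carries data, then stacking two imshow calls by zorder. A stripped-down, self-contained version of that overlay trick, with random data in place of the map and prediction:

import numpy as np
import matplotlib.pyplot as plt

base = np.random.randint(0, 256, (64, 64)).astype(np.uint8)   # stand-in for the map patch
overlay = np.zeros((64, 64))
overlay[20:40, 20:40] = 200.0                                  # stand-in for the predicted image
overlay_alpha = 0.8

overlay_red = np.zeros((overlay.shape[0], overlay.shape[1], 4))
overlay_red[:, :, 0] = overlay                                 # red channel
overlay_red[:, :, 3] = (overlay != 0) * overlay_alpha * 255    # alpha only where data exists

plt.imshow(base, cmap='gray', vmin=0, vmax=255, zorder=1)
plt.imshow(overlay_red.astype(np.uint8), zorder=2)
plt.show()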
Example #9
    def export_map(self, gps_only=False, cv2_corrected=False):
        """ Plot reference GPS on a Google map as well as measured position and filtered position
            corrected: if True apply bias correction to CV2 measurements
        """
        print("Exporting Google map...")
        reader = define_reader(self)
        if hasattr(reader, "groundtruth"):
            coords = ecef2lla(
                rbd_translate(reader.get_groundtruth_pos(),
                              reader.get_groundtruth_att(),
                              reader.tracklog_translation))
            gmap = gmplot.GoogleMapPlotter(np.rad2deg(np.mean(coords[:, 1])),
                                           np.rad2deg(np.mean(coords[:, 0])),
                                           15)
            gmap.plot(np.rad2deg(coords[:, 1]),
                      np.rad2deg(coords[:, 0]),
                      'black',
                      edge_width=2.5)

        coords = ecef2lla(
            rbd_translate(reader.get_gps_pos(), reader.get_gps_att(),
                          reader.tracklog_translation))
        if not hasattr(reader, "groundtruth"):
            gmap = gmplot.GoogleMapPlotter(np.mean(coords[:, 1]),
                                           np.mean(coords[:, 0]), 15)
        gmap.plot(np.rad2deg(coords[:, 1]),
                  np.rad2deg(coords[:, 0]),
                  'green',
                  edge_width=2.5)

        if not gps_only:
            coords = ecef2lla(
                rbd_translate(self.get_measured_positions(),
                              self.get_measured_attitudes(),
                              reader.tracklog_translation))
            gmap.plot(np.rad2deg(coords[:, 1]),
                      np.rad2deg(coords[:, 0]),
                      'red',
                      edge_width=2.5)

        if not gps_only and cv2_corrected:
            coords = ecef2lla(
                rbd_translate(
                    self.get_measured_positions(corrected=cv2_corrected),
                    self.get_measured_attitudes(corrected=cv2_corrected),
                    reader.tracklog_translation))
            gmap.plot(np.rad2deg(coords[:, 1]),
                      np.rad2deg(coords[:, 0]),
                      'red',
                      edge_width=2.5)

        if hasattr(self, "get_position") and len(self.get_position()) != 0:
            coords = ecef2lla(
                rbd_translate(self.get_position(), self.get_attitude(),
                              self.reader.tracklog_translation))
            gmap.plot(np.rad2deg(coords[:, 1]),
                      np.rad2deg(coords[:, 0]),
                      'cornflowerblue',
                      edge_width=2.5)

        gmap.apikey = "AIzaSyB0UlIEiFl6IFtzz2_1WaDyYsXjscLVRWU"
        gmap.draw("map.html")
Example #10
    def load_heatmaps(self, t_ini=0, t_final=np.inf):
        """ Load radar data magnitudes from the HDF5 file between t_ini and t_final """
        hdf5 = h5py.File(self.src, 'r+')

        cv2_transformations = SqliteDict('cv2_transformations.db', autocommit=True)
        cv2_transformations['use_dataset'] = self.src
        cv2_transformations.close()

        try:
            aperture = hdf5['radar']['broad01']['aperture2D']
        except KeyError:
            hdf5.close()
            raise Exception("The images should be in the radar/broad01/aperture2D directory")
        if not ('preprocessed' in aperture.attrs and aperture.attrs['preprocessed']):
            hdf5.close()
            raise Exception("The dataset should be preprocessed with Preprocessor first")

        try:
            times = list(aperture.keys())

            # Importing radar images from the dataset
            t0 = float(times[0])
            times = [t for t in times if t_ini <= float(t) - t0 <= t_final]
            prev_perc = -1
            for i, t in enumerate(times):
                if np.floor(i / (len(times) - 1) * 10) != prev_perc:
                    print("Loading data: " + str(np.floor(i / (len(times) - 1) * 10) * 10) + "%")
                    prev_perc = np.floor(i / (len(times) - 1) * 10)
                heatmap = aperture[t][...]
                if np.sum(heatmap) != 0:
                    gps_pos = np.array(list(aperture[t].attrs['POSITION'][0]))
                    att = np.array(list(aperture[t].attrs['ATTITUDE'][0]))
                    self.heatmaps[float(t) - t0] = RadarData(
                        float(t), np.array(Image.fromarray(heatmap, 'L')),
                        gps_pos, rot.from_quat(att))
            self.tracklog_translation = -aperture.attrs['tracklog_translation']

            # Importing groundtruth GPS information if available
            aperture_gt = hdf5['radar']['broad01']
            if "groundtruth" in aperture_gt:
                print("Loading groundtruth")
                aperture_gt = hdf5['radar']['broad01']['groundtruth']
                self.groundtruth = dict()
                times_gt = list(aperture_gt.keys())
                # Keep groundtruth samples inside [t_ini, t_final] plus one sample on each side
                times_gt = [times_gt[i] for i in range(len(times_gt))
                            if (t_ini <= float(times_gt[i]) - t0 < t_final)
                            or (i + 1 < len(times_gt) and t_ini <= float(times_gt[i + 1]) - t0 < t_final)
                            or (i - 1 >= 0 and t_ini <= float(times_gt[i - 1]) - t0 < t_final)]
                gt_att, gt_pos, gt_time = [], [], []
                for t in times_gt:
                    gt_att.append(rot.from_quat(np.array(list(aperture_gt[t].attrs['ATTITUDE'][0]))))
                    gt_pos.append(np.array(list(aperture_gt[t].attrs['POSITION'][0])))
                    gt_time.append(float(t) - t0)
                gt_pos = rbd_translate(gt_pos, gt_att,
                                       self.tracklog_translation - (-aperture_gt.attrs['tracklog_translation']))

                # Interpolating groundtruth positions to make them match the radar image timestamps
                if times_gt[0] > times[0] or times_gt[-1] < times[-1]:
                    for t in times:
                        if times_gt[0] > t or times_gt[-1] < t:
                            self.heatmaps.pop(float(t) - t0)
                times = list(self.heatmaps.keys())
                slerp = Slerp(gt_time, rot.from_quat([r.as_quat() for r in gt_att]))
                gt_att = [rot.from_quat(q) for q in slerp(times).as_quat()]
                gt_pos = np.array([np.interp(times, gt_time, np.array(gt_pos)[:, i]) for i in range(0, 3)]).T
                for i in range(len(times)):
                    self.groundtruth[times[i]] = {'POSITION': gt_pos[i], 'ATTITUDE': gt_att[i]}
            else:
                print("No groundtruth data found")

            hdf5.close()
            print("Data loaded")
        except Exception:
            hdf5.close()
            raise Exception("A problem occurred when importing data")
Example #11
    build_map, bias_estimation=bias_estimation)  # Creating Kalman filter

recorder = Recorder(reader, kalman)  # Creating recorder

for ts, radardata in reader:
    data = deepcopy(radardata)
    if use_groundtruth:
        data.gps_pos = reader.get_groundtruth_pos(ts)
        data.attitude = reader.get_groundtruth_att(ts)

    pos, att = kalman.add(data, use_fusion)  # add a new image to the map

    recorder.record(ts)  # save Kalman output
    if kalman.mapping and display_map:
        kalman.mapdata.show(
            rbd_translate(pos, att, reader.tracklog_translation)
        )  # update the displayed map during mapping

# Extracting map after fusion
if kalman.mapping:
    m = kalman.mapdata

# Plots
#recorder.export_map(gps_only = not use_fusion)
recorder.plot_trajectory(arrow=False, cv2=use_fusion)
recorder.plot_attitude(cv2=use_fusion)
recorder.plot_altitude(cv2=use_fusion)
if use_fusion:
    recorder.plot_innovation()
    recorder.plot_kalman_evaluation(use_groundtruth)
Example #12
use_groundtruth = True                  # use the groundtruth GPS values of the SBG if True (because of problems in the VN)


# Loading data   
reader = Reader(dataset_name, start_time, end_time)

kalman = Kalman_Localizer(mapping, map_to_use)          # Creating Kalman filter for mapping
# Initialize the first position and attitude
kalman.set_initial_position(reader.get_groundtruth_pos(0), reader.get_groundtruth_att(0))

recorder = Recorder(reader, kalman)                     # Creating recorder 

for ts, radardata in reader:
    data = deepcopy(radardata)
    if use_groundtruth:
        data.gps_pos = reader.get_groundtruth_pos(ts)
        data.attitude = reader.get_groundtruth_att(ts)
    pos, att = kalman.localize(data)         # localize image (only radardata.img is used)
    
    recorder.record(ts)                      # save Kalman output
    if display_map:
        data = deepcopy(radardata)
        data.gps_pos, data.attitude = pos, att
        kalman.mapdata.show(rbd_translate(pos, att, reader.tracklog_translation), data)             # see the map during localization

# Plots
recorder.export_map()
recorder.plot_attitude()
recorder.plot_trajectory(False)
recorder.plot_kalman_evaluation()