Example #1
0
    def _build_training_set_index(self, radius=2):
        """Build self.training_set_index: one windowed frame sequence per pose.

        For each drive, the OXTS poses are mapped into the camera frame via
        the IMU-to-camera calibration, their translations scaled by
        self.args['scale'], and cached in self.poses/self.calib. Every frame
        then yields a training example: a window of 2*radius+1 neighboring
        frames, clamped at the sequence boundaries.
        """
        self.training_set_index = []
        self.poses = {}
        self.calib = {}

        for drive in self.sequences:
            trajectory = self._read_oxts_data(drive)
            proj_c2p, proj_v2c, imu2cam = self._read_raw_calib_data(drive)

            # Convert each pose to the camera frame and scale the translation.
            for idx, pose in enumerate(trajectory):
                cam_pose = np.dot(imu2cam, util.inv_SE3(pose))
                cam_pose[0:3, 3] *= self.args['scale']
                trajectory[idx] = cam_pose

            self.poses[drive] = trajectory
            self.calib[drive] = (proj_c2p, proj_v2c, imu2cam)

            last = len(trajectory) - 1
            for center in range(len(trajectory)):
                window = []
                for offset in range(center - radius, center + radius + 1):
                    # Clamp out-of-range neighbors to the first/last frame.
                    k = min(max(0, offset), last)
                    window.append({
                        'image': self._fetch_image_path(drive, k),
                        'velo': self._fetch_velo_path(drive, k),
                        'pose': self.poses[drive][k],
                        'drive': drive,
                    })
                self.training_set_index.append(window)
Example #2
0
    def test_drive(self, drive, radius=2):
        """Yield one loaded example per frame of a single drive.

        Poses are transformed into the camera frame with the IMU-to-camera
        calibration and scaled by self.args['scale']; for every frame a window
        of 2*radius+1 neighbors (clamped at the sequence boundaries) is built
        and passed to self._load_example.
        """
        self.poses = {}
        self.calib = {}
        trajectory = self._read_oxts_data(drive)
        proj_c2p, proj_v2c, imu2cam = self._read_raw_calib_data(drive)

        # Convert each pose to the camera frame and scale the translation.
        for idx, pose in enumerate(trajectory):
            cam_pose = np.dot(imu2cam, util.inv_SE3(pose))
            cam_pose[0:3, 3] *= self.args['scale']
            trajectory[idx] = cam_pose

        self.poses[drive] = trajectory
        self.calib[drive] = (proj_c2p, proj_v2c, imu2cam)

        last = len(trajectory) - 1
        for center in range(len(trajectory)):
            window = []
            for offset in range(center - radius, center + radius + 1):
                # Clamp out-of-range neighbors to the first/last frame.
                k = min(max(0, offset), last)
                window.append({
                    'image': self._fetch_image_path(drive, k),
                    'velo': self._fetch_velo_path(drive, k),
                    'pose': self.poses[drive][k],
                    'drive': drive,
                })

            yield self._load_example(window)
Example #3
0
    def test_set_iterator(self, radius=2):
        """Yield loaded examples for the Eigen test split.

        Each line of data/kitti/test_files_eigen.txt names a test image path
        of the form <date>/<drive>_sync/.../<frame>.png. The drive's
        trajectory is loaded and cached on first use (converted to the camera
        frame and scaled by self.args['scale']); a window of 2*radius+1
        neighboring frames, clamped to the sequence bounds, is then assembled
        and passed to self._load_example.
        """
        test_list = []
        with open('data/kitti/test_files_eigen.txt') as f:
            reader = csv.reader(f)
            for row in reader:
                test_list.append(row[0])

        self.poses = {}
        self.calib = {}
        for test_frame in test_list:
            comps = test_frame.split('/')
            drive = comps[1].replace('_sync', '')
            # FIX: keep the integer frame index under its own name; the
            # original rebound `frame` to the per-frame dict in the loop
            # below, shadowing the index.
            frame_idx = int(comps[4].replace('.png', ''))

            if drive not in self.poses:
                trajectory = self._read_oxts_data(drive)
                proj_c2p, proj_v2c, imu2cam = self._read_raw_calib_data(drive)

                # Convert each pose to the camera frame and scale translation.
                for i in range(len(trajectory)):
                    trajectory[i] = np.dot(imu2cam, util.inv_SE3(trajectory[i]))
                    trajectory[i][0:3, 3] *= self.args['scale']

                self.poses[drive] = trajectory
                self.calib[drive] = (proj_c2p, proj_v2c, imu2cam)
            else:
                # Trajectory already converted and cached for this drive.
                trajectory = self.poses[drive]

            seq = []
            for j in range(frame_idx - radius, frame_idx + radius + 1):
                # Clamp out-of-range neighbors to the first/last frame.
                j = min(max(0, j), len(trajectory) - 1)
                seq.append({
                    'image': self._fetch_image_path(drive, j),
                    'velo': self._fetch_velo_path(drive, j),
                    'pose': self.poses[drive][j],
                    'drive': drive,
                })

            yield self._load_example(seq)
Example #4
0
    def _build_sequence_set_index(self, tskip=0.6):
        """Build self.sequence_set_index from the training scenes.

        Scenes are found by globbing '<dataset_path>/<scene_base>*' for every
        base name from self._collect_train_scenes. Anchor frames are
        subsampled so consecutive anchors are more than `tskip` seconds
        apart; each anchor is expanded by self._make_video into a clip of
        neighboring frames, and each clip is appended as a list of
        {'image', 'depth', 'pose'} dicts.

        Args:
            tskip: minimum time gap in seconds between consecutive anchors.
        """
        self.sequence_set_index = []
        for scene_base in self._collect_train_scenes(mode='train'):
            # FIX: typo `seach_str` -> `search_str` (local name only).
            search_str = os.path.join(self.dataset_path, '%s*' % scene_base)
            for scene in glob.glob(search_str):
                scene = os.path.basename(scene)
                try:
                    timestamps, pairs, poses = self._load_trajectory_data(
                        scene)
                # FIX: a bare `except:` also swallows KeyboardInterrupt and
                # SystemExit; catch Exception so scenes with missing or
                # unreadable trajectory data are still skipped best-effort.
                except Exception:
                    continue

                tstamps = np.array(timestamps, dtype="float64")
                # Seed far enough in the past that the first frame always
                # qualifies as an anchor.
                tcur = tstamps[0] - 2 * tskip

                for i in range(len(timestamps)):
                    if tstamps[i] - tcur > tskip:
                        tcur = tstamps[i]
                        vid = self._make_video(i,
                                               tstamps,
                                               radius=self.args.get('radius'))

                        sequence = []
                        for j in range(len(vid)):
                            t = timestamps[vid[j]]
                            frame = {
                                'image':
                                os.path.join(self.dataset_path, scene,
                                             pairs[t][0]),
                                'depth':
                                os.path.join(self.dataset_path, scene,
                                             pairs[t][1]),
                                'pose':
                                util.inv_SE3(poses[t]),
                            }
                            sequence.append(frame)

                        self.sequence_set_index.append(sequence)