def gen_weights_and_poses(sequence, fast_threshold=150):
    dataset = pykitti.odometry(basedir_odom, sequence, imformat='cv2')
    print("Dataset loaded")
    # T_cam0_velo maps velodyne coordinates into cam0; invert it so the
    # ground-truth camera poses can be expressed in the velodyne frame.
    T_calib = np.linalg.inv(dataset.calib.T_cam0_velo)
    x = []
    y = []
    z = []
    w = []
    camera_normal = np.zeros((0, 3))
    for T in dataset.poses:
        T = T_calib.dot(T)
        x.append(T[0, 3])
        y.append(T[1, 3])
        z.append(T[2, 3])
        camera_normal = np.vstack((camera_normal, T[0:3, 2].T))

    # Didn't like the zip, had to do the two loops separately.
    # Create the detector once; rebuilding it per frame is wasteful.
    orb = cv2.ORB_create(nfeatures=10000, fastThreshold=fast_threshold)
    for img in dataset.cam2:
        # weight each frame by the number of ORB keypoints it yields
        kp = orb.detect(img, None)
        w.append(len(kp))
    x = np.array(x)
    y = np.array(y)
    z = np.array(z)
    yaw = np.arctan2(camera_normal[:, 1], camera_normal[:, 0])
    return x, y, z, yaw, w
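A minimal usage sketch (not part of the original; it assumes basedir_odom is set as above and matplotlib is installed) that color-codes the trajectory by the per-frame keypoint count:

import matplotlib.pyplot as plt

x, y, z, yaw, w = gen_weights_and_poses('00')  # hypothetical sequence choice
plt.scatter(x, y, c=w, s=2)
plt.colorbar(label='ORB keypoints per frame')
plt.axis('equal')
plt.show()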
Example #2
    def __init__(self, config: dict):

        # Set Ground Truth and Velocity
        if config['name'] == 'kitti_raw':
            self.resize_factor = config['resize_factor']
            self.dataset = pykitti.raw(config['basedir'], config['date'],
                                       config['drive'])
            self.image_getter_left = iter(self.dataset.cam0)
            self.velocity_getter = ([
                oxt.packet.vl, oxt.packet.vu, oxt.packet.vf
            ] for oxt in self.dataset.oxts)
            self.true_pose_getter = ([
                oxt.T_w_imu[0][3], oxt.T_w_imu[1][3], oxt.T_w_imu[2][3]
            ] for oxt in self.dataset.oxts)
            self.k = self.dataset.k = self.resize_factor * self.dataset.calib.K_cam0
            self.k[2][2] = 1
            self.inv_k = np.linalg.inv(self.k)
            self.focal = self.k[0][0]
            self.pp = (self.k[0][2], self.k[1][2])

        elif config['name'] == 'kitti_odometry':
            self.resize_factor = config['resize_factor']
            self.dataset = pykitti.odometry(config['basedir'],
                                            config['sequence'])
            self.image_getter_left = iter(self.dataset.cam0)
            self.image_getter_right = iter(self.dataset.cam1)
            self.true_pose_getter = ([pose[0][3], pose[1][3], pose[2][3]]
                                     for pose in self.dataset.poses)
            self.k = self.dataset.k = self.resize_factor * self.dataset.calib.K_cam0
            self.k[2][2] = 1
            self.inv_k = np.linalg.inv(self.k)
            self.focal = self.k[0][0]
            self.pp = (self.k[0][2], self.k[1][2])
Example #3
    def __init__(self, path2odometry, sequence):

        if isinstance(sequence, int):
            sequence = str(sequence).zfill(2)

        self.datainfo = pykitti.odometry(path2odometry, sequence)
        self.br = tf.TransformBroadcaster()
Example #4
    def __init__(self, cfg, base_dir, seq, frames=None):

        if seq == "03" or seq not in self.raw_path.keys():
            raise ValueError("%s sequence not supported" % seq)

        self.cfg = cfg
        self.imu_time_sync_shift = 1  # the IMU stream appears to be delayed, so we shift it forward by one frame

        if frames:
            range_start = self.raw_range[seq][0] + frames.start
            range_stop = range_start + frames.stop
        else:
            range_start = self.raw_range[seq][0]
            range_stop = self.raw_range[seq][1] + 1

        self.data_raw_kitti = pykitti.raw(base_dir, self.raw_path[seq][0], self.raw_path[seq][1],
                                          frames=range(range_start, range_stop))
        self.data_imu_kitti = pykitti.raw(base_dir, self.raw_path[seq][0], self.raw_path[seq][1],
                                          frames=range(range_start + self.imu_time_sync_shift,
                                                       range_stop + self.imu_time_sync_shift))
        self.data_odom_kitti = pykitti.odometry(base_dir, seq, frames=frames)
        self.data_lidar_image = LidarDataLoader(self.cfg, base_dir, seq, frames=frames)

        assert (len(self.data_raw_kitti.oxts) == len(self.data_odom_kitti.poses) and
                len(self.data_imu_kitti.oxts) == len(self.data_odom_kitti.poses) and
                len(self.data_odom_kitti.poses) == self.data_lidar_image.num_frames)

        self.imu_measurements = []
        self.imu_measurements_reversed = []

        self.__load_imu()
Example #5
 def __init__(self,
              sequence,
              root='E:\\share_folder\\dataset',
              transform=None,
              pre_transform=None):
     super().__init__(root, transform, pre_transform)
     self.dataset = pykitti.odometry(root, sequence, frames=None)
Example #6
 def _create_drive_loader(self, drive_path):
     drive = op.basename(drive_path)
     pose_file = op.join(self.base_path, "poses", drive + ".txt")
     self.poses = np.loadtxt(pose_file)
     print(
         f"[_create_drive_loader] drive: {drive}, pose avail: {self.pose_avail}, depth avail: {self.depth_avail}"
     )
     return pykitti.odometry(self.base_path, drive)
Example #7
    def loadImageDataset(self):
        '''Load the data. Optionally, specify the frame range to load'''

        # PyKITTI object for the odometry sequence
        self.image_dataset = pykitti.odometry(self.raw_path,
                                              self.sequence_id,
                                              frames=range(0, 20, 2))
        print("Length of dataset: %s" % str(len(self.image_dataset)))
Example #8
def main(sequence, results_dir, kitti_data_dir, start_end_increment=None):
    poses_name = results_dir+"/{}.txt".format(sequence)

    if start_end_increment is not None:  # Get range of data.
        r_start, r_end, r_incr = start_end_increment
        plot_range = range(r_start, r_end, r_incr)
        data = pykitti.odometry(kitti_data_dir, sequence, frames=plot_range)
Example #9
def draw_gt(seq):
    x = []
    y = []
    odom = pykitti.odometry(KITTI_PATH, seq)
    for i in range(len(odom)):
        t = se3_to_position(odom.poses[i])
        x.append(t[0])
        y.append(t[2])
    plt.plot(x, y, color="g", label="ground truth")
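se3_to_position is used above but not shown; a plausible implementation (an assumption: poses are 4x4 homogeneous T_w_cam0 matrices) just extracts the translation column:

def se3_to_position(T):
    # [x, y, z] translation of a 4x4 homogeneous transform
    return T[:3, 3]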
Example #10
def test(model,
         dataloader,
         device,
         epoch,
         draw_seq=None,
         path=KITTI_PATH,
         test_seq=TEST_SEQ):
    model.eval()
    answer = [[0.0] * 6]
    gt = []
    odom = pykitti.odometry(path, test_seq[0])
    traj = []
    cur_R = np.eye(3)
    cur_t = np.zeros((3, 1))
    for i in range(len(odom)):
        gt.append(se3_to_position(odom.poses[i]))
    for i, batch in enumerate(dataloader):
        seq, _, _ = batch
        seq = seq.to(device)
        predicted = model(seq)
        predicted = predicted.data.cpu().numpy()
        if i == 0:
            for pose in predicted[0]:
                for k in range(len(pose)):  # avoid shadowing the enumerate index
                    pose[k] = pose[k] + answer[-1][k]
                answer.append(pose.tolist())
            traj.append(
                np.concatenate((cur_R.copy(), cur_t.copy()), axis=1).flatten())
            predicted = predicted[1:]
        for poses in predicted:
            ang = eulerAnglesToRotationMatrix([0, answer[-1][0], 0])

            location = ang.dot(poses[-1][3:])
            poses[-1][3:] = location[:]

            last_pose = poses[-1]
            ang_full = eulerAnglesToRotationMatrix(
                [last_pose[1], last_pose[0], last_pose[2]])
            cur_R = ang_full @ cur_R
            for j in range(len(last_pose)):
                last_pose[j] = last_pose[j] + answer[-1][j]
            cur_t = np.array(last_pose[3:]).reshape((3, 1))
            last_pose[0] = (last_pose[0] + np.pi) % (2 * np.pi) - np.pi  # wrap yaw to [-pi, pi)

            answer.append(last_pose.tolist())
            traj.append(
                np.concatenate((cur_R.copy(), cur_t.copy()), axis=1).flatten())

    seq_name = draw_seq if draw_seq else DRAW_SEQ
    np.savetxt(f"{RESULT_FOLDER}/{seq_name}_{epoch}_pred.txt",
               traj,
               fmt="%1.8f")
Example #11
 def __init__(self, path: str, sequence: str) -> None:
     """
     Args:
         path: Path to KITTI Odometry dataset
         sequence: sequence number
     """
     super().__init__(path, path)
     self.sequence = sequence
     self._kitti = pykitti.odometry(path, sequence)
     self.camera_params = self.getCameraParameters()
Example #12
    def init_session(self, session_ind):
        try:
            requested_sequence_path = self.get_session_path_raw(session_ind)
            if self.dataset.sequence_path == requested_sequence_path:
                return
        except Exception:
            # no dataset loaded yet (or it has no sequence_path); fall through and load
            pass

        sequence = "%02d" % session_ind
        self.dataset = pykitti.odometry(self.dataset_directory(), sequence)
Example #13
 def __init__(self,
              frames,
              sequence="08",
              directory="datasets",
              download=False):
     sequence = Downloader(sequence, directory)
     if download and not os.path.exists(os.path.join(directory, "poses")):
         print("Download datasets")
         sequence.download_sequence()
     self._kitty_dataset = pykitti.odometry(sequence.main_dir,
                                            sequence.sequence_id,
                                            frames=frames)
Example #14
 def sequence(self, i):
     if 0 <= i < 11:
         sequence = pykitti.odometry(self.ododir, '{:02d}'.format(i))
         sequence.poses = numpy.array(sequence.poses)
     else:
         drive = kittidrives.drives[i - 11]
         sequence = pykitti.raw(self.rawdir, drive['date'], drive['drive'])
         T_imu_cam0 = util.invert_ht(sequence.calib.T_cam0_imu)
         sequence.poses = numpy.array(
             [numpy.matmul(oxts.T_w_imu, T_imu_cam0) \
                 for oxts in sequence.oxts])
     return sequence
Example #15
def get_kitti_odometry(dataset, sequence):
    data = pykitti.odometry(dataset, sequence)
    n_scans = len(data)
    T_cam_velo = data.calib.T_cam0_velo
    T_velo_cam = np.linalg.inv(T_cam_velo)

    gt_poses = T_velo_cam @ data.poses @ T_cam_velo
    timestamps = [dt.total_seconds() for dt in data.timestamps]
    # Hack: add 1 nanosecond to the first timestamp; the ROS Python
    # implementation has a bug that the C++ one doesn't.
    timestamps[0] += 1e-9

    return n_scans, gt_poses, timestamps, data.velo
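A short usage sketch (the dataset root is hypothetical); the returned pieces line up one-to-one per scan, with the poses conjugated into the velodyne frame as above:

n_scans, gt_poses, timestamps, scans = get_kitti_odometry(
    '/data/kitti/odometry/dataset', '00')
for pose, stamp, scan in zip(gt_poses, timestamps, scans):
    print(stamp, pose.shape, scan.shape)  # (4, 4) pose, (N, 4) [x, y, z, reflectance]
    break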
Example #16
def load_poses():
    directoryL = "/home/sexy/Documents/dataset/"
    list_Y = []
    for i in range(11):
        data = pykitti.odometry(directoryL, str(i).zfill(2))
        data.load_poses()
        Y = np.zeros((len(data.T_w_cam0), 3))
        for j in range(len(data.T_w_cam0)):
            Y[j, :] = data.T_w_cam0[j][:-1, -1]
        delta_Y = np.zeros((len(data.T_w_cam0), 3))
        delta_Y[1:, :] = Y[1:, :] - Y[:-1, :]
        list_Y.append(delta_Y)
    return list_Y
Example #17
def get_datasets(args):

    seq_velo_files = []
    seq_poses = []
    for seq in args.sequences:
        data = pykitti.odometry(args.base_dir, seq, frames=None)
        seq_velo_files.extend(data.velo_files)
        seq_poses.extend(data.poses)
    orig_dset = ptlk.data.datasets.KITTIVelo(seq_velo_files, seq_poses,
                                             args.num_points, args.partition)
    trainset, testset = orig_dset.split()
    return trainset, testset
Example #18
 def __init__(
     self,
     seq,
     original=False,
     path=KITTI_PATH,
     transform=kittiTransform,
     left=True,
     stereo=False,
 ):
     self.odom = pykitti.odometry(path, seq)
     self.transform = transform
     self.left = left
     self.original = original
Example #19
 def __init__(
     self,
     seq,
     path=KITTI_PATH,
     transform=kittiTransform,
     normalizer=normalizer,
     left=True,
     stereo=False,
 ):
     self.odom = pykitti.odometry(path, seq)
     self.transform = transform
     self.left = left
     self.normalizer = normalizer
Example #20
    def __init__(self, cfg, base_dir, seq, frames=None):
        pickles_dir = config.lidar_pickles_path
        seq_data = pykitti.odometry(base_dir, seq)
        self.num_frames = len(seq_data.poses)
        self.data = np.zeros([
            self.num_frames, cfg.input_channels, cfg.input_height,
            cfg.input_width
        ],
                             dtype=np.float16)

        with open(os.path.join(pickles_dir, seq + "_range.pik"),
                  "rb") as opfile:
            i = 0
            while True:
                try:
                    cur_image = pickle.load(opfile)
                    self.data[i, 0, :, :] = cur_image
                except EOFError:
                    break
                i += 1
                if i % 1000 == 0:
                    tools.printf("Loading lidar range seq. %s %.1f%% " %
                                 (seq, (i / self.num_frames) * 100))
            assert (i == self.num_frames)

        with open(os.path.join(pickles_dir, seq + "_intensity.pik"),
                  "rb") as opfile:
            i = 0
            while True:
                try:
                    cur_image = pickle.load(opfile)
                    self.data[i, 1, :, :] = cur_image
                    self.data[i, 1, :, :] = np.divide(self.data[i, 1, :, :],
                                                      255.0,
                                                      dtype=np.float16)
                    self.data[i, 1, :, :] = np.subtract(self.data[i, 1, :, :],
                                                        0.5,
                                                        dtype=np.float16)
                except EOFError:
                    break
                i += 1
                if i % 1000 == 0:
                    tools.printf("Loading lidar intensity seq. %s %.1f%% " %
                                 (seq, (i / self.num_frames) * 100))
            assert (i == self.num_frames)

        # select the range of frames
        if frames:
            self.data = self.data[frames]
            self.num_frames = self.data.shape[0]
Example #21
    def __init__(self, N, std_w, std_v, gamma, bias_w_std):
        self.N = N
        self.dt = 1/20
        self.max_vis = 60.0
        self.std_w = std_w
        self.std_v = std_v
        self.bias_w_std = bias_w_std
        self.gamma = gamma
        # fraction of particles to project onto road
        self.alpha = 0.0
        # probability of detection of a point
        self.rho = 0.6
        # P0 is a design parameter specifying the probability that a visible map point is occluded
        self.P0 = 0.2
        # design PMF: pr of detection d under "not occluded" and the map detections D
        # pr_d = vector of probabilities for each class; find via validation class mismatches? detector property?

        self.ROOT_DIR = os.path.abspath('')
        self.results_path = os.path.join(self.ROOT_DIR, "results/_results.txt")
        self.instances_path = os.path.join(self.ROOT_DIR, "content/kitti_dataset/dataset/sequences/08/instances_2")
        self.CNN_PMF_PATH = os.path.join(self.ROOT_DIR, 'results/PMFs')
        self.CLASS_MARGINAL = os.path.join(self.ROOT_DIR, 'detection_probabilities')
        self.class_marginal = np.load(self.CLASS_MARGINAL + '.npy')
        self.basedir = 'content/kitti_dataset/dataset'
        self.sequence = '08'
        self.cnn_pmf = np.load(self.CNN_PMF_PATH + '.npy') # one row is the pmf of the cnn detecting the class, when the gt corresponding to the row is present
        self.dataset = pykitti.odometry(self.basedir, self.sequence)
        self.Kmat = self.dataset.calib.P_rect_20
        self.T_w0_w = np.array([[0., 0., 1., 0.],
                                [-1., 0., 0., 0.],
                                [0., -1., 0., 0.],
                                [0., 0., 0., 1.]])
        self.T_cam0_cam2 = np.linalg.inv(self.dataset.calib.T_cam0_velo).dot(self.dataset.calib.T_cam2_velo)
        with open("results.json") as json_file:
            all_data = json.load(json_file)

        # sort the detections by image_id
        all_data = all_data['results']
        self.all_data_sort = sorted(all_data, key=itemgetter('image_id'))

        self.rescale_factor = 5
        self.prob_mask = np.zeros((int(370 / self.rescale_factor), int(1226 / self.rescale_factor), 17))
        self.previous_image = None

        print(self.class_marginal)
Example #22
def convert_odom(sequence,
                 color,
                 input_dir='.',
                 output_dir='.',
                 compression=rosbag.Compression.NONE):
    """Convert an odometry KITTI dataset to a bag"""
    sequence_str = str(sequence).zfill(2)
    kitti = pykitti.odometry(input_dir, sequence_str)

    if len(kitti.timestamps) == 0:
        print('Dataset is empty? Exiting.', file=sys.stderr)
        sys.exit(1)

    # The odom timestamps are relative, add an arbitrary datetime to make them absolute
    base_timestamp = datetime(2011, 9, 26, 12, 0, 0)
    timestamps = [base_timestamp + timestamp for timestamp in kitti.timestamps]

    if 0 <= sequence <= 10:  # sequences 00-10 ship with ground truth poses
        print(
            "Odometry dataset sequence {} has ground truth information (poses)."
            .format(sequence_str))

    if color == 'color':
        camera_nrs = (2, 3)
    elif color == 'gray':
        camera_nrs = (0, 1)
    else:
        camera_nrs = list(range(4))

    # Export
    bag_name = "kitti_data_odometry_{}_sequence_{}.bag".format(
        color, sequence_str)
    with rosbag.Bag(os.path.join(output_dir, bag_name),
                    'w',
                    compression=compression) as bag:
        save_dynamic_tf(bag, timestamps, kitti.poses,
                        rectified_camera_frame_id)
        for camera_nr in camera_nrs:
            save_camera_data(bag, kitti, cameras[camera_nr], timestamps)
        print("## OVERVIEW ##")
        print(bag)
Example #23
    def buildSE3(self, sourceImageId, destImageId):
        # Load just the two frames of interest; pykitti indexes the loaded
        # poses relative to the frames list, not by absolute image id.
        dataset = pykitti.odometry(self.vo_path, self.sequence,
                                   frames=[sourceImageId, destImageId])

        SE3_i = dataset.poses[0]
        SE3_j = dataset.poses[1]

        R_i_trans = np.transpose(SE3_i[0:3,0:3])
        R_ij = np.matmul(SE3_j[0:3,0:3],R_i_trans)

        t_i_c = np.matmul(R_i_trans,SE3_i[0:3,3])
        t_j_c = np.matmul(R_i_trans,SE3_j[0:3,3])
        t_ij = t_j_c - t_i_c

        # t_ij = np.matmul(SE3_i[0:3,0:3],t_ij) # Account for rotation of the camera
        # t_ij[2] *= -1
        se3 = np.identity(4)
        se3[0:3,0:3] = R_ij
        se3[0:3,3] = t_ij

        return se3
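For reference (not part of the original): composing the homogeneous matrices directly gives inv(SE3_i) @ SE3_j, whose rotation block is R_i^T R_j with the same translation R_i^T (t_j - t_i); the snippet above instead builds R_ij = R_j R_i^T, so the two differ in rotation convention:

def relative_pose(SE3_i, SE3_j):
    # rotation R_i^T R_j, translation R_i^T (t_j - t_i)
    return np.matmul(np.linalg.inv(SE3_i), SE3_j)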
Example #24
    def __init__(self, config, base_dir, seq, frames=None):
        pickles_dir = os.path.join(base_dir, "sequences", "lidar_pickles")
        seq_data = pykitti.odometry(base_dir, seq)
        num_frames = len(seq_data.poses)
        self.data = np.zeros(
            [num_frames, 2, config.input_height, config.input_width],
            dtype=np.float16)

        with open(os.path.join(pickles_dir, seq + "_range.pik"),
                  "rb") as opfile:
            i = 0
            while True:
                try:
                    cur_image = pickle.load(opfile)
                    self.data[i, 0, :, :] = cur_image
                except EOFError:
                    break
                i += 1
            assert (i == num_frames)

        with open(os.path.join(pickles_dir, seq + "_intensity.pik"),
                  "rb") as opfile:
            i = 0
            while True:
                try:
                    cur_image = pickle.load(opfile)
                    self.data[i, 1, :, :] = cur_image
                    self.data[i, 1, :, :] = np.divide(self.data[i, 1, :, :],
                                                      254.0,
                                                      dtype=np.float16)
                    self.data[i, 1, :, :] = np.subtract(self.data[i, 1, :, :],
                                                        0.5,
                                                        dtype=np.float16)
                except EOFError:
                    break
                i += 1
            assert (i == num_frames)

        if frames:
            self.data = self.data[frames]
Example #25
 def __init__(self, config):
     path_to_dataset = config["path_to_dataset"]
     sequence = config["seq"]
     self.path_to_map_folder = config["path_to_map_folder"] + "/" + sequence
     weight_paths = config["weight_paths"]
     # setup kitti manager
     self.kitti = pykitti.odometry(path_to_dataset, sequence)
     # load the downsampled map
     start_map_num = config["start_frame_num"] // 300
     self.map, self.map_intensity = self.load_map(self.path_to_map_folder+"/{}.pcd".format(start_map_num))
     # load the transformations
     velo2cam2 = torch.from_numpy(self.kitti.calib.T_cam2_velo).float().to("cuda")
     self.velo2cam = velo2cam2
     self.cam2velo = self.velo2cam.inverse()
     # load the models into a list
     self.image_shape = (384, 1280)
     self.models = self.load_models(weight_paths)
     # initialize a camera model used to project lidar point cloud
     self.cam_params = self.get_calib_kitti(sequence).cuda()
     self.camera_model = CameraModel(
         focal_length=self.cam_params[:2], principal_point=self.cam_params[2:]
     )
Example #26
def plot_odometry_trajectory(sequence, downsample=10):
    dataset = pykitti.odometry(basedir_odom, sequence)
    # pose = next(iter(itertools.islice(dataset.poses, 1, None)))
    # print(dataset.calib.T_cam0_velo)
    T_calib = dataset.calib.T_cam2_velo
    T_calib = np.linalg.inv(T_calib)
    x = []
    y = []
    z = []
    # yaw = []
    camera_normal = np.zeros((0, 3))
    for T in dataset.poses:
        T = T_calib.dot(T)
        x.append(T[0, 3])
        y.append(T[1, 3])
        z.append(T[2, 3])
        # Yaw not useful here (it's the camera's orientation)
        # yaw.append(np.arctan2(-T[0,1], T[0,0]))
        camera_normal = np.vstack((camera_normal, T[0:3, 2].T))
    x = np.array(x)
    y = np.array(y)
    z = np.array(z)
    # yaw = np.array(yaw)
    yaw = np.arctan2(camera_normal[:, 1], camera_normal[:, 0])
    plot_traj(x, y, yaw, downsample)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot(x, y, z)
    inds = range(0, len(x), downsample)
    # ax.quiver(x[inds], y[inds], z[inds], cos(yaw[inds]), sin(yaw[inds]), 0*yaw[inds])
    ax.quiver(x[inds],
              y[inds],
              z[inds],
              camera_normal[inds, 0],
              camera_normal[inds, 1],
              camera_normal[inds, 2],
              color='r')
    plt.axis('equal')
    plt.show()
Example #27
def main(dataset, out_dir, sequence, use_intensity):
    """Utility script to convert from the binary form found in the KITTI
    odometry dataset to .ply files. The intensity value for each measurement is
    encoded in the color channel of the output PointCloud.

    If a sequence is specified, it assumes you have a clean copy of the KITTI
    odometry benchmark, because it uses pykitti. If you only have a folder
    with just .bin files the script will most likely fail.

    If no sequence is specified then it blindly reads all the *.bin files in
    the specified dataset directory.
    """
    print(
        "Converting .bin scans into .ply format from:{orig} to:{dest}".format(
            orig=dataset, dest=out_dir))

    os.makedirs(out_dir, exist_ok=True)
    if sequence:
        # Means KITTI like format
        base_path = os.path.join(out_dir, "sequences", sequence, "velodyne",
                                 "")
        os.makedirs(base_path, exist_ok=True)
        data = pykitti.odometry(dataset, sequence)
        velo_files = data.velo_files
        scans = data.velo
    else:
        # Read all the *.bin scans from the dataset folder
        base_path = os.path.join(out_dir, "")
        os.makedirs(base_path, exist_ok=True)
        velo_files = sorted(glob.glob(os.path.join(dataset, "*.bin")))
        scans = yield_velo_scans(velo_files)

    for points, scan_name in tqdm(zip(scans, velo_files),
                                  total=len(velo_files)):
        pcd = vel2ply(points, use_intensity)
        stem = os.path.splitext(scan_name.split("/")[-1])[0]
        filename = base_path + stem + ".ply"
        o3d.io.write_point_cloud(filename, pcd)
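vel2ply is referenced above but not shown; a minimal sketch with Open3D (an assumption: points is an (N, 4) array of [x, y, z, reflectance] with reflectance already in [0, 1]):

def vel2ply(points, use_intensity):
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points[:, :3])
    if use_intensity:
        # encode the intensity in the color channel, as the docstring describes
        grey = np.tile(points[:, 3:4], (1, 3))
        pcd.colors = o3d.utility.Vector3dVector(grey)
    return pcd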
Example #28
def main(dataset, out_dir, sequence, n_scans, start, visualize, normals):
    """From a set of input PointClouds create one unique aggreated map cloud."""
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Info)
    dataset = os.path.join(dataset, "")

    dataset_name = Path(dataset).parent.name

    data = pykitti.odometry(dataset, sequence)
    T_cam_velo = data.calib.T_cam0_velo
    T_velo_cam = np.linalg.inv(T_cam_velo)

    gt_poses = T_velo_cam @ data.poses @ T_cam_velo

    scans = os.path.join(dataset, "sequences", sequence, "velodyne", "")
    scan_names = sorted(glob.glob(scans + "*.ply"))

    # Use the whole sequence if -1 is specified
    n_scans = len(scan_names) if n_scans == -1 else n_scans
    print("Processing " + str(n_scans) + " in " + scans)
    cloud_map = o3d.geometry.PointCloud()
    for idx in tqdm(range(start, start + n_scans)):
        source = preprocess_cloud(
            o3d.io.read_point_cloud(scan_names[idx]),
            normals=normals,
        )

        source.transform(gt_poses[idx])
        cloud_map += source

    map_name = ("gt_" + str(n_scans) + "_scans_" + sequence + "_" +
                dataset_name + ".ply")
    out_file = os.path.join(out_dir, map_name)
    print("saving map to " + out_file)
    o3d.io.write_point_cloud(out_file, cloud_map)

    if visualize:
        o3d.visualization.draw_geometries([cloud_map])
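preprocess_cloud is likewise external to this snippet; a plausible sketch (the voxel size and normal-search parameters are illustrative assumptions):

def preprocess_cloud(pcd, voxel_size=0.25, normals=False):
    pcd = pcd.voxel_down_sample(voxel_size)
    if normals:
        pcd.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=1.0,
                                                              max_nn=30))
    return pcd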
Example #29
import itertools

import pykitti

__author__ = "Lee Clement"
__email__ = "*****@*****.**"

# Change this to the directory where you store KITTI data
basedir = '/Users/leeclement/Desktop/KITTI/odometry/dataset'

# Specify the dataset to load
sequence = '01'

# Load the data. Optionally, specify the frame range to load.
# Passing imformat='cv2' will convert images to uint8 and BGR for
# easy use with OpenCV.
# dataset = pykitti.odometry(basedir, sequence)
dataset = pykitti.odometry(basedir, sequence, frames=range(0, 20, 5))

# dataset.calib:      Calibration data are accessible as a named tuple
# dataset.timestamps: Timestamps are parsed into a list of timedelta objects
# dataset.poses:      Generator to load ground truth poses T_w_cam0
# dataset.camN:       Generator to load individual images from camera N
# dataset.gray:       Generator to load monochrome stereo pairs (cam0, cam1)
# dataset.rgb:        Generator to load RGB stereo pairs (cam2, cam3)
# dataset.velo:       Generator to load velodyne scans as [x,y,z,reflectance]

# Grab some data
second_pose = next(itertools.islice(dataset.poses, 1, None))
first_gray = next(dataset.gray)
first_cam1 = next(dataset.cam1)
first_rgb = next(dataset.rgb)
first_cam2 = next(dataset.cam2)
Example #30
def run_kitti2bag():
    parser = argparse.ArgumentParser(
        description="Convert KITTI dataset to ROS bag file the easy way!")
    # Accepted argument values
    kitti_types = ["raw_synced", "odom_color", "odom_gray"]
    odometry_sequences = []
    for s in range(22):
        odometry_sequences.append(str(s).zfill(2))

    parser.add_argument("kitti_type",
                        choices=kitti_types,
                        help="KITTI dataset type")
    parser.add_argument(
        "dir",
        nargs="?",
        default=os.getcwd(),
        help=
        "base directory of the dataset, if no directory passed the deafult is current working directory"
    )
    parser.add_argument(
        "-t",
        "--date",
        help=
        "date of the raw dataset (i.e. 2011_09_26), option is only for RAW datasets."
    )
    parser.add_argument(
        "-r",
        "--drive",
        help=
        "drive number of the raw dataset (i.e. 0001), option is only for RAW datasets."
    )
    parser.add_argument(
        "-s",
        "--sequence",
        choices=odometry_sequences,
        help=
        "sequence of the odometry dataset (between 00 - 21), option is only for ODOMETRY datasets."
    )
    args = parser.parse_args()

    bridge = CvBridge()
    compression = rosbag.Compression.NONE
    # compression = rosbag.Compression.BZ2
    # compression = rosbag.Compression.LZ4

    # CAMERAS
    cameras = [(0, 'camera_gray_left', '/kitti/camera_gray_left'),
               (1, 'camera_gray_right', '/kitti/camera_gray_right'),
               (2, 'camera_color_left', '/kitti/camera_color_left'),
               (3, 'camera_color_right', '/kitti/camera_color_right')]

    if args.kitti_type.find("raw") != -1:

        if args.date is None:
            print("Date option is not given. It is mandatory for raw dataset.")
            print(
                "Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>"
            )
            sys.exit(1)
        elif args.drive is None:
            print(
                "Drive option is not given. It is mandatory for raw dataset.")
            print(
                "Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>"
            )
            sys.exit(1)

        bag = rosbag.Bag("kitti_{}_drive_{}_{}.bag".format(
            args.date, args.drive, args.kitti_type[4:]),
                         'w',
                         compression=compression)
        kitti = pykitti.raw(args.dir, args.date, args.drive)
        if not os.path.exists(kitti.data_path):
            print('Path {} does not exist. Exiting.'.format(kitti.data_path))
            sys.exit(1)

        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)

        try:
            # IMU
            imu_frame_id = 'imu_link'
            imu_topic = '/kitti/oxts/imu'
            gps_fix_topic = '/kitti/oxts/gps/fix'
            gps_vel_topic = '/kitti/oxts/gps/vel'
            velo_frame_id = 'velo_link'
            velo_topic = '/kitti/velo'

            T_base_link_to_imu = np.eye(4, 4)
            T_base_link_to_imu[0:3, 3] = [-2.71 / 2.0 - 0.05, 0.32, 0.93]

            # tf_static
            transforms = [
                ('base_link', imu_frame_id, T_base_link_to_imu),
                (imu_frame_id, velo_frame_id, inv(kitti.calib.T_velo_imu)),
                (imu_frame_id, cameras[0][1], inv(kitti.calib.T_cam0_imu)),
                (imu_frame_id, cameras[1][1], inv(kitti.calib.T_cam1_imu)),
                (imu_frame_id, cameras[2][1], inv(kitti.calib.T_cam2_imu)),
                (imu_frame_id, cameras[3][1], inv(kitti.calib.T_cam3_imu))
            ]

            util = pykitti.utils.read_calib_file(
                os.path.join(kitti.calib_path, 'calib_cam_to_cam.txt'))

            # Export
            save_static_transforms(bag, transforms, kitti.timestamps)
            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=None)
            save_imu_data(bag, kitti, imu_frame_id, imu_topic)
            save_gps_fix_data(bag, kitti, imu_frame_id, gps_fix_topic)
            save_gps_vel_data(bag, kitti, imu_frame_id, gps_vel_topic)
            for camera in cameras:
                save_camera_data(bag,
                                 args.kitti_type,
                                 kitti,
                                 util,
                                 bridge,
                                 camera=camera[0],
                                 camera_frame_id=camera[1],
                                 topic=camera[2],
                                 initial_time=None)
            save_velo_data(bag, kitti, velo_frame_id, velo_topic)

        finally:
            print("## OVERVIEW ##")
            print(bag)
            bag.close()

    elif args.kitti_type.find("odom") != -1:

        if args.sequence is None:
            print(
                "Sequence option is not given. It is mandatory for odometry dataset."
            )
            print(
                "Usage for odometry dataset: kitti2bag {odom_color, odom_gray} [dir] -s <sequence>"
            )
            sys.exit(1)

        bag = rosbag.Bag("kitti_data_odometry_{}_sequence_{}.bag".format(
            args.kitti_type[5:], args.sequence),
                         'w',
                         compression=compression)

        kitti = pykitti.odometry(args.dir, args.sequence)
        if not os.path.exists(kitti.sequence_path):
            print('Path {} does not exist. Exiting.'.format(
                kitti.sequence_path))
            sys.exit(1)

        kitti.load_calib()
        kitti.load_timestamps()

        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)

        if args.sequence in odometry_sequences[:11]:
            print(
                "Odometry dataset sequence {} has ground truth information (poses)."
                .format(args.sequence))
            kitti.load_poses()

        try:
            util = pykitti.utils.read_calib_file(
                os.path.join(args.dir, 'sequences', args.sequence,
                             'calib.txt'))
            current_epoch = (datetime.utcnow() -
                             datetime(1970, 1, 1)).total_seconds()
            # Export
            if args.kitti_type.find("gray") != -1:
                used_cameras = cameras[:2]
            elif args.kitti_type.find("color") != -1:
                used_cameras = cameras[-2:]

            save_dynamic_tf(bag,
                            kitti,
                            args.kitti_type,
                            initial_time=current_epoch)
            for camera in used_cameras:
                save_camera_data(bag,
                                 args.kitti_type,
                                 kitti,
                                 util,
                                 bridge,
                                 camera=camera[0],
                                 camera_frame_id=camera[1],
                                 topic=camera[2],
                                 initial_time=current_epoch)

        finally:
            print("## OVERVIEW ##")
            print(bag)
            bag.close()
Example #31
from mpl_toolkits.mplot3d import Axes3D

import pykitti

__author__ = "Lee Clement"
__email__ = "*****@*****.**"

# Change this to the directory where you store KITTI data
basedir = '/Users/leeclement/Desktop/KITTI/odometry/dataset'

# Specify the dataset to load
sequence = '04'

# Load the data. Optionally, specify the frame range to load.
# dataset = pykitti.odometry(basedir, sequence)
dataset = pykitti.odometry(basedir, sequence, frames=range(0, 20, 5))

# dataset.calib:      Calibration data are accessible as a named tuple
# dataset.timestamps: Timestamps are parsed into a list of timedelta objects
# dataset.poses:      List of ground truth poses T_w_cam0
# dataset.camN:       Generator to load individual images from camera N
# dataset.gray:       Generator to load monochrome stereo pairs (cam0, cam1)
# dataset.rgb:        Generator to load RGB stereo pairs (cam2, cam3)
# dataset.velo:       Generator to load velodyne scans as [x,y,z,reflectance]

# Grab some data
second_pose = dataset.poses[1]
first_gray = next(iter(dataset.gray))
first_cam1 = next(iter(dataset.cam1))
first_rgb = dataset.get_rgb(0)
first_cam2 = dataset.get_cam2(0)