Example #1
def __init__(self, img_dir, lidar_dir, calib_dir):
    # Calibration is shared across the whole sequence (KITTI raw/video layout).
    self.calib = utils.Calibration(calib_dir, from_video=True)
    self.img_dir = img_dir
    self.lidar_dir = lidar_dir
    # Sort both file lists so images and lidar scans line up by frame index.
    self.img_filenames = sorted(
        os.path.join(img_dir, filename) for filename in os.listdir(img_dir))
    self.lidar_filenames = sorted(
        os.path.join(lidar_dir, filename) for filename in os.listdir(lidar_dir))
    print(len(self.img_filenames))
    print(len(self.lidar_filenames))
    # assert len(self.img_filenames) == len(self.lidar_filenames)
    self.num_samples = len(self.img_filenames)
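A minimal usage sketch for the constructor above, assuming it belongs to a KITTI-raw sequence dataset class; the class name RawKittiDataset and the directory layout are placeholders for illustration, not part of the original snippet:

# Hypothetical usage; paths follow the KITTI raw release layout.
dataset = RawKittiDataset(
    img_dir='2011_09_26/2011_09_26_drive_0001_sync/image_02/data',
    lidar_dir='2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data',
    calib_dir='2011_09_26')  # from_video=True expects the per-date calib files here
print(dataset.num_samples)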
Example #2
import os

import tqdm


def main():
    # `utils` and `filter_boxes_kitti` are project-local helpers assumed importable.
    gt_root_folder = '/dataset/kitti_format/waymo/training/'
    cam_idx = 0
    thresh = 5  # filtering threshold passed to filter_boxes_kitti below
    lidar_dtype = float  # np.float64 when read; stock KITTI .bin scans are float32

    gt_label_folder = os.path.join(gt_root_folder, 'label_{}/'.format(cam_idx))
    gt_lidar_folder = os.path.join(gt_root_folder, 'velodyne/')
    gt_calib_folder = os.path.join(gt_root_folder, 'calib/')
    new_label_folder = os.path.join(gt_root_folder,
                                    'label_filtered_{}/'.format(cam_idx))
    os.makedirs(new_label_folder, exist_ok=True)

    label_names = [
        f.name.split('.')[0] for f in os.scandir(gt_label_folder)
        if f.is_file() and f.name.endswith('.txt')
    ]
    for sample_name in tqdm.tqdm(label_names):
        calib_filename = os.path.join(gt_calib_folder, sample_name + '.txt')
        gt_filename = os.path.join(gt_label_folder, sample_name + '.txt')
        lidar_filename = os.path.join(gt_lidar_folder, sample_name + '.bin')

        lidar_scan = utils.load_velo_scan(lidar_filename, dtype=lidar_dtype)

        calib = utils.Calibration(calib_filename, cam_idx=cam_idx)
        objects_gt = utils.read_label(gt_filename)
        filtered_objects = filter_boxes_kitti(objects_gt,
                                              lidar_scan,
                                              calib,
                                              thresh=thresh)
        writepath = os.path.join(new_label_folder, sample_name + '.txt')

        # print(f"Before filtering: {len(objects_gt)}, after filtering {len(filtered_objects)}")
        with open(writepath, 'w') as f:
            for obj in filtered_objects:
                f.write(obj.get_string_ann() + '\n')
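The implementation of filter_boxes_kitti is not shown in this example. A minimal, hypothetical sketch of the underlying idea (keep only ground-truth boxes that contain enough lidar points) might look like the following; it ignores box yaw and assumes the scan has already been transformed into rectified camera coordinates, a step the real code handles via calib:

import numpy as np

def count_points_in_box_aabb(points_rect, obj):
    # Axis-aligned approximation of a KITTI box: obj.t is the bottom-center in
    # rectified camera coords (x right, y down, z forward); obj.h/w/l are the
    # box dimensions. These attribute names follow the usual KITTI label object
    # and are assumptions for this sketch.
    x, y, z = obj.t
    in_x = np.abs(points_rect[:, 0] - x) <= obj.l / 2.0
    in_y = (points_rect[:, 1] <= y) & (points_rect[:, 1] >= y - obj.h)
    in_z = np.abs(points_rect[:, 2] - z) <= obj.w / 2.0
    return int(np.sum(in_x & in_y & in_z))

def filter_boxes_simple(objects, points_rect, thresh=5):
    # Drop boxes that contain fewer than `thresh` lidar points.
    return [obj for obj in objects
            if count_points_in_box_aabb(points_rect, obj) >= thresh]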
Example #3
def get_calibration(self, idx):
    assert idx < self.num_samples
    # Per-frame calibration files are named 000000.txt, 000001.txt, ...
    calib_filename = os.path.join(self.calib_dir, '%06d.txt' % idx)
    return utils.Calibration(calib_filename)
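Hypothetical usage, where dataset stands for an instance of the class this method belongs to (the class itself is not shown in the snippet):

# Load the calibration for sample 0 (i.e. calib/000000.txt).
calib = dataset.get_calibration(0)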
import cv2
import numpy as np
import torch.utils.data as torch_data

import kitti.bev_utils as bev_utils
import kitti.config as cnf
# `KittiDataset` and `utils` are project-local modules assumed to be imported
# from the same kitti package.

root_dir = 'data/'
dataset = KittiDataset(root_dir=root_dir, split='list', set='sampledata')
data_loader = torch_data.DataLoader(dataset, batch_size=1, shuffle=False)

for batch_index, (image_file, lidar_file, label_file,
                  calib_file) in enumerate(data_loader):

    for i, img_file in enumerate(image_file):
        img = cv2.imread(img_file)
        objects = utils.read_label(label_file[i])
        lidar = np.fromfile(lidar_file[i], dtype=np.float32).reshape(-1, 4)
        calib = utils.Calibration(calib_file[i])

        lidar = bev_utils.removePoints(lidar, cnf.boundary)
        img_bev = bev_utils.makeBVFeature(lidar, cnf.DISCRETIZATION,
                                          cnf.boundary)
        img_bev = np.uint8(img_bev * 255)
        labels, noObjectLabels = bev_utils.read_labels_for_bevbox(objects)

        if not noObjectLabels:
            labels[:, 1:] = utils.camera_to_lidar_box(
                labels[:, 1:])  # convert boxes from rect camera to velodyne coordinates
        target = bev_utils.build_yolo_target(labels, cnf.boundary)

        pc = lidar[:, 0:3]
        # Display the point cloud, the point-cloud BEV map, and the corresponding
        # front-camera 2D image, all with object bounding boxes. Note that the 2D
        # image boxes are projected from the point-cloud 3D labels.
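The display step described in those comments is not included in the snippet. A minimal sketch of showing the BEV map and the camera image with OpenCV, placed inside the inner loop and with bounding-box drawing omitted, could look like this:

        # Sketch only: assumes a local OpenCV GUI backend is available.
        bev_vis = img_bev
        if bev_vis.ndim == 3 and bev_vis.shape[0] == 3:  # handle a channel-first BEV map
            bev_vis = bev_vis.transpose(1, 2, 0)
        cv2.imshow('bev', bev_vis)
        cv2.imshow('image', img)
        if cv2.waitKey(0) & 0xFF == 27:  # press Esc to stop browsing samples
            break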