Example #1
def create_tf_record_train_as_val(fn_out, split, vis_results):
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map)
    writer = tf.python_io.TFRecordWriter(fn_out)
    params = read_params(FLAGS.param)
    logging.debug('Params: ' + str(params))
    nusc = NuScenes(version='v1.0-trainval', dataroot=FLAGS.nuscenes, verbose=True)
    sensor = 'LIDAR_TOP'
    nu_to_kitti_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse
    split_logs = create_splits_logs(split, nusc)
    sample_tokens = split_to_samples(nusc, split_logs)
    random.shuffle(sample_tokens)
    print('Number of samples:', len(sample_tokens))

    for sample_token in sample_tokens[:100]:  # Limit to the first 100 shuffled samples.
        sample = nusc.get('sample', sample_token)
        lidar_top_data = nusc.get('sample_data', sample['data'][sensor])
        if not lidar_top_data['prev']:
            continue
        lidar_top_data_prev = nusc.get('sample_data', lidar_top_data['prev'])
        labels_corners, labels_center, labels_data = compute_labels_image(nusc, sample, sensor,
                                                                          nu_to_kitti_lidar, params)
        # Double splitext strips the '.pcd.bin' double extension from the lidar filename.
        filename = os.path.splitext(os.path.splitext(lidar_top_data['filename'])[0])[0]
        filename_prev = os.path.splitext(os.path.splitext(lidar_top_data_prev['filename'])[0])[0]
        tf_example = dict_to_tf_example(labels_corners, labels_center, labels_data, params, label_map_dict,
                                        FLAGS.data, FLAGS.data_beliefs, filename, filename_prev)
        writer.write(tf_example.SerializeToString())
        if vis_results:
            visualize_results(FLAGS.data, filename, labels_corners, os.path.join(FLAGS.output, 'Debug'))
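A minimal usage sketch, assuming the FLAGS referenced above (label_map, param, nuscenes, data, data_beliefs, output) are defined elsewhere with TF1-style flags; the output filename and split name here are illustrative.

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

def main(_):
    # Write a TFRecord for the 'train' split and dump debug visualizations.
    create_tf_record_train_as_val('train_as_val.tfrecord',
                                  split='train',
                                  vis_results=True)

if __name__ == '__main__':
    tf.app.run()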
Example #2
    def kitti_res_to_nuscenes(self, meta: Dict[str, bool] = None) -> None:
        """
        Converts a KITTI detection result to the nuScenes detection results format.
        :param meta: Meta data describing the method used to generate the result. See nuscenes.org/object-detection.
        """
        # Dummy meta data, please adjust accordingly.
        if meta is None:
            meta = {
                'use_camera': False,
                'use_lidar': True,
                'use_radar': False,
                'use_map': False,
                'use_external': False,
            }

        # Init.
        results = {}

        # Load the KITTI dataset.
        kitti = KittiDB(root=self.nusc_kitti_dir, splits=(self.split, ))

        # Get assignment of scenes to splits.
        split_logs = create_splits_logs(self.split, self.nusc)

        # Use only the samples from the current split.
        sample_tokens = self._split_to_samples(split_logs)
        sample_tokens = sample_tokens[:self.image_count]

        for sample_token in sample_tokens:
            # Get the KITTI boxes we just generated in LIDAR frame.
            kitti_token = '%s_%s' % (self.split, sample_token)
            boxes = kitti.get_boxes(token=kitti_token)

            # Convert KITTI boxes to nuScenes detection challenge result format.
            sample_results = [self._box_to_sample_result(sample_token, box) for box in boxes]

            # Store all results for this image.
            results[sample_token] = sample_results

        # Store submission file to disk.
        submission = {
            'meta': meta,
            'results': results
        }
        submission_path = os.path.join(self.nusc_kitti_dir, 'submission.json')
        print('Writing submission to: %s' % submission_path)
        with open(submission_path, 'w') as f:
            json.dump(submission, f, indent=2)
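For reference, one entry of the resulting submission.json takes the shape sketched below. The values are illustrative, and the exact per-box fields depend on self._box_to_sample_result; the keys shown follow the nuScenes detection result format.

# Illustrative shape of submission.json (values are placeholders).
example_submission = {
    'meta': {'use_camera': False, 'use_lidar': True, 'use_radar': False,
             'use_map': False, 'use_external': False},
    'results': {
        '<sample_token>': [{
            'sample_token': '<sample_token>',
            'translation': [0.0, 0.0, 0.0],    # Box center in global frame (m).
            'size': [1.0, 1.0, 1.0],           # Width, length, height (m).
            'rotation': [1.0, 0.0, 0.0, 0.0],  # Quaternion (w, x, y, z).
            'velocity': [0.0, 0.0],
            'detection_name': 'car',
            'detection_score': 0.5,
            'attribute_name': '',
        }],
    },
}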
Example #3
    def __init__(self,
                 cfg,
                 cam_name: str = 'CAM_FRONT',
                 lidar_name: str = 'LIDAR_TOP',
                 nusc_version: str = 'v1.0-trainval',
                 is_train=True,
                 transforms=None):
        super(nuScenesDataset, self).__init__()

        self.split = cfg.DATASETS.TRAIN_SPLIT if is_train else cfg.DATASETS.TEST_SPLIT

        if self.split == "train_detect":
            self.image_count = 14059
        elif self.split == "val":
            self.image_count = 6019

        self.cam_name = cam_name
        self.lidar_name = lidar_name
        self.nusc_version = nusc_version
        # Select subset of the data to look at.
        self.nusc = NuScenes(version=nusc_version)

        self.is_train = is_train
        self.transforms = transforms
        self.classes = cfg.DATASETS.DETECT_CLASSES
        self.flip_prob = cfg.INPUT.FLIP_PROB_TRAIN if is_train else 0
        self.aug_prob = cfg.INPUT.SHIFT_SCALE_PROB_TRAIN if is_train else 0
        self.shift_scale = cfg.INPUT.SHIFT_SCALE_TRAIN
        self.num_classes = len(self.classes)

        self.input_width = cfg.INPUT.WIDTH_TRAIN
        self.input_height = cfg.INPUT.HEIGHT_TRAIN
        self.output_width = self.input_width // cfg.MODEL.BACKBONE.DOWN_RATIO
        self.output_height = self.input_height // cfg.MODEL.BACKBONE.DOWN_RATIO
        self.max_objs = cfg.DATASETS.MAX_OBJECTS

        self.logger = logging.getLogger(__name__)
        self.logger.info(
            "Initializing nuScenes {} set with {} files loaded".format(
                self.split, self.image_count))

        # Get assignment of scenes to splits.
        self.split_logs = create_splits_logs(self.split, self.nusc)
        self.sample_tokens = self._split_to_samples(self.split_logs)
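A sketch of constructing the dataset, assuming cfg is a yacs-style config exposing the DATASETS, INPUT, and MODEL.BACKBONE nodes read in __init__:

dataset = nuScenesDataset(cfg,
                          cam_name='CAM_FRONT',
                          lidar_name='LIDAR_TOP',
                          nusc_version='v1.0-trainval',
                          is_train=True)
print('%d samples in split %s' % (len(dataset.sample_tokens), dataset.split))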
Example #4
    def nuscenes_gt_to_kitti(self) -> None:
        """
        Converts nuScenes GT annotations to KITTI format.
        """
        kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
        kitti_to_nu_lidar_inv = kitti_to_nu_lidar.inverse
        imsize = (1600, 900)

        token_idx = 0  # Start tokens from 0.

        # Get assignment of scenes to splits.
        split_logs = create_splits_logs(self.split, self.nusc)

        # Create output folders.
        label_folder = os.path.join(self.nusc_kitti_dir, self.split, 'label_2')
        calib_folder = os.path.join(self.nusc_kitti_dir, self.split, 'calib')
        image_folder = os.path.join(self.nusc_kitti_dir, self.split, 'image_2')
        lidar_folder = os.path.join(self.nusc_kitti_dir, self.split,
                                    'velodyne')
        for folder in [label_folder, calib_folder, image_folder, lidar_folder]:
            if not os.path.isdir(folder):
                os.makedirs(folder)

        # Use only the samples from the current split.
        sample_tokens = self._split_to_samples(split_logs)
        sample_tokens = sample_tokens[:self.image_count]

        tokens = []
        for sample_token in sample_tokens:

            # Get sample data.
            sample = self.nusc.get('sample', sample_token)
            sample_annotation_tokens = sample['anns']
            cam_front_token = sample['data'][self.cam_name]
            lidar_token = sample['data'][self.lidar_name]

            # Retrieve sensor records.
            sd_record_cam = self.nusc.get('sample_data', cam_front_token)
            sd_record_lid = self.nusc.get('sample_data', lidar_token)
            cs_record_cam = self.nusc.get(
                'calibrated_sensor', sd_record_cam['calibrated_sensor_token'])
            cs_record_lid = self.nusc.get(
                'calibrated_sensor', sd_record_lid['calibrated_sensor_token'])

            # Combine transformations and convert to KITTI format.
            # Note: cam uses same conventions in KITTI and nuScenes.
            lid_to_ego = transform_matrix(cs_record_lid['translation'],
                                          Quaternion(
                                              cs_record_lid['rotation']),
                                          inverse=False)
            ego_to_cam = transform_matrix(cs_record_cam['translation'],
                                          Quaternion(
                                              cs_record_cam['rotation']),
                                          inverse=True)
            velo_to_cam = np.dot(ego_to_cam, lid_to_ego)

            # Convert from KITTI to nuScenes LIDAR coordinates, where we apply velo_to_cam.
            velo_to_cam_kitti = np.dot(velo_to_cam,
                                       kitti_to_nu_lidar.transformation_matrix)
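            # Read right to left: a point in the KITTI lidar frame is first
            # mapped into the nuScenes lidar frame, then into the ego frame,
            # and finally into the camera frame.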

            # Currently not used.
            imu_to_velo_kitti = np.zeros((3, 4))  # Dummy values.
            r0_rect = Quaternion(axis=[1, 0, 0], angle=0)  # Dummy values.

            # Projection matrix.
            p_left_kitti = np.zeros((3, 4))
            p_left_kitti[:3, :3] = cs_record_cam['camera_intrinsic']  # Cameras are always rectified.

            # Create KITTI style transforms.
            velo_to_cam_rot = velo_to_cam_kitti[:3, :3]
            velo_to_cam_trans = velo_to_cam_kitti[:3, 3]

            # Check that the rotation has the same format as in KITTI.
            assert (velo_to_cam_rot.round(0) == np.array([[0, -1, 0],
                                                          [0, 0, -1],
                                                          [1, 0, 0]])).all()
            assert (velo_to_cam_trans[1:3] < 0).all()
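            # The rounded rotation encodes the expected KITTI axis convention:
            # x_cam = -y_velo, y_cam = -z_velo, z_cam = x_velo; the second
            # assert guards the sign of the camera offset along its y and z axes.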

            # Retrieve the token from the lidar.
            # Note that this may be confusing as the filename of the camera will include the timestamp of the lidar,
            # not the camera.
            filename_cam_full = sd_record_cam['filename']
            filename_lid_full = sd_record_lid['filename']
            # token = '%06d' % token_idx # Alternative to use KITTI names.
            token_idx += 1

            # Convert image (jpg to png).
            src_im_path = os.path.join(self.nusc.dataroot, filename_cam_full)
            dst_im_path = os.path.join(image_folder, sample_token + '.png')
            if not os.path.exists(dst_im_path):
                im = Image.open(src_im_path)
                im.save(dst_im_path, "PNG")

            # Convert lidar.
            # Note that we are only using a single sweep, instead of the commonly used n sweeps.
            src_lid_path = os.path.join(self.nusc.dataroot, filename_lid_full)
            dst_lid_path = os.path.join(lidar_folder, sample_token + '.bin')
            assert not dst_lid_path.endswith('.pcd.bin')
            pcl = LidarPointCloud.from_file(src_lid_path)
            pcl.rotate(
                kitti_to_nu_lidar_inv.rotation_matrix)  # In KITTI lidar frame.
            with open(dst_lid_path, "w") as lid_file:
                pcl.points.T.tofile(lid_file)

            # Add to tokens.
            tokens.append(sample_token)

            # Create calibration file.
            kitti_transforms = dict()
            kitti_transforms['P0'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P1'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P2'] = p_left_kitti  # Left camera transform.
            kitti_transforms['P3'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['R0_rect'] = r0_rect.rotation_matrix  # Cameras are already rectified.
            kitti_transforms['Tr_velo_to_cam'] = np.hstack(
                (velo_to_cam_rot, velo_to_cam_trans.reshape(3, 1)))
            kitti_transforms['Tr_imu_to_velo'] = imu_to_velo_kitti
            calib_path = os.path.join(calib_folder, sample_token + '.txt')
            with open(calib_path, "w") as calib_file:
                for key, val in kitti_transforms.items():
                    val_str = ' '.join('%.12e' % v for v in val.flatten())
                    calib_file.write('%s: %s\n' % (key, val_str))

            # Write label file.
            label_path = os.path.join(label_folder, sample_token + '.txt')
            if os.path.exists(label_path):
                print('Skipping existing file: %s' % label_path)
                continue
            else:
                print('Writing file: %s' % label_path)
            with open(label_path, "w") as label_file:
                for sample_annotation_token in sample_annotation_tokens:
                    sample_annotation = self.nusc.get('sample_annotation',
                                                      sample_annotation_token)

                    # Get box in LIDAR frame.
                    _, box_lidar_nusc, _ = self.nusc.get_sample_data(
                        lidar_token,
                        box_vis_level=BoxVisibility.NONE,
                        selected_anntokens=[sample_annotation_token])
                    box_lidar_nusc = box_lidar_nusc[0]

                    # Truncated: set to 0 (untruncated) for all objects.
                    truncated = 0.0

                    # Occluded: set to 0 (fully visible), as this information is not available in nuScenes.
                    occluded = 0

                    # Convert nuScenes category to nuScenes detection challenge category.
                    detection_name = category_to_detection_name(
                        sample_annotation['category_name'])

                    # Skip categories that are not part of the nuScenes detection challenge.
                    if detection_name is None:
                        continue

                    # Convert from nuScenes to KITTI box format.
                    box_cam_kitti = KittiDB.box_nuscenes_to_kitti(
                        box_lidar_nusc, Quaternion(matrix=velo_to_cam_rot),
                        velo_to_cam_trans, r0_rect)

                    # Project 3d box to 2d box in image, ignore box if it does not fall inside.
                    bbox_2d = KittiDB.project_kitti_box_to_image(box_cam_kitti,
                                                                 p_left_kitti,
                                                                 imsize=imsize)
                    if bbox_2d is None:
                        continue

                    # Set dummy score so we can use this file as result.
                    box_cam_kitti.score = 0

                    # Convert box to output string format.
                    output = KittiDB.box_to_string(name=detection_name,
                                                   box=box_cam_kitti,
                                                   bbox_2d=bbox_2d,
                                                   truncation=truncated,
                                                   occlusion=occluded)

                    # Write to disk.
                    label_file.write(output + '\n')
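Each line written by KittiDB.box_to_string follows the standard KITTI label layout; a minimal parser sketch for reading such a line back, assuming the usual 15-value format plus the dummy score appended above:

def parse_kitti_label_line(line):
    f = line.split()
    return {
        'type': f[0],
        'truncated': float(f[1]),
        'occluded': int(f[2]),
        'alpha': float(f[3]),
        'bbox_2d': [float(v) for v in f[4:8]],      # left, top, right, bottom
        'dimensions': [float(v) for v in f[8:11]],  # height, width, length (m)
        'location': [float(v) for v in f[11:14]],   # x, y, z in camera frame (m)
        'rotation_y': float(f[14]),
        'score': float(f[15]) if len(f) > 15 else None,
    }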
Example #5
def create_tf_record(fn_out, split, vis_results):
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map)
    writer = tf.python_io.TFRecordWriter(fn_out)
    params = read_params(FLAGS.param)
    logging.debug('Params: ' + str(params))
    nusc = NuScenes(version='v1.0-trainval',
                    dataroot=FLAGS.nuscenes,
                    verbose=True)
    sensor = 'LIDAR_TOP'
    nu_to_kitti_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse
    #
    # Optional: a hand-picked list of training scenes, e.g.
    # selected_train_set_1 = ['scene-0030', 'scene-0349', 'scene-0001', ...],
    # can stand in for the official split via the commented assignment below.
    split_logs = create_splits_logs(split, nusc)
    # split_logs = selected_train_set_1
    sample_tokens = split_to_samples(nusc, split_logs)
    random.shuffle(sample_tokens)
    print('Number of samples:', len(sample_tokens))

    for sample_token in sample_tokens:
        sample = nusc.get('sample', sample_token)
        lidar_top_data = nusc.get('sample_data', sample['data'][sensor])
        if not lidar_top_data['prev']:
            continue
        lidar_top_data_prev = nusc.get('sample_data', lidar_top_data['prev'])
        labels_corners, labels_center, labels_data = compute_labels_image(
            nusc, sample, sensor, nu_to_kitti_lidar, params)
        # Double splitext strips the '.pcd.bin' double extension.
        filename = os.path.splitext(
            os.path.splitext(lidar_top_data['filename'])[0])[0]
        filename_prev = os.path.splitext(
            os.path.splitext(lidar_top_data_prev['filename'])[0])[0]
        tf_example = dict_to_tf_example(labels_corners, labels_center,
                                        labels_data, params, label_map_dict,
                                        FLAGS.data, FLAGS.data_beliefs,
                                        filename, filename_prev)
        writer.write(tf_example.SerializeToString())
        if vis_results:
            visualize_results(FLAGS.data, filename, labels_corners,
                              os.path.join(FLAGS.output, 'Debug'))
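split_to_samples is not shown in these examples; a plausible sketch (an assumption, mirroring the _split_to_samples helper the class-based examples use) keeps the samples whose parent scene belongs to a log in the split:

def split_to_samples(nusc, split_logs):
    samples = []
    for sample in nusc.sample:
        scene = nusc.get('scene', sample['scene_token'])
        log = nusc.get('log', scene['log_token'])
        if log['logfile'] in split_logs:
            samples.append(sample['token'])
    return samples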
Example #6
    def nuscenes_gt_to_kitti(self) -> None:
        """
        Converts nuScenes GT annotations to KITTI format.
        """
        kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
        kitti_to_nu_lidar_inv = kitti_to_nu_lidar.inverse
        imsize = (1600, 900)

        token_idx = 0  # Start tokens from 0.

        # Get assignment of scenes to splits.
        split_logs = create_splits_logs(self.split, self.nusc)

        scene_splits = create_splits_scenes(verbose=False)
        scene_to_log = {
            scene['name']: self.nusc.get('log', scene['log_token'])['logfile']
            for scene in self.nusc.scene
        }
        logs = set()
        scenes = scene_splits[self.split]
        for scene in scenes:
            logs.add(scene_to_log[scene])
        # print(len(scenes), len(logs))

        split_mapping = {"train": "training", "val": "testing"}

        # Create output folders.
        label_folder = os.path.join(self.nusc_kitti_dir,
                                    split_mapping[self.split], 'label_2')
        calib_folder = os.path.join(self.nusc_kitti_dir,
                                    split_mapping[self.split], 'calib')
        image_folder = os.path.join(self.nusc_kitti_dir,
                                    split_mapping[self.split], 'image_2')
        lidar_folder = os.path.join(self.nusc_kitti_dir,
                                    split_mapping[self.split], 'velodyne')
        for folder in [label_folder, calib_folder, image_folder, lidar_folder]:
            if not os.path.isdir(folder):
                os.makedirs(folder)

        # Use only the samples from the current split.
        sample_tokens = self._split_to_samples(split_logs)
        # sample_tokens = sample_tokens[:self.image_count]

        # print(len(sample_tokens))
        tokens = []
        if self.split == "train":
            split_file = [
                os.path.join(self.nusc_kitti_dir, "train.txt"),
                os.path.join(self.nusc_kitti_dir, "val.txt")
            ]
        elif self.split == 'val':
            split_file = os.path.join(self.nusc_kitti_dir, "test.txt")
        # if os.path.isfile(split_file):
        #     os.remove(split_file)
        if self.split == "train":
            cnt = 0
            with open(split_file[0], "w") as f:
                for seq in list(self.sequence_mapping.keys())[:-150]:
                    for tk in self.sequence_mapping[seq]:
                        f.write("%06d" % tk + "\n")
                        cnt += 1
            # print(cnt)

            cnt = 0
            with open(split_file[1], "w") as f:
                for seq in list(self.sequence_mapping.keys())[-150:]:
                    for tk in self.sequence_mapping[seq]:
                        f.write("%06d" % tk + "\n")
                        cnt += 1
            # print(cnt)
        elif self.split == "val":
            with open(split_file, "w") as f:
                for seq in self.sequence_mapping.keys():
                    for tk in self.sequence_mapping[seq]:
                        f.write("%06d" % tk + "\n")

        for idx, sample_token in enumerate(sample_tokens):

            # Get sample data.
            sample = self.nusc.get('sample', sample_token)
            sample_annotation_tokens = sample['anns']
            cam_front_token = sample['data'][self.cam_name]
            lidar_token = sample['data'][self.lidar_name]
            sample_name = "%06d" % idx

            # Retrieve sensor records.
            sd_record_cam = self.nusc.get('sample_data', cam_front_token)
            sd_record_lid = self.nusc.get('sample_data', lidar_token)
            cs_record_cam = self.nusc.get(
                'calibrated_sensor', sd_record_cam['calibrated_sensor_token'])
            cs_record_lid = self.nusc.get(
                'calibrated_sensor', sd_record_lid['calibrated_sensor_token'])

            # Combine transformations and convert to KITTI format.
            # Note: cam uses same conventions in KITTI and nuScenes.
            lid_to_ego = transform_matrix(cs_record_lid['translation'],
                                          Quaternion(
                                              cs_record_lid['rotation']),
                                          inverse=False)
            ego_to_cam = transform_matrix(cs_record_cam['translation'],
                                          Quaternion(
                                              cs_record_cam['rotation']),
                                          inverse=True)
            velo_to_cam = np.dot(ego_to_cam, lid_to_ego)

            # Convert from KITTI to nuScenes LIDAR coordinates, where we apply velo_to_cam.
            velo_to_cam_kitti = np.dot(velo_to_cam,
                                       kitti_to_nu_lidar.transformation_matrix)

            # Currently not used.
            imu_to_velo_kitti = np.zeros((3, 4))  # Dummy values.
            r0_rect = Quaternion(axis=[1, 0, 0], angle=0)  # Dummy values.

            # Projection matrix.
            p_left_kitti = np.zeros((3, 4))
            p_left_kitti[:3, :3] = cs_record_cam['camera_intrinsic']  # Cameras are always rectified.

            # Create KITTI style transforms.
            velo_to_cam_rot = velo_to_cam_kitti[:3, :3]
            velo_to_cam_trans = velo_to_cam_kitti[:3, 3]

            # Check that the rotation has the same format as in KITTI.
            assert (velo_to_cam_rot.round(0) == np.array([[0, -1, 0],
                                                          [0, 0, -1],
                                                          [1, 0, 0]])).all()
            assert (velo_to_cam_trans[1:3] < 0).all()

            # Retrieve the token from the lidar.
            # Note that this may be confusing as the filename of the camera will include the timestamp of the lidar,
            # not the camera.
            filename_cam_full = sd_record_cam['filename']
            filename_lid_full = sd_record_lid['filename']
            # token = '%06d' % token_idx # Alternative to use KITTI names.
            token_idx += 1

            # Convert image (jpg to png).
            src_im_path = os.path.join(self.nusc.dataroot, filename_cam_full)
            dst_im_path = os.path.join(image_folder, sample_name + '.png')
            if not os.path.exists(dst_im_path):
                im = Image.open(src_im_path)
                im.save(dst_im_path, "PNG")

            # Convert lidar.
            # Note that we are only using a single sweep, instead of the commonly used n sweeps.
            src_lid_path = os.path.join(self.nusc.dataroot, filename_lid_full)
            dst_lid_path = os.path.join(lidar_folder, sample_name + '.bin')
            assert not dst_lid_path.endswith('.pcd.bin')
            pcl = LidarPointCloud.from_file(src_lid_path)
            # pcl, _ = LidarPointCloud.from_file_multisweep_future(self.nusc, sample, self.lidar_name, self.lidar_name, nsweeps=5)
            pcl.rotate(
                kitti_to_nu_lidar_inv.rotation_matrix)  # In KITTI lidar frame.
            with open(dst_lid_path, "w") as lid_file:
                pcl.points.T.tofile(lid_file)

            # Add to tokens.
            tokens.append(sample_token)

            # Create calibration file.
            kitti_transforms = dict()
            kitti_transforms['P0'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P1'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P2'] = p_left_kitti  # Left camera transform.
            kitti_transforms['P3'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['R0_rect'] = r0_rect.rotation_matrix  # Cameras are already rectified.
            kitti_transforms['Tr_velo_to_cam'] = np.hstack(
                (velo_to_cam_rot, velo_to_cam_trans.reshape(3, 1)))
            kitti_transforms['Tr_imu_to_velo'] = imu_to_velo_kitti
            calib_path = os.path.join(calib_folder, sample_name + '.txt')
            with open(calib_path, "w") as calib_file:
                for key, val in kitti_transforms.items():
                    val_str = ' '.join('%.12e' % v for v in val.flatten())
                    calib_file.write('%s: %s\n' % (key, val_str))

            # Write label file.
            label_path = os.path.join(label_folder, sample_name + '.txt')
            if os.path.exists(label_path):
                # print('Skipping existing file: %s' % label_path)
                continue
            # else:
            #     print('Writing file: %s' % label_path)

            objects = []
            for sample_annotation_token in sample_annotation_tokens:
                sample_annotation = self.nusc.get('sample_annotation',
                                                  sample_annotation_token)

                # Get box in LIDAR frame.
                _, box_lidar_nusc, _ = self.nusc.get_sample_data(
                    lidar_token,
                    box_vis_level=BoxVisibility.NONE,
                    selected_anntokens=[sample_annotation_token])
                box_lidar_nusc = box_lidar_nusc[0]

                # Occluded: set to 0 (fully visible), as this information is not
                # available in nuScenes. Truncation is filled in later from the
                # 2d projection.
                occluded = 0

                obj = dict()
                obj["occluded"] = occluded  # Read back by box_to_string below.

                # Convert nuScenes category to nuScenes detection challenge category.
                obj["detection_name"] = category_to_detection_name(
                    sample_annotation['category_name'])

                # Skip categories that are not part of the nuScenes detection challenge.
                if obj["detection_name"] is None or obj[
                        "detection_name"] not in CLASS_MAP.keys():
                    continue

                obj["detection_name"] = CLASS_MAP[obj["detection_name"]]

                # Convert from nuScenes to KITTI box format.
                obj["box_cam_kitti"] = KittiDB.box_nuscenes_to_kitti(
                    box_lidar_nusc, Quaternion(matrix=velo_to_cam_rot),
                    velo_to_cam_trans, r0_rect)

                # Project 3d box to 2d box in image, ignore box if it does not fall inside.
                bbox_2d = project_to_2d(obj["box_cam_kitti"], p_left_kitti,
                                        imsize[1], imsize[0])
                if bbox_2d is None:
                    continue
                obj["bbox_2d"] = bbox_2d["bbox"]
                obj["truncated"] = bbox_2d["truncated"]

                # Set dummy score so we can use this file as result.
                obj["box_cam_kitti"].score = 0

                v = np.dot(obj["box_cam_kitti"].rotation_matrix,
                           np.array([1, 0, 0]))
                rot_y = -np.arctan2(v[2], v[0])
                obj["alpha"] = -np.arctan2(
                    obj["box_cam_kitti"].center[0],
                    obj["box_cam_kitti"].center[2]) + rot_y
                obj["depth"] = np.linalg.norm(
                    np.array(obj["box_cam_kitti"].center[:3]))
                objects.append(obj)

            objects = postprocessing(objects, imsize[1], imsize[0])

            with open(label_path, "w") as label_file:
                for obj in objects:
                    # Convert box to output string format.
                    output = box_to_string(name=obj["detection_name"],
                                           box=obj["box_cam_kitti"],
                                           bbox_2d=obj["bbox_2d"],
                                           truncation=obj["truncated"],
                                           occlusion=obj["occluded"],
                                           alpha=obj["alpha"])
                    label_file.write(output + '\n')
Example #7
    def nuscenes_gt_to_kitti(self) -> None:
        """
        Converts nuScenes GT annotations to KITTI format.
        """
        kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
        kitti_to_nu_lidar_inv = kitti_to_nu_lidar.inverse
        imsize = (1600, 900)

        token_idx = 0  # Start tokens from 0.

        # Create output folders.
        label_folder = os.path.join(self.output_dir, self.split, 'label_2')
        calib_folder = os.path.join(self.output_dir, self.split, 'calib')
        image_folder = os.path.join(self.output_dir, self.split, 'image_2')
        lidar_folder = os.path.join(self.output_dir, self.split, 'velodyne')
        radar_folder = os.path.join(self.output_dir, self.split, 'radar')
        for folder in [
                label_folder, calib_folder, image_folder, lidar_folder,
                radar_folder
        ]:
            if not os.path.isdir(folder):
                os.makedirs(folder)
        id_to_token_file = os.path.join(self.output_dir, self.split,
                                        'id2token.txt')
        id2token = open(id_to_token_file, "w+")

        # Use only the samples from the current split.
        split_logs = create_splits_logs(self.split, self.nusc)
        sample_tokens = self._split_to_samples(split_logs)
        sample_tokens = sample_tokens[:self.image_count]

        out_filenames = []
        for sample_token in tqdm(sample_tokens):
            # Get sample data.
            sample = self.nusc.get('sample', sample_token)
            sample_annotation_tokens = sample['anns']
            cam_token = sample['data'][self.cam_name]
            lidar_token = sample['data'][self.lidar_name]
            radar_tokens = []
            for radar_name in _C.RADARS.keys():
                radar_tokens.append(sample['data'][radar_name])

            # Retrieve sensor records.
            sd_record_cam = self.nusc.get('sample_data', cam_token)
            sd_record_lid = self.nusc.get('sample_data', lidar_token)
            cs_record_cam = self.nusc.get(
                'calibrated_sensor', sd_record_cam['calibrated_sensor_token'])
            cs_record_lid = self.nusc.get(
                'calibrated_sensor', sd_record_lid['calibrated_sensor_token'])
            sd_record_rad = []
            cs_record_rad = []
            for i, radar_token in enumerate(radar_tokens):
                sd_record_rad.append(self.nusc.get('sample_data', radar_token))
                cs_record_rad.append(
                    self.nusc.get('calibrated_sensor',
                                  sd_record_rad[i]['calibrated_sensor_token']))

            # Combine transformations and convert to KITTI format.
            # Note: cam uses same conventions in KITTI and nuScenes.
            lid_to_ego = transform_matrix(cs_record_lid['translation'],
                                          Quaternion(
                                              cs_record_lid['rotation']),
                                          inverse=False)
            ego_to_cam = transform_matrix(cs_record_cam['translation'],
                                          Quaternion(
                                              cs_record_cam['rotation']),
                                          inverse=True)
            rad_to_ego = []
            for cs_rec_rad in cs_record_rad:
                rad_to_ego.append(
                    transform_matrix(cs_rec_rad['translation'],
                                     Quaternion(cs_rec_rad['rotation']),
                                     inverse=False))

            velo_to_cam = np.dot(ego_to_cam, lid_to_ego)
            # # TODO: Assuming Radar point are going to be in ego coordinates
            # radar_to_cam = ego_to_cam

            # Convert from KITTI to nuScenes LIDAR coordinates, where we apply velo_to_cam.
            velo_to_cam_kitti = np.dot(velo_to_cam,
                                       kitti_to_nu_lidar.transformation_matrix)
            # # Nuscenes radars use same convention as KITTI lidar
            # radar_to_cam_kitti = radar_to_cam

            # Currently not used.
            imu_to_velo_kitti = np.zeros((3, 4))  # Dummy values.
            r0_rect = Quaternion(axis=[1, 0, 0], angle=0)  # Dummy values.

            # Projection matrix.
            p_left_kitti = np.zeros((3, 4))
            # Cameras are always rectified.
            p_left_kitti[:3, :3] = cs_record_cam['camera_intrinsic']

            # Create KITTI style transforms.
            velo_to_cam_rot = velo_to_cam_kitti[:3, :3]
            velo_to_cam_trans = velo_to_cam_kitti[:3, 3]
            # radar_to_cam_rot = radar_to_cam_kitti[:3, :3]
            # radar_to_cam_trans = radar_to_cam_kitti[:3, 3]

            # Check that the lidar rotation has the same format as in KITTI.
            assert (velo_to_cam_rot.round(0) == np.array([[0, -1, 0],
                                                          [0, 0, -1],
                                                          [1, 0, 0]])).all()
            assert (velo_to_cam_trans[1:3] < 0).all()

            # Retrieve the token from the lidar.
            # Note that this may be confusing as the filename of the camera will
            # include the timestamp of the lidar, not the camera.
            filename_cam_full = sd_record_cam['filename']
            filename_lid_full = sd_record_lid['filename']
            filename_rad_full = []
            for sd_rec_rad in sd_record_rad:
                filename_rad_full.append(sd_rec_rad['filename'])
            out_filename = '%06d' % token_idx  # Alternative to use KITTI names.
            # out_filename = sample_token

            # Write token to disk.
            text = sample_token
            id2token.write(text + '\n')
            id2token.flush()
            token_idx += 1

            # Convert image (jpg to png).
            src_im_path = os.path.join(self.nusc.dataroot, filename_cam_full)
            dst_im_path = os.path.join(image_folder, out_filename + '.png')

            if self.use_symlinks:
                # Create a symbolic link to the original nuScenes image
                # (skipped if it already exists, e.g. on re-runs).
                if not os.path.exists(dst_im_path):
                    os.symlink(os.path.abspath(src_im_path), dst_im_path)
            else:
                im = Image.open(src_im_path)
                im.save(dst_im_path, "PNG")

            # Convert lidar.
            src_lid_path = os.path.join(self.nusc.dataroot, filename_lid_full)
            dst_lid_path = os.path.join(lidar_folder, out_filename + '.bin')
            assert not dst_lid_path.endswith('.pcd.bin')
            # pcl = LidarPointCloud.from_file(src_lid_path)
            pcl, _ = LidarPointCloud.from_file_multisweep(
                nusc=self.nusc,
                sample_rec=sample,
                chan=self.lidar_name,
                ref_chan=self.lidar_name,
                nsweeps=self.lidar_sweeps,
                min_distance=1)

            pcl.rotate(
                kitti_to_nu_lidar_inv.rotation_matrix)  # In KITTI lidar frame.
            pcl.points = pcl.points.astype('float32')
            with open(dst_lid_path, "w") as lid_file:
                pcl.points.T.tofile(lid_file)

            # # Visualize pointclouds
            # _, ax = plt.subplots(1, 1, figsize=(9, 9))
            # points = view_points(pcl.points[:3, :], np.eye(4), normalize=False)
            # dists = np.sqrt(np.sum(pcl.points[:2, :] ** 2, axis=0))
            # colors = np.minimum(1, dists / 50)
            # ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)
            # # plt.show(block=False)
            # plt.show()

            # Convert radar.
            src_rad_path = []
            for filename_radar in filename_rad_full:
                src_rad_path.append(
                    os.path.join(self.nusc.dataroot, filename_radar))
            dst_rad_path = os.path.join(radar_folder, out_filename + '.bin')
            assert not dst_rad_path.endswith('.pcd.bin')
            pcl = RadarPointCloud(np.zeros((18, 0)))
            # Get radar points in the lidar coordinate system.
            for i, rad_path in enumerate(src_rad_path):
                pc, _ = RadarPointCloud.from_file_multisweep(
                    self.nusc,
                    sample_rec=sample,
                    chan=sd_record_rad[i]['channel'],
                    ref_chan=self.lidar_name,
                    nsweeps=self.radar_sweeps,
                    min_distance=0)

                # rot_matrix = Quaternion(cs_record_rad[i]['rotation']).rotation_matrix
                # pc.rotate(rot_matrix)
                # pc.translate(np.array(cs_record_rad[i]['translation']))
                pcl.points = np.hstack((pcl.points, pc.points))
            pcl.rotate(
                kitti_to_nu_lidar_inv.rotation_matrix)  # In KITTI lidar frame.
            ## Visualize pointclouds
            # _, ax = plt.subplots(1, 1, figsize=(9, 9))
            # points = view_points(pcl.points[:3, :], np.eye(4), normalize=False)
            # dists = np.sqrt(np.sum(pcl.points[:2, :] ** 2, axis=0))
            # colors = np.minimum(1, dists / 50)
            # ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)
            # plt.show()

            pcl.points = pcl.points.astype('float32')
            with open(dst_rad_path, "w") as rad_file:
                pcl.points.T.tofile(rad_file)

            # Add to tokens.
            out_filenames.append(out_filename)

            # Create calibration file.
            kitti_transforms = dict()
            kitti_transforms['P0'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P1'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P2'] = p_left_kitti  # Left camera transform.
            kitti_transforms['P3'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['R0_rect'] = r0_rect.rotation_matrix  # Cameras are already rectified.
            kitti_transforms['Tr_velo_to_cam'] = np.hstack(
                (velo_to_cam_rot, velo_to_cam_trans.reshape(3, 1)))
            # kitti_transforms['Tr_radar_to_cam'] = np.hstack((radar_to_cam_rot, radar_to_cam_trans.reshape(3, 1)))
            kitti_transforms['Tr_imu_to_velo'] = imu_to_velo_kitti
            calib_path = os.path.join(calib_folder, out_filename + '.txt')
            with open(calib_path, "w") as calib_file:
                for key, val in kitti_transforms.items():
                    val_str = ' '.join('%.12e' % v for v in val.flatten())
                    calib_file.write('%s: %s\n' % (key, val_str))

            # Write label file.
            label_path = os.path.join(label_folder, out_filename + '.txt')
            if os.path.exists(label_path):
                print('Skipping existing file: %s' % label_path)
                continue
            with open(label_path, "w") as label_file:
                for sample_annotation_token in sample_annotation_tokens:
                    sample_annotation = self.nusc.get('sample_annotation',
                                                      sample_annotation_token)

                    # Get box in LIDAR frame.
                    _, box_lidar_nusc, _ = self.nusc.get_sample_data(
                        lidar_token,
                        box_vis_level=BoxVisibility.NONE,
                        selected_anntokens=[sample_annotation_token])
                    box_lidar_nusc = box_lidar_nusc[0]

                    # Truncated: set to 0 (untruncated) for all objects.
                    truncated = 0.0

                    # Occluded: set to 0 (fully visible), as this information is not available in nuScenes.
                    occluded = 0

                    # Convert nuScenes category to nuScenes detection challenge category.
                    detection_name = _C.KITTI_CLASSES.get(
                        sample_annotation['category_name'])
                    # Skip categories that are not in the KITTI classes.
                    if detection_name is None:
                        continue

                    # Convert from nuScenes to KITTI box format.
                    box_cam_kitti = KittiDB.box_nuscenes_to_kitti(
                        box_lidar_nusc, Quaternion(matrix=velo_to_cam_rot),
                        velo_to_cam_trans, r0_rect)

                    # Project 3d box to 2d box in image, ignore box if it does not fall inside.
                    bbox_2d = KittiDB.project_kitti_box_to_image(box_cam_kitti,
                                                                 p_left_kitti,
                                                                 imsize=imsize)
                    if bbox_2d is None:
                        # If the box does not project into the image, write a
                        # zero 2d box instead of skipping the object.
                        bbox_2d = (0, 0, 0, 0)

                    # Set dummy score so we can use this file as result.
                    box_cam_kitti.score = 0

                    # Convert box to output string format.
                    output = KittiDB.box_to_string(name=detection_name,
                                                   box=box_cam_kitti,
                                                   bbox_2d=bbox_2d,
                                                   truncation=truncated,
                                                   occlusion=occluded)

                    # Write to disk.
                    label_file.write(output + '\n')
        id2token.close()
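A hypothetical driver for this converter; the class name and constructor arguments are inferred from the attributes the method reads (output_dir, split, cam_name, lidar_name, lidar_sweeps, radar_sweeps, use_symlinks, image_count) and may not match the real API exactly.

# Hypothetical usage; argument names are assumptions based on the
# attributes referenced in nuscenes_gt_to_kitti above.
converter = KittiConverter(output_dir='./nusc_kitti',
                           split='train',
                           cam_name='CAM_FRONT',
                           lidar_name='LIDAR_TOP',
                           lidar_sweeps=10,
                           radar_sweeps=1,
                           use_symlinks=False,
                           image_count=100)
converter.nuscenes_gt_to_kitti()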