Example #1
    def _get_all_radar_data(self, frame: dict, sample_rec: str, pose_rec,
                            nsweeps_radar: int) -> RadarPointCloud:
        """
        Concatenates all radar pointclouds from this sample into one pointcloud
        :param frame: the frame returned from the db for this sample
        :param sample_rec: the sample record dictionary from nuscenes
        :param pose_rec: ego pose record dictionary from nuscenes
        :param nsweeps_radar: number of sweeps to retrieve for each radar
        :return: RadarPointCloud with all points
        """

        all_radar_pcs = RadarPointCloud(np.zeros((18, 0)))  # 18 rows = RadarPointCloud.nbr_dims()

        for radar in _C.RADARS.keys():
            sample_data = self.nusc.get('sample_data', frame['sample'][radar])
            current_radar_pc = self._get_radar_data(sample_rec, sample_data,
                                                    nsweeps_radar)
            ## Vehicle to global
            if self.coordinates == 'global':
                current_radar_pc.rotate(
                    Quaternion(pose_rec['rotation']).rotation_matrix)
                current_radar_pc.translate(np.array(pose_rec['translation']))

            all_radar_pcs.points = np.hstack(
                (all_radar_pcs.points, current_radar_pc.points))

        return all_radar_pcs
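
A minimal usage sketch, assuming `dataset` is an instance of the class above with a loaded `nusc` handle; `frame` and `sample_token` are hypothetical inputs taken from its database, and the sweep count is illustrative:

# Hedged usage sketch for _get_all_radar_data (dataset, frame and sample_token are assumed).
sample_rec = dataset.nusc.get('sample', sample_token)
ref_sd_rec = dataset.nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
pose_rec = dataset.nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])

all_radar_pc = dataset._get_all_radar_data(frame, sample_rec, pose_rec, nsweeps_radar=5)
print(all_radar_pc.points.shape)  # (18, N): all radar points of the sample, stacked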
Example #2
def get_pc_from_file_multisweep(nusc,
                                sample_rec,
                                chan: str,
                                ref_chan: str,
                                nsweeps: int = 3,
                                min_distance: float = 1.0,
                                invalid_states=None,
                                dynprop_states=None,
                                ambig_states=None):
    """
    Returns a point cloud of multiple aggregated sweeps.
    Original function can be found at: https://github.com/nutonomy/nuscenes-devkit/blob/ae022aba3b37f07202ea402f8278979097738369/python-sdk/nuscenes/utils/data_classes.py#L56.
    The function is modified to accept additional input parameters, just like the get_pc_from_file function. This enables filtering by invalid_states, dynprop_states and ambig_states.

    Arguments:
        nusc: A NuScenes instance, <NuScenes>.
        sample_rec: The current sample, <dict>.
        chan: The radar/lidar channel from which we track back n sweeps to aggregate the point cloud, <str>.
        ref_chan: The reference channel of the current sample_rec that the point clouds are mapped to, <str>.
        nsweeps: Number of sweeps to aggregate, <int>.
        min_distance: Distance below which points are discarded, <float>.
        invalid_states: Radar invalid states to keep, <int List>.
        dynprop_states: Radar dynamic-property states to keep, <int List>. Use [0, 2, 6] for moving objects only.
        ambig_states: Radar ambiguity states to keep, <int List>.
    """
    # Initialize an empty point cloud with the correct number of dimensions.
    if chan == 'LIDAR_TOP':
        points = np.zeros((get_number_of_lidar_pc_dimensions(), 0))
        all_pc = LidarPointCloud(points)
    else:
        points = np.zeros((get_number_of_radar_pc_dimensions(), 0))
        all_pc = RadarPointCloud(points)

    all_times = np.zeros((1, 0))

    # Get reference pose and timestamp.
    ref_sd_token = sample_rec['data'][ref_chan]
    ref_sd_rec = nusc.get('sample_data', ref_sd_token)
    ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
    ref_cs_rec = nusc.get('calibrated_sensor',
                          ref_sd_rec['calibrated_sensor_token'])
    ref_time = 1e-6 * ref_sd_rec['timestamp']

    # Homogeneous transform from ego car frame to reference frame.
    ref_from_car = transform_matrix(ref_cs_rec['translation'],
                                    Quaternion(ref_cs_rec['rotation']),
                                    inverse=True)

    # Homogeneous transformation matrix from global to _current_ ego car frame.
    car_from_global = transform_matrix(ref_pose_rec['translation'],
                                       Quaternion(ref_pose_rec['rotation']),
                                       inverse=True)

    # Aggregate current and previous sweeps.
    sample_data_token = sample_rec['data'][chan]
    current_sd_rec = nusc.get('sample_data', sample_data_token)
    for _ in range(nsweeps):
        # Load up the pointcloud.
        if chan == 'LIDAR_TOP':
            current_pc = LidarPointCloud.from_file(file_name=os.path.join(
                nusc.dataroot, current_sd_rec['filename']))
        else:
            current_pc = RadarPointCloud.from_file(
                file_name=os.path.join(nusc.dataroot,
                                       current_sd_rec['filename']),
                invalid_states=invalid_states,
                dynprop_states=dynprop_states,
                ambig_states=ambig_states)

        # Get past pose.
        current_pose_rec = nusc.get('ego_pose',
                                    current_sd_rec['ego_pose_token'])
        global_from_car = transform_matrix(current_pose_rec['translation'],
                                           Quaternion(
                                               current_pose_rec['rotation']),
                                           inverse=False)

        # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
        current_cs_rec = nusc.get('calibrated_sensor',
                                  current_sd_rec['calibrated_sensor_token'])
        car_from_current = transform_matrix(current_cs_rec['translation'],
                                            Quaternion(
                                                current_cs_rec['rotation']),
                                            inverse=False)

        # Fuse four transformation matrices into one and perform transform.
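        # Chain: current sensor frame -> current ego frame -> global frame -> reference ego frame -> reference sensor frame.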
        trans_matrix = reduce(
            np.dot,
            [ref_from_car, car_from_global, global_from_car, car_from_current])
        current_pc.transform(trans_matrix)

        # Remove close points and add timevector.
        current_pc.remove_close(min_distance)
        time_lag = ref_time - 1e-6 * current_sd_rec[
            'timestamp']  # Positive difference.
        times = time_lag * np.ones((1, current_pc.nbr_points()))
        all_times = np.hstack((all_times, times))

        # Merge with key pc.
        all_pc.points = np.hstack((all_pc.points, current_pc.points))

        # Abort if there are no previous sweeps.
        if current_sd_rec['prev'] == '':
            break
        else:
            current_sd_rec = nusc.get('sample_data', current_sd_rec['prev'])

    return all_pc, all_times
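
A hedged usage sketch, assuming `nusc` is an instantiated NuScenes object and `sample_rec` one of its sample records; the channel names are standard nuScenes channels, and the state lists are example values only:

# Usage sketch: aggregate 5 radar sweeps from the front radar into the LIDAR_TOP
# frame, keeping only moving targets (dynprop states 0, 2 and 6).
pc, times = get_pc_from_file_multisweep(nusc,
                                        sample_rec,
                                        chan='RADAR_FRONT',
                                        ref_chan='LIDAR_TOP',
                                        nsweeps=5,
                                        min_distance=1.0,
                                        invalid_states=[0],
                                        dynprop_states=[0, 2, 6],
                                        ambig_states=[3])
print(pc.points.shape, times.shape)  # (18, N) radar points and (1, N) time lags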
Example #3
    def load_image(self, image_index):
        """
        Returns the image plus from given image and radar samples.
        It takes the requested channels into account.

        :param sample_token: [str] the token pointing to a certain sample

        :returns: imageplus
        """
        # Initialize local variables
        radar_name = self.radar_sensors[0]
        camera_name = self.camera_sensors[0]

        # Getting data from the nuScenes database
        sample_token = self.sample_tokens[image_index]
        sample = self.nusc.get('sample', sample_token)

        # Grab the front camera and the radar sensor.
        radar_token = sample['data'][radar_name]
        camera_token = sample['data'][camera_name]
        image_target_shape = (self.image_min_side, self.image_max_side)

        # Load the image
        image_sample = self.load_sample_data(sample, camera_name)

        # Add noise to the image if enabled
        if self.noisy_image_method is not None and self.noise_factor > 0:
            image_sample = noisy(self.noisy_image_method, image_sample,
                                 self.noise_factor)

        if self._is_image_plus_enabled() or self.camera_dropout > 0.0:

            # Parameters
            kwargs = {
                'pointsensor_token': radar_token,
                'camera_token': camera_token,
                'height': (0, self.radar_projection_height),
                'image_target_shape': image_target_shape,
                'clear_radar': np.random.rand() < self.radar_dropout,
                'clear_image': np.random.rand() < self.camera_dropout,
            }

            # Create image plus
            # radar_sample = self.load_sample_data(sample, radar_name) # Load samples from disk

            # Determine how many sweeps are required (the noise filter may need additional sweeps)
            if self.noise_filter:
                required_sweep_count = self.n_sweeps + self.noise_filter.num_sweeps_required - 1
            else:
                required_sweep_count = self.n_sweeps

            # sd_rec = self.nusc.get('sample_data', sample['data'][sensor_channel])
            sensor_channel = radar_name
            pcs, times = RadarPointCloud.from_file_multisweep(self.nusc, sample, sensor_channel, \
                sensor_channel, nsweeps=required_sweep_count, min_distance=0.0, merge=False)

            if self.noise_filter:
                # fill up with zero sweeps
                for _ in range(required_sweep_count - len(pcs)):
                    pcs.insert(
                        0,
                        RadarPointCloud(
                            np.zeros(shape=(RadarPointCloud.nbr_dims(), 0))))

            radar_sample = [radar.enrich_radar_data(pc.points) for pc in pcs]

            if self.noise_filter:
                ##### Filter the pcs #####
                radar_sample = list(
                    self.noise_filter.denoise(radar_sample, self.n_sweeps))

            if len(radar_sample) == 0:
                radar_sample = np.zeros(shape=(len(radar.channel_map), 0))
            else:
                ##### merge pcs into single radar samples array #####
                radar_sample = np.concatenate(radar_sample, axis=-1)

            radar_sample = radar_sample.astype(dtype=np.float32)

            if self.perfect_noise_filter:
                cartesian_uncertainty = 0.5  # meters
                angular_uncertainty = math.radians(1.7)  # 1.7 degrees
                category_selection = self.noise_category_selection

                nusc_sample_data = self.nusc.get('sample_data', radar_token)
                radar_gt_mask = calc_mask(nusc=self.nusc, nusc_sample_data=nusc_sample_data, points3d=radar_sample[0:3,:], \
                    tolerance=cartesian_uncertainty, angle_tolerance=angular_uncertainty, \
                    category_selection=category_selection)

                # radar_sample = radar_sample[:, radar_gt_mask.astype(np.bool)]
                radar_sample = np.compress(radar_gt_mask,
                                           radar_sample,
                                           axis=-1)

            if self.normalize_radar:
                # Normalize the radar channels with a preprocessing step
                # analogous to the image preprocessing.
                sigma_factor = int(self.normalize_radar)
                for ch in range(
                        3, radar_sample.shape[0]
                ):  # neural fusion requires x, y and z to stay unnormalized
                    norm_interval = (
                        -127.5, 127.5
                    )  # caffe mode is the default and uses this interval for images
                    radar_sample[ch, :] = radar.normalize(
                        ch,
                        radar_sample[ch, :],
                        normalization_interval=norm_interval,
                        sigma_factor=sigma_factor)

            img_p_full = self.image_plus_creation(self.nusc,
                                                  image_data=image_sample,
                                                  radar_data=radar_sample,
                                                  **kwargs)

            # reduce to requested channels
            # self.channels = [ch - 1 for ch in self.channels]  # Shift channels by 1 because of a convention starting at 1
            input_data = img_p_full[:, :, self.channels]

        else:  # We are not in image_plus mode
            # Only resize, because in the other case this is contained in image_plus_creation
            input_data = cv2.resize(image_sample, image_target_shape[::-1])

        return input_data
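
A hedged usage sketch, assuming `generator` is an already-configured instance of the generator class above; the index is illustrative:

# Usage sketch: load the fused image-plus data for the first sample.
input_data = generator.load_image(0)
# In image-plus mode this is expected to be a (height, width, len(generator.channels))
# array holding the camera image plus the projected radar channels.
print(input_data.shape)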
Example #4
    def dataset_mapper(self, frame):
        """
        Add sensor data in vehicle or global coordinates to the frame
        
        :param frame (dict): A frame dictionary from the db (no sensor data)
        :return frame (dict): frame with all sensor data
        """
        sample_record = self.get('sample', frame['sample_token'])
        ref_sd_record = self.get(
            'sample_data', sample_record['data'][self.cfg.REF_POSE_CHANNEL])
        ref_pose_record = self.get('ego_pose', ref_sd_record['ego_pose_token'])
        ref_cs_record = self.get('calibrated_sensor',
                                 ref_sd_record['calibrated_sensor_token'])
        frame['ref_pose_record'] = ref_pose_record

        ## Load camera data
        cams = frame.get('camera', [])
        for cam in cams:
            sd_record = self.get('sample_data', cam['token'])
            filename = osp.join(self.dataroot, sd_record['filename'])
            image = self.get_camera_data(filename)
            cam['image'] = image
            cam['height'] = sd_record['height']
            cam['width'] = sd_record['width']
            cam['filename'] = filename
            cam['cs_record'] = self.get('calibrated_sensor',
                                        sd_record['calibrated_sensor_token'])
            cam['pose_record'] = self.get('ego_pose',
                                          sd_record['ego_pose_token'])

        ## Load annotations in vehicle coordinates
        frame['anns'] = self._get_anns(frame['anns'], ref_pose_record)

        ## Filter anns outside image if in 'camera' mode:
        if self.cfg.SAMPLE_MODE == 'camera':
            filtered_anns = []
            for ann in frame['anns']:
                box_cam = nsutils.map_annotation_to_camera(
                    ann['box_3d'], cam['cs_record'], cam['pose_record'],
                    ref_pose_record, self.cfg.COORDINATES)
                cam_intrinsic = np.array(cam['cs_record']['camera_intrinsic'])
                if not box_in_image(box_cam, cam_intrinsic, (1600, 900)):
                    continue
                if self.cfg.GEN_2D_BBOX:
                    ann['box_2d'] = nsutils.box_3d_to_2d_simple(
                        box_cam, cam_intrinsic, (1600, 900))
                filtered_anns.append(ann)
            frame['anns'] = filtered_anns

        ## Load LIDAR data in vehicle coordinates
        if 'lidar' in frame:
            lidar = frame['lidar']
            sd_record = self.get('sample_data', lidar['token'])
            lidar['cs_record'] = self.get('calibrated_sensor',
                                          sd_record['calibrated_sensor_token'])
            lidar['pose_record'] = self.get('ego_pose',
                                            sd_record['ego_pose_token'])
            lidar_pc, _ = LidarPointCloud.from_file_multisweep(
                self,
                sample_record,
                lidar['channel'],
                lidar['channel'],
                nsweeps=self.cfg.LIDAR_SWEEPS,
                min_distance=self.cfg.PC_MIN_DIST)
            ## Take from sensor to vehicle coordinates
            lidar_pc = nsutils.sensor_to_vehicle(lidar_pc, lidar['cs_record'])

            ## filter points outside the image if in 'camera' mode
            if self.cfg.SAMPLE_MODE == "camera":
                cam = frame['camera'][0]
                cam_intrinsic = np.array(cam['cs_record']['camera_intrinsic'])
                lidar_pc_cam, _ = nsutils.map_pointcloud_to_camera(
                    lidar_pc,
                    cam_cs_record=cam['cs_record'],
                    cam_pose_record=cam['pose_record'],
                    pointsensor_pose_record=frame['lidar']['pose_record'],
                    coordinates=frame['coordinates'])
                _, _, mask = nsutils.map_pointcloud_to_image(lidar_pc_cam,
                                                             cam_intrinsic,
                                                             img_shape=(1600,
                                                                        900))
                lidar_pc.points = lidar_pc.points[:, mask]
            frame['lidar']['pointcloud'] = lidar_pc

        ## Load Radar data in vehicle coordinates
        if 'radar' in frame:
            all_radar_pcs = RadarPointCloud(np.zeros((18, 0)))
            for radar in frame['radar']:
                radar_pc, _ = RadarPointCloud.from_file_multisweep(
                    self,
                    sample_record,
                    radar['channel'],
                    self.cfg.REF_POSE_CHANNEL,
                    nsweeps=self.cfg.RADAR_SWEEPS,
                    min_distance=self.cfg.PC_MIN_DIST)
                radar_pc = nsutils.sensor_to_vehicle(radar_pc, ref_cs_record)

                ## filter points outside the image if in 'camera' mode
                if self.cfg.SAMPLE_MODE == "camera":
                    cam = frame['camera'][0]
                    cam_intrinsic = np.array(
                        cam['cs_record']['camera_intrinsic'])
                    radar_pc_cam, _ = nsutils.map_pointcloud_to_camera(
                        radar_pc,
                        cam_cs_record=cam['cs_record'],
                        cam_pose_record=cam['pose_record'],
                        pointsensor_pose_record=ref_pose_record,
                        coordinates=frame['coordinates'])
                    _, _, mask = nsutils.map_pointcloud_to_image(
                        radar_pc_cam, cam_intrinsic, img_shape=(1600, 900))
                    radar_pc.points = radar_pc.points[:, mask]

                all_radar_pcs.points = np.hstack(
                    (all_radar_pcs.points, radar_pc.points))

            frame['radar'] = {}
            frame['radar']['pointcloud'] = all_radar_pcs
            frame['radar']['pose_record'] = ref_pose_record
            frame['radar']['cs_record'] = ref_cs_record

        return frame
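
A hedged usage sketch, assuming `db` is an instance of the dataset class above and `raw_frame` is a frame dictionary from its database that does not yet contain sensor data:

# Usage sketch: attach camera, lidar and radar data to one frame.
frame = db.dataset_mapper(raw_frame)
if 'lidar' in frame:
    print(frame['lidar']['pointcloud'].points.shape)  # (4, N) lidar points in vehicle coordinates
if 'radar' in frame:
    print(frame['radar']['pointcloud'].points.shape)  # (18, N) aggregated radar points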
Example #5
    def nuscenes_gt_to_kitti(self) -> None:
        """
        Converts nuScenes GT annotations to KITTI format.
        """
        kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
        kitti_to_nu_lidar_inv = kitti_to_nu_lidar.inverse
        imsize = (1600, 900)

        token_idx = 0  # Start tokens from 0.

        # Create output folders.
        label_folder = os.path.join(self.output_dir, self.split, 'label_2')
        calib_folder = os.path.join(self.output_dir, self.split, 'calib')
        image_folder = os.path.join(self.output_dir, self.split, 'image_2')
        lidar_folder = os.path.join(self.output_dir, self.split, 'velodyne')
        radar_folder = os.path.join(self.output_dir, self.split, 'radar')
        for folder in [
                label_folder, calib_folder, image_folder, lidar_folder,
                radar_folder
        ]:
            if not os.path.isdir(folder):
                os.makedirs(folder)
        id_to_token_file = os.path.join(self.output_dir, self.split,
                                        'id2token.txt')
        id2token = open(id_to_token_file, "w+")

        # Use only the samples from the current split.
        split_logs = create_splits_logs(self.split, self.nusc)
        sample_tokens = self._split_to_samples(split_logs)
        sample_tokens = sample_tokens[:self.image_count]

        out_filenames = []
        for sample_token in tqdm(sample_tokens):
            # Get sample data.
            sample = self.nusc.get('sample', sample_token)
            sample_annotation_tokens = sample['anns']
            cam_token = sample['data'][self.cam_name]
            lidar_token = sample['data'][self.lidar_name]
            radar_tokens = []
            for radar_name in _C.RADARS.keys():
                radar_tokens.append(sample['data'][radar_name])

            # Retrieve sensor records.
            sd_record_cam = self.nusc.get('sample_data', cam_token)
            sd_record_lid = self.nusc.get('sample_data', lidar_token)
            cs_record_cam = self.nusc.get(
                'calibrated_sensor', sd_record_cam['calibrated_sensor_token'])
            cs_record_lid = self.nusc.get(
                'calibrated_sensor', sd_record_lid['calibrated_sensor_token'])
            sd_record_rad = []
            cs_record_rad = []
            for i, radar_token in enumerate(radar_tokens):
                sd_record_rad.append(self.nusc.get('sample_data', radar_token))
                cs_record_rad.append(
                    self.nusc.get('calibrated_sensor',
                                  sd_record_rad[i]['calibrated_sensor_token']))

            # Combine transformations and convert to KITTI format.
            # Note: cam uses same conventions in KITTI and nuScenes.
            lid_to_ego = transform_matrix(cs_record_lid['translation'],
                                          Quaternion(
                                              cs_record_lid['rotation']),
                                          inverse=False)
            ego_to_cam = transform_matrix(cs_record_cam['translation'],
                                          Quaternion(
                                              cs_record_cam['rotation']),
                                          inverse=True)
            rad_to_ego = []
            for cs_rec_rad in cs_record_rad:
                rad_to_ego.append(
                    transform_matrix(cs_rec_rad['translation'],
                                     Quaternion(cs_rec_rad['rotation']),
                                     inverse=False))

            velo_to_cam = np.dot(ego_to_cam, lid_to_ego)
            # # TODO: Assuming radar points are going to be in ego coordinates
            # radar_to_cam = ego_to_cam

            # Convert from KITTI to nuScenes LIDAR coordinates, where we apply velo_to_cam.
            velo_to_cam_kitti = np.dot(velo_to_cam,
                                       kitti_to_nu_lidar.transformation_matrix)
            # # Nuscenes radars use same convention as KITTI lidar
            # radar_to_cam_kitti = radar_to_cam

            # Currently not used.
            imu_to_velo_kitti = np.zeros((3, 4))  # Dummy values.
            r0_rect = Quaternion(axis=[1, 0, 0], angle=0)  # Dummy values.

            # Projection matrix.
            p_left_kitti = np.zeros((3, 4))
            # Cameras are always rectified.
            p_left_kitti[:3, :3] = cs_record_cam['camera_intrinsic']

            # Create KITTI style transforms.
            velo_to_cam_rot = velo_to_cam_kitti[:3, :3]
            velo_to_cam_trans = velo_to_cam_kitti[:3, 3]
            # radar_to_cam_rot = radar_to_cam_kitti[:3, :3]
            # radar_to_cam_trans = radar_to_cam_kitti[:3, 3]

            # Check that the lidar rotation has the same format as in KITTI.
            assert (velo_to_cam_rot.round(0) == np.array([[0, -1,
                                                           0], [0, 0, -1],
                                                          [1, 0, 0]])).all()
            assert (velo_to_cam_trans[1:3] < 0).all()

            # Retrieve the token from the lidar.
            # Note that this may be confusing as the filename of the camera will
            # include the timestamp of the lidar, not the camera.
            filename_cam_full = sd_record_cam['filename']
            filename_lid_full = sd_record_lid['filename']
            filename_rad_full = []
            for sd_rec_rad in sd_record_rad:
                filename_rad_full.append(sd_rec_rad['filename'])
            out_filename = '%06d' % token_idx  # KITTI-style sequential name (the sample token is an alternative).
            # out_filename = sample_token

            # Write token to disk.
            text = sample_token
            id2token.write(text + '\n')
            id2token.flush()
            token_idx += 1

            # Convert image (jpg to png).
            src_im_path = os.path.join(self.nusc.dataroot, filename_cam_full)
            dst_im_path = os.path.join(image_folder, out_filename + '.png')

            if self.use_symlinks:
                # Create symbolic links to nuscenes images
                os.symlink(os.path.abspath(src_im_path), dst_im_path)
            else:
                im = Image.open(src_im_path)
                im.save(dst_im_path, "PNG")

            # Convert lidar.
            src_lid_path = os.path.join(self.nusc.dataroot, filename_lid_full)
            dst_lid_path = os.path.join(lidar_folder, out_filename + '.bin')
            assert not dst_lid_path.endswith('.pcd.bin')
            # pcl = LidarPointCloud.from_file(src_lid_path)
            pcl, _ = LidarPointCloud.from_file_multisweep(
                nusc=self.nusc,
                sample_rec=sample,
                chan=self.lidar_name,
                ref_chan=self.lidar_name,
                nsweeps=self.lidar_sweeps,
                min_distance=1)

            pcl.rotate(
                kitti_to_nu_lidar_inv.rotation_matrix)  # In KITTI lidar frame.
            pcl.points = pcl.points.astype('float32')
            with open(dst_lid_path, "wb") as lid_file:
                pcl.points.T.tofile(lid_file)

            # # Visualize pointclouds
            # _, ax = plt.subplots(1, 1, figsize=(9, 9))
            # points = view_points(pcl.points[:3, :], np.eye(4), normalize=False)
            # dists = np.sqrt(np.sum(pcl.points[:2, :] ** 2, axis=0))
            # colors = np.minimum(1, dists / 50)
            # ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)
            # # plt.show(block=False)
            # plt.show()

            # Convert radar.
            src_rad_path = []
            for filename_radar in filename_rad_full:
                src_rad_path.append(
                    os.path.join(self.nusc.dataroot, filename_radar))
            dst_rad_path = os.path.join(radar_folder, out_filename + '.bin')
            assert not dst_rad_path.endswith('.pcd.bin')
            pcl = RadarPointCloud(np.zeros((18, 0)))
            ## Get Radar points in Lidar coordinate system
            for i, rad_path in enumerate(src_rad_path):
                pc, _ = RadarPointCloud.from_file_multisweep(
                    self.nusc,
                    sample_rec=sample,
                    chan=sd_record_rad[i]['channel'],
                    ref_chan=self.lidar_name,
                    nsweeps=self.radar_sweeps,
                    min_distance=0)

                # rot_matrix = Quaternion(cs_record_rad[i]['rotation']).rotation_matrix
                # pc.rotate(rot_matrix)
                # pc.translate(np.array(cs_record_rad[i]['translation']))
                pcl.points = np.hstack((pcl.points, pc.points))
            pcl.rotate(
                kitti_to_nu_lidar_inv.rotation_matrix)  # In KITTI lidar frame.
            ## Visualize pointclouds
            # _, ax = plt.subplots(1, 1, figsize=(9, 9))
            # points = view_points(pcl.points[:3, :], np.eye(4), normalize=False)
            # dists = np.sqrt(np.sum(pcl.points[:2, :] ** 2, axis=0))
            # colors = np.minimum(1, dists / 50)
            # ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)
            # plt.show()

            pcl.points = pcl.points.astype('float32')
            with open(dst_rad_path, "wb") as rad_file:
                pcl.points.T.tofile(rad_file)

            # Add to tokens.
            out_filenames.append(out_filename)

            # Create calibration file.
            kitti_transforms = dict()
            kitti_transforms['P0'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P1'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P2'] = p_left_kitti  # Left camera transform.
            kitti_transforms['P3'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms[
                'R0_rect'] = r0_rect.rotation_matrix  # Cameras are already rectified.
            kitti_transforms['Tr_velo_to_cam'] = np.hstack(
                (velo_to_cam_rot, velo_to_cam_trans.reshape(3, 1)))
            # kitti_transforms['Tr_radar_to_cam'] = np.hstack((radar_to_cam_rot, radar_to_cam_trans.reshape(3, 1)))
            kitti_transforms['Tr_imu_to_velo'] = imu_to_velo_kitti
            calib_path = os.path.join(calib_folder, out_filename + '.txt')
            with open(calib_path, "w") as calib_file:
                for (key, val) in kitti_transforms.items():
                    val = val.flatten()
                    val_str = '%.12e' % val[0]
                    for v in val[1:]:
                        val_str += ' %.12e' % v
                    calib_file.write('%s: %s\n' % (key, val_str))

            # Write label file.
            label_path = os.path.join(label_folder, out_filename + '.txt')
            if os.path.exists(label_path):
                print('Skipping existing file: %s' % label_path)
                continue
            with open(label_path, "w") as label_file:
                for sample_annotation_token in sample_annotation_tokens:
                    sample_annotation = self.nusc.get('sample_annotation',
                                                      sample_annotation_token)

                    # Get box in LIDAR frame.
                    _, box_lidar_nusc, _ = self.nusc.get_sample_data(
                        lidar_token,
                        box_vis_level=BoxVisibility.NONE,
                        selected_anntokens=[sample_annotation_token])
                    box_lidar_nusc = box_lidar_nusc[0]

                    # Truncated: Set all objects to 0 which means untruncated.
                    truncated = 0.0

                    # Occluded: Set all objects to full visibility as this information is not available in nuScenes.
                    occluded = 0

                    # Convert nuScenes category to nuScenes detection challenge category.
                    detection_name = _C.KITTI_CLASSES.get(
                        sample_annotation['category_name'])
                    # Skip categories that are not in the KITTI classes.
                    if detection_name is None:
                        continue

                    # Convert from nuScenes to KITTI box format.
                    box_cam_kitti = KittiDB.box_nuscenes_to_kitti(
                        box_lidar_nusc, Quaternion(matrix=velo_to_cam_rot),
                        velo_to_cam_trans, r0_rect)

                    # Project 3d box to 2d box in image, ignore box if it does not fall inside.
                    bbox_2d = KittiDB.project_kitti_box_to_image(box_cam_kitti,
                                                                 p_left_kitti,
                                                                 imsize=imsize)
                    if bbox_2d is None:
                        # continue
                        ## If box is not inside the image, 2D boxes are set to zero
                        bbox_2d = (0, 0, 0, 0)

                    # Set dummy score so we can use this file as result.
                    box_cam_kitti.score = 0

                    # Convert box to output string format.
                    output = KittiDB.box_to_string(name=detection_name,
                                                   box=box_cam_kitti,
                                                   bbox_2d=bbox_2d,
                                                   truncation=truncated,
                                                   occlusion=occluded)

                    # Write to disk.
                    label_file.write(output + '\n')
        id2token.close()
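
A hedged usage sketch, assuming `converter` is an already-configured instance of the converter class above (output directory, split, sensor names and sweep counts set beforehand):

# Usage sketch: run the ground-truth conversion and rely on the folder layout created above.
converter.nuscenes_gt_to_kitti()
# Expected output under <output_dir>/<split>/:
#   image_2/000000.png, velodyne/000000.bin, radar/000000.bin,
#   calib/000000.txt, label_2/000000.txt and id2token.txt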