Example #1
def from_file(file_name: str):
    scan = np.fromfile(file_name, dtype=np.float32)
    points = scan.reshape((-1, 5))
    xyzr = points[:, :LidarPointCloud.nbr_dims()]
    ring = points[:, 4]
    xyzr[:, 3] = ring
    return LidarPointCloud(xyzr.T)
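A minimal usage sketch for the loader above; the imports and the file path are assumptions, not part of the original example:
import numpy as np
from nuscenes.utils.data_classes import LidarPointCloud

pc = from_file('samples/LIDAR_TOP/some_sweep.pcd.bin')  # placeholder path to a nuScenes .bin file
assert pc.points.shape[0] == 4  # rows: x, y, z and ring index (intensity is overwritten above)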
Example #2
    def test_load_pointclouds(self):
        """
        Loads up lidar and radar pointclouds.
        """
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        dataroot = os.environ['NUSCENES']
        nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=False)
        sample_rec = nusc.sample[0]
        lidar_name = nusc.get('sample_data',
                              sample_rec['data']['LIDAR_TOP'])['filename']
        radar_name = nusc.get('sample_data',
                              sample_rec['data']['RADAR_FRONT'])['filename']
        lidar_path = os.path.join(dataroot, lidar_name)
        radar_path = os.path.join(dataroot, radar_name)
        pc1 = LidarPointCloud.from_file(lidar_path)
        pc2 = RadarPointCloud.from_file(radar_path)
        pc3, _ = LidarPointCloud.from_file_multisweep(nusc,
                                                      sample_rec,
                                                      'LIDAR_TOP',
                                                      'LIDAR_TOP',
                                                      nsweeps=2)
        pc4, _ = RadarPointCloud.from_file_multisweep(nusc,
                                                      sample_rec,
                                                      'RADAR_FRONT',
                                                      'RADAR_FRONT',
                                                      nsweeps=2)

        # Check for valid dimensions.
        assert pc1.points.shape[0] == pc3.points.shape[0] == 4, \
            'Error: Invalid dimension for lidar pointcloud!'
        assert pc2.points.shape[0] == pc4.points.shape[0] == 18, \
            'Error: Invalid dimension for radar pointcloud!'
        assert pc1.points.dtype == pc3.points.dtype, 'Error: Invalid dtype for lidar pointcloud!'
        assert pc2.points.dtype == pc4.points.dtype, 'Error: Invalid dtype for radar pointcloud!'
Example #3
    def render_annotation(self,
                          anntoken: str,
                          margin: float = 10,
                          view: np.ndarray = np.eye(4),
                          box_vis_level: BoxVisibility = BoxVisibility.ANY) -> None:
        """
        Render selected annotation.
        :param anntoken: Sample_annotation token.
        :param margin: How many meters in each direction to include in LIDAR view.
        :param view: LIDAR view point.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        """

        ann_record = self.nusc.get('sample_annotation', anntoken)
        sample_record = self.nusc.get('sample', ann_record['sample_token'])
        assert 'LIDAR_TOP' in sample_record['data'].keys(), "No LIDAR_TOP in data, cannot render."

        fig, axes = plt.subplots(1, 2, figsize=(18, 9))

        # Figure out which camera the object is fully visible in (this may return nothing)
        boxes, cam = [], []
        cams = [key for key in sample_record['data'].keys() if 'CAM' in key]
        for cam in cams:
            _, boxes, _ = self.nusc.get_sample_data(sample_record['data'][cam], box_vis_level=box_vis_level,
                                                    selected_anntokens=[anntoken])
            if len(boxes) > 0:
                break  # We found an image that matches. Let's abort.
        assert len(boxes) > 0, "Could not find image where annotation is visible. Try using e.g. BoxVisibility.ANY."
        assert len(boxes) < 2, "Found multiple annotations. Something is wrong!"

        cam = sample_record['data'][cam]

        # Plot LIDAR view
        lidar = sample_record['data']['LIDAR_TOP']
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(lidar, selected_anntokens=[anntoken])
        LidarPointCloud.from_file(data_path).render_height(axes[0], view=view)
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[0], view=view, colors=(c, c, c))
            corners = view_points(box.corners(), view, False)[:2, :]
            axes[0].set_xlim([np.min(corners[0, :]) - margin, np.max(corners[0, :]) + margin])
            axes[0].set_ylim([np.min(corners[1, :]) - margin, np.max(corners[1, :]) + margin])
            axes[0].axis('off')
            axes[0].set_aspect('equal')

        # Plot CAMERA view
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(cam, selected_anntokens=[anntoken])
        im = Image.open(data_path)
        axes[1].imshow(im)
        axes[1].set_title(self.nusc.get('sample_data', cam)['channel'])
        axes[1].axis('off')
        axes[1].set_aspect('equal')
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[1], view=camera_intrinsic, normalize=True, colors=(c, c, c))
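A minimal usage sketch for the method above, assuming it is exposed through a NuScenes instance as in the devkit (the dataroot is a placeholder):
from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)
ann_token = nusc.sample_annotation[0]['token']  # any sample_annotation token
nusc.render_annotation(ann_token, margin=10)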
Example #4
    def project_pts_to_image(self, pointcloud: LidarPointCloud,
                             token: str) -> np.ndarray:
        """
        Project lidar points into image.
        :param pointcloud: The LidarPointCloud in nuScenes lidar frame.
        :param token: Unique KITTI token.
        :return: <np.float: 3, n>. X, Y are points in image pixel coordinates. Z is depth in image.
        """

        # Copy and convert pointcloud.
        pc_image = LidarPointCloud(points=pointcloud.points.copy())
        pc_image.rotate(self.kitti_to_nu_lidar_inv)  # Rotate to KITTI lidar.

        # Transform pointcloud to camera frame.
        transforms = self.get_transforms(token, root=self.root)
        pc_image.rotate(transforms['velo_to_cam']['R'])
        pc_image.translate(transforms['velo_to_cam']['T'])

        # Project to image.
        depth = pc_image.points[2, :]
        points_fov = view_points(pc_image.points[:3, :],
                                 transforms['p_combined'],
                                 normalize=True)
        points_fov[2, :] = depth

        return points_fov
Example #5
def parse_sample(sample_token, nusc):

    ret_dict = {}
    sample_data = nusc.get('sample', sample_token)
    timestamp = sample_data['timestamp']

    # First step: get the lidar points in global frame coordinates.
    lidar_token = sample_data['data']['LIDAR_TOP']
    lidar_data = nusc.get('sample_data', lidar_token)
    pcl_path = osp.join(nusc.dataroot, lidar_data['filename'])
    pc = LidarPointCloud.from_file(pcl_path)

    # The lidar points start in the point sensor frame; transform them to the ego vehicle frame.

    cs_record = nusc.get('calibrated_sensor', lidar_data['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform from ego to the global frame.
    poserecord = nusc.get('ego_pose', lidar_data['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # First Step finished! pc in global frame

    for cam_name in CamNames:
        cam_token = sample_data['data'][cam_name]
        _, boxes, _ = nusc.get_sample_data(cam_token, box_vis_level=BoxVisibility.ANY)

        all_points = []
        for box in boxes:
            ann_token = box.token
            points, _ = get_dense_depth_ann(ann_token, timestamp, deepcopy(pc.points[:3, ]), nusc)
            if points is not None:
                all_points.append(points)

        if len(all_points) > 0:
            points = np.concatenate(all_points, axis=-1)
        else:
            # set meta[''] = None!
            ret_dict[cam_name] = None
            continue
        # Transform the points back to the ego pose.

        # change from global to ego pose
        points = points - np.array(poserecord['translation'])[:, np.newaxis]
        points = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, points)

        # change from ego to camera
        cam_data = nusc.get('sample_data', cam_token)
        cam_cs = nusc.get('calibrated_sensor', cam_data['calibrated_sensor_token'])

        # Transform the points from the ego pose to the camera pose.
        cam_points = deepcopy(points) - np.array(cam_cs['translation'])[:, np.newaxis]
        cam_points = np.dot(Quaternion(cam_cs['rotation']).rotation_matrix.T, cam_points)

        ret_points = cam_points
        ret_dict[cam_name] = ret_points

    return ret_dict
Example #6
    def get_lidar_pointcloud(self):
        """ 
        Extracting lidar pointcloud for current timestep in current scene
        Author: Javier
        :return Point cloud [Position(x,y,z) X n_points] 
        """

        # Select sensor data record
        channel = 'LIDAR_TOP'
        sample_data_token = self.sample['data'][channel]
        sd_record = self.nusc.get('sample_data', sample_data_token)

        # Get aggregated point cloud in lidar frame.
        chan = sd_record['channel']
        pc, times = LidarPointCloud.from_file_multisweep(self.nusc, self.sample, chan, channel, nsweeps=1)

        # Calibration of the LiDAR points.
        ref_sd_record = self.nusc.get('sample_data', sample_data_token)
        cs_record = self.nusc.get('calibrated_sensor', ref_sd_record['calibrated_sensor_token'])
        pose_record = self.nusc.get('ego_pose', ref_sd_record['ego_pose_token'])
        ref_to_ego = transform_matrix(translation=cs_record['translation'],
                                      rotation=Quaternion(cs_record["rotation"]))

        # Compute rotation between 3D vehicle pose and "flat" vehicle pose (parallel to global z plane).
        ego_yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0]
        rotation_vehicle_flat_from_vehicle = np.dot(
            Quaternion(scalar=np.cos(ego_yaw / 2), vector=[0, 0, np.sin(ego_yaw / 2)]).rotation_matrix,
            Quaternion(pose_record['rotation']).inverse.rotation_matrix)
        vehicle_flat_from_vehicle = np.eye(4)
        vehicle_flat_from_vehicle[:3, :3] = rotation_vehicle_flat_from_vehicle
        viewpoint = np.dot(vehicle_flat_from_vehicle, ref_to_ego)

        points = view_points(pc.points[:3, :], viewpoint, normalize=False)
        self.points_lidar = points
        return points
Example #7
 def get_data(sensor_data):
     sd_record = nusc.get('sample_data', sensor_data)
     sensor_modality = sd_record['sensor_modality']
     nsweeps = 1
     sample_rec = nusc.get('sample', sd_record['sample_token'])
     ref_chan = 'LIDAR_TOP'
     chan = sd_record['channel']
     if sensor_modality == 'lidar':
         pc, times = LidarPointCloud.from_file_multisweep(nusc,
                                                          sample_rec,
                                                          chan,
                                                          ref_chan,
                                                          nsweeps=nsweeps)
         points = pc.points
         return points  # (4, 24962) max 30000
     elif sensor_modality == 'radar':
         pc, times = RadarPointCloud.from_file_multisweep(nusc,
                                                          sample_rec,
                                                          chan,
                                                          ref_chan,
                                                          nsweeps=nsweeps)
         points = pc.points
         return points  # (18, -1) # max 100
     elif sensor_modality == 'camera':
         data_path, boxes, camera_intrinsic = nusc.get_sample_data(
             sensor_data)  #, box_vis_level=box_vis_level)
         data = Image.open(data_path)
         img = np.array(data)
         return img  # (900, 1600, 3)
Example #8
    def map_pointcloud_to_image(self, pointsensor_token: str, camera_token: str) -> Tuple:
        """
        Given a point sensor (lidar/radar) token and camera sample_data token, load point-cloud and map it to the image
        plane.
        :param pointsensor_token: Lidar/radar sample_data token.
        :param camera_token: Camera sample_data token.
        :return: (pointcloud <np.float: 2, n>, coloring <np.float: n>, image <Image>).
        """

        cam = self.nusc.get('sample_data', camera_token)
        pointsensor = self.nusc.get('sample_data', pointsensor_token)
        pcl_path = osp.join(self.nusc.dataroot, pointsensor['filename'])
        if pointsensor['sensor_modality'] == 'lidar':
            pc = LidarPointCloud.from_file(pcl_path)
        else:
            pc = RadarPointCloud.from_file(pcl_path)
        im = Image.open(osp.join(self.nusc.dataroot, cam['filename']))

        # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
        # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
        cs_record = self.nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Second step: transform to the global frame.
        poserecord = self.nusc.get('ego_pose', pointsensor['ego_pose_token'])
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
        pc.translate(np.array(poserecord['translation']))

        # Third step: transform into the ego vehicle frame for the timestamp of the image.
        poserecord = self.nusc.get('ego_pose', cam['ego_pose_token'])
        pc.translate(-np.array(poserecord['translation']))
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

        # Fourth step: transform into the camera.
        cs_record = self.nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
        pc.translate(-np.array(cs_record['translation']))
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

        # Fifth step: actually take a "picture" of the point cloud.
        # Grab the depths (camera frame z axis points away from the camera).
        depths = pc.points[2, :]

        # Use the depth (z in the camera frame) as the coloring.
        coloring = pc.points[2, :]

        # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
        points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)

        # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
        mask = np.ones(depths.shape[0], dtype=bool)
        mask = np.logical_and(mask, depths > 0)
        mask = np.logical_and(mask, points[0, :] > 1)
        mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
        mask = np.logical_and(mask, points[1, :] > 1)
        mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
        points = points[:, mask]
        coloring = coloring[mask]

        return points, coloring, im
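The four frame changes above (sensor -> ego -> global -> ego at camera time -> camera) can also be composed into a single 4 x 4 matrix and applied once with pc.transform(). A minimal sketch, not the devkit's implementation; the function and argument names are illustrative, and the records are the same calibrated_sensor/ego_pose dicts used above:
import numpy as np
from pyquaternion import Quaternion
from nuscenes.utils.geometry_utils import transform_matrix

def lidar_to_cam_matrix(cs_lidar: dict, pose_lidar: dict, pose_cam: dict, cs_cam: dict) -> np.ndarray:
    # Compose: sensor -> ego (lidar time) -> global -> ego (camera time) -> camera.
    sensor_to_ego = transform_matrix(cs_lidar['translation'], Quaternion(cs_lidar['rotation']))
    ego_to_global = transform_matrix(pose_lidar['translation'], Quaternion(pose_lidar['rotation']))
    global_to_ego = transform_matrix(pose_cam['translation'], Quaternion(pose_cam['rotation']), inverse=True)
    ego_to_cam = transform_matrix(cs_cam['translation'], Quaternion(cs_cam['rotation']), inverse=True)
    return ego_to_cam @ global_to_ego @ ego_to_global @ sensor_to_ego  # apply with pc.transform(...)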
Example #9
    def getPoints(self, index):
        sample = self.nusc.get('sample', self.filtered_sample_tokens[index])
        sample_lidar_token = sample['data']['LIDAR_TOP']

        lidar_data = self.nusc.get('sample_data', sample_lidar_token)
        ego_pose = self.nusc.get('ego_pose', lidar_data['ego_pose_token'])
        calibrated_sensor = self.nusc.get(
            'calibrated_sensor', lidar_data['calibrated_sensor_token'])

        global_from_car = transform_matrix(ego_pose['translation'],
                                           Quaternion(ego_pose['rotation']),
                                           inverse=False)
        car_from_sensor = transform_matrix(calibrated_sensor['translation'],
                                           Quaternion(
                                               calibrated_sensor['rotation']),
                                           inverse=False)
        try:
            lidar_pointcloud, times = LidarPointCloud.from_file_multisweep(
                self.nusc, sample, 'LIDAR_TOP', 'LIDAR_TOP')
            lidar_pointcloud.transform(car_from_sensor)
        except Exception as e:
            print(f"Failed to load Lidar Pointcloud for {sample}: {e}")
            raise
        points = lidar_pointcloud.points
        points[3, :] /= 255  # Scale intensity from [0, 255] to [0, 1] ...
        points[3, :] -= 0.5  # ... then center it around zero.

        # points_cat = np.concatenate([points, times], axis=0).transpose()
        points_cat = points.transpose()
        points_cat = points_cat[~np.isnan(points_cat).any(axis=1)]

        return points_cat
Example #10
def get_points_data(nusc, all_sample_tokens):
    all_points = []
    all_ego_poses = []
    for sample_token in all_sample_tokens:
        sample = nusc.get('sample', sample_token)
        lidar_data = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
        chan = 'LIDAR_TOP'
        ref_chan = 'LIDAR_TOP'
        pc, _ = LidarPointCloud.from_file_multisweep(nusc,
                                                     sample,
                                                     chan,
                                                     ref_chan,
                                                     nsweeps=10)
        # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
        cs_record = nusc.get('calibrated_sensor',
                             lidar_data['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Second step: transform to the global frame.
        ps_record = nusc.get('ego_pose', lidar_data['ego_pose_token'])
        pc.rotate(Quaternion(ps_record['rotation']).rotation_matrix)
        pc.translate(np.array(ps_record['translation']))
        points = pc.points
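        # Scale the lidar intensity channel from its raw [0, 255] range to [0, 1].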
        points[3, :] = points[3, :] / 255.0
        all_points.append(points)
        all_ego_poses.append(ps_record)
    return all_points, all_ego_poses
Example #11
 def load_points(self, path: str) -> None:
     """
     Loads the x, y, z and intensity of the points in the point cloud.
     :param path: Path to the bin file containing the x, y, z and intensity of the points in the point cloud.
     """
     self.points = LidarPointCloud.from_file(
         path).points.T  # [N, 4], where N is the number of points.
     if self.labels is not None:
         assert len(self.points) == len(self.labels), 'Error: There are {} points in the point cloud, ' \
                                                      'but {} labels'.format(len(self.points), len(self.labels))
Example #12
    def _get_lidar_data(self,
                        sample_rec: dict,
                        sample_data: dict,
                        pose_rec: dict,
                        nsweeps: int = 1) -> Tuple[LidarPointCloud, dict]:
        """
        Returns the LIDAR pointcloud for this frame in vehicle/global coordinates.
        :param sample_rec: sample record dictionary from nuscenes
        :param sample_data: sample data dictionary from nuscenes
        :param pose_rec: ego pose record dictionary from nuscenes
        :param nsweeps: number of sweeps to return for the LIDAR
        :return: Tuple of (LidarPointCloud containing this sample and all sweeps, calibrated_sensor record)
        """

        lidar_path = os.path.join(self.nusc_path, sample_data['filename'])
        cs_record = self.nusc.get('calibrated_sensor',
                                  sample_data['calibrated_sensor_token'])
        if nsweeps > 1:
            ## Returned in the reference sensor frame (all sweeps mapped to this channel)
            lidar_pc, _ = LidarPointCloud.from_file_multisweep(
                self.nusc,
                sample_rec,
                sample_data['channel'],
                sample_data['channel'],
                nsweeps=nsweeps)
        else:
            ## returns in sensor coordinates
            lidar_pc = LidarPointCloud.from_file(lidar_path)

        ## Sensor to vehicle
        lidar_pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        lidar_pc.translate(np.array(cs_record['translation']))

        ## Vehicle to global
        if self.coordinates == 'global':
            lidar_pc.rotate(Quaternion(pose_rec['rotation']).rotation_matrix)
            lidar_pc.translate(np.array(pose_rec['translation']))

        return lidar_pc, cs_record
Example #13
    def get_pointcloud(token: str, root: str = '/data/sets/kitti') -> LidarPointCloud:
        """
        Load up the pointcloud for a sample.
        :param token: KittiDB unique id.
        :param root: Base folder for all KITTI data.
        :return: LidarPointCloud for the sample in the KITTI Lidar frame.
        """
        pc_filename = KittiDB.get_filepath(token, 'velodyne', root=root)

        # The lidar PC is stored in the KITTI LIDAR coord system.
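        # Each row of the .bin file is (x, y, z, reflectance) stored as float32.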
        pc = LidarPointCloud(np.fromfile(pc_filename, dtype=np.float32).reshape(-1, 4).T)

        return pc
Example #14
def process(sd):

    # read lidar points
    pc = LidarPointCloud.from_file(f"{nusc_root}/{sd['filename']}")

    # Keep only x, y, z as an (N, 3) array.
    pts = np.array(pc.points[:3].T)

    # we follow nuscenes's labeling protocol
    # c.f. nuscenes lidarseg
    # 24: driveable surface
    # 30: static.others
    # 31: ego

    # initialize everything to 30 (static others)
    lbls = np.full(len(pts), 30, dtype=np.uint8)

    # identify ego mask based on the car's physical dimensions
    ego_mask = np.logical_and(
        np.logical_and(-0.8 <= pc.points[0], pc.points[0] <= 0.8),
        np.logical_and(-1.5 <= pc.points[1], pc.points[1] <= 2.5))
    lbls[ego_mask] = 31

    # run ground segmentation code
    index = np.flatnonzero(np.logical_not(ego_mask))
    label = segmentation.segment(pts[index])

    # Points classified as ground get the driveable-surface label (24).
    grnd_index = np.flatnonzero(label)
    lbls[index[grnd_index]] = 24

    # visualize to double check
    if False:
        import open3d as o3d
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(pts)

        colors = np.zeros_like(pts)
        colors[lbls == 24, :] = [1, 0, 0]
        colors[lbls == 30, :] = [0, 1, 0]
        colors[lbls == 31, :] = [0, 0, 1]
        pcd.colors = o3d.utility.Vector3dVector(colors)

        o3d.visualization.draw_geometries([pcd])

    # Write the per-point labels for this sample_data token.
    res_file = os.path.join(res_dir, f"{sd['token']}_grndseg.bin")
    lbls.tofile(res_file)
Example #15
def get_lidar_pc_from_file(nusc, sample_rec, chan: str):
    """
    Returns the lidar point cloud of a single keyframe.

    Arguments:
        nusc: A NuScenes instance, <NuScenes>.
        sample_rec: The current sample, <dict>.
        chan: The lidar channel from which the point cloud is extracted, <str>.
    """
    # Get filename
    sample_data = nusc.get('sample_data', sample_rec['data'][chan])
    pcl_path = os.path.join(nusc.dataroot, sample_data['filename'])

    # Extract point cloud
    pc = LidarPointCloud.from_file(pcl_path)
    return pc
Example #16
def load_lidar_file_nuscenes(file_path):
    n_vec = 5
    dtype = np.float32
    lidar_pc_raw = np.fromfile(file_path, dtype)
    lidar_pc = lidar_pc_raw.reshape((-1, n_vec))
    #intensity_normalized = lidar_pc[:, 3] / 255
    #lidar_pc[:, 3] = intensity_normalized
    pointcloud = LidarPointCloud.from_file(file_path)
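    # scipy quaternions are (x, y, z, w); this is a 270-degree (i.e. -90-degree) rotation about the z-axis.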
    r = R.from_quat([0, 0, np.sin((np.pi / 4) * 3), np.cos((np.pi / 4) * 3)])
    pointcloud.rotate(r.as_matrix())
    lidar_pc[:, 0] = pointcloud.points[0, :]
    lidar_pc[:, 1] = pointcloud.points[1, :]
    lidar_pc[:, 2] = pointcloud.points[2, :]
    lidar_pc[:, 3] = pointcloud.points[3, :]

    return lidar_pc
Example #17
def parse_sample(sample_token, nusc):
    sample_data = nusc.get('sample', sample_token)
    timestamp = sample_data['timestamp']

    # First step: get the lidar points in global frame coordinates.
    lidar_token = sample_data['data']['LIDAR_TOP']
    lidar_data = nusc.get('sample_data', lidar_token)
    pcl_path = osp.join(nusc.dataroot, lidar_data['filename'])
    pc = LidarPointCloud.from_file(pcl_path)

    # The lidar points start in the point sensor frame; transform them to the ego vehicle frame.

    cs_record = nusc.get('calibrated_sensor',
                         lidar_data['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform from ego to the global frame.
    poserecord = nusc.get('ego_pose', lidar_data['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # First Step finished! pc in global frame

    ann_tokens = sample_data['anns']

    all_points = []
    all_sf = []
    for ann_token in ann_tokens:
        # ret are in global frame
        points, sf, box = get_sf_ann(ann_token, timestamp, pc.points[:3, ],
                                     nusc)
        if points is not None:
            all_points.append(points)
            all_sf.append(sf)

    points = np.concatenate(all_points, axis=-1)
    sf = np.concatenate(all_sf, axis=-1)

    # Transform the points to the ego pose and rotate the scene-flow (sf) vectors accordingly.
    points = points - np.array(poserecord['translation'])[:, np.newaxis]
    points = np.dot(
        Quaternion(poserecord['rotation']).rotation_matrix.T, points)

    sf = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, sf)

    return points, sf
Example #18
    def get_lidar_pointcloud(self, nsweeps=5):
        """
        Extracting lidar pointcloud for current timestep in current scene
        :return Point cloud [Position(x,y,z,i,t) X n_points]
        """

        # Select sensor data record
        channel = 'LIDAR_TOP'
        sample_data_token = self.sample['data'][channel]
        sd_record = self.nusc.get('sample_data', sample_data_token)
        # Get aggregated point cloud in lidar frame.
        chan = sd_record['channel']
        pc, times = LidarPointCloud.from_file_multisweep(
            self.nusc, self.sample, chan, channel, nsweeps)
        points = np.concatenate([pc.points, times], axis=0)

        return points
Example #19
    def load_cloud_raw(self, sample_lidar_token):  
        lidar_data = self.NuScenes_data.get("sample_data", sample_lidar_token)  
        lidar_filepath = self.NuScenes_data.get_sample_data_path(sample_lidar_token)
        ego_pose = self.NuScenes_data.get("ego_pose", lidar_data["ego_pose_token"])
        calibrated_sensor = self.NuScenes_data.get("calibrated_sensor", lidar_data["calibrated_sensor_token"])

        # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
        car_from_sensor = transform_matrix(calibrated_sensor['translation'], Quaternion(calibrated_sensor['rotation']),
                                            inverse=False)

        lidar_pointcloud = LidarPointCloud.from_file(lidar_filepath)

        # The lidar pointcloud is defined in the sensor's reference frame.
        # We want it in the car's reference frame, so we transform each point
        lidar_pointcloud.transform(car_from_sensor)

        return lidar_pointcloud.points[:3,:].T    
Example #20
    def get_lidar_pc_intensity_by_token(self, lidar_token):
        lidar = self.nusc.get('sample_data', lidar_token)
        pc = LidarPointCloud.from_file(
            os.path.join(self.nusc.dataroot, lidar['filename']))
        pc_np = pc.points[0:3, :]
        intensity_np = pc.points[3:4, :]

        # Remove points that fall on the ego car.
        x_inside = np.logical_and(pc_np[0, :] < 0.8, pc_np[0, :] > -0.8)
        y_inside = np.logical_and(pc_np[1, :] < 2.7, pc_np[1, :] > -2.7)
        inside_mask = np.logical_and(x_inside, y_inside)
        outside_mask = np.logical_not(inside_mask)
        pc_np = pc_np[:, outside_mask]
        intensity_np = intensity_np[:, outside_mask]

        P_oi = get_sample_data_ego_pose_P(self.nusc, lidar)

        return pc_np, intensity_np, P_oi
Example #21
    def get_lidar_instances(self, nsweeps=1, enlarge=1):
        channel = 'LIDAR_TOP'
        sample_data_token = self.sample['data'][channel]
        sd_record = self.nusc.get('sample_data', sample_data_token)
        _, boxes, _ = self.nusc.get_sample_data(
            sample_data_token, use_flat_vehicle_coordinates=False)

        # Get aggregated point cloud in lidar frame.
        chan = sd_record['channel']
        pc, _ = LidarPointCloud.from_file_multisweep(self.nusc, self.sample,
                                                     chan, channel, nsweeps)
        points = pc.points
        anno_points = []
        anno_boxes = []
        for box in boxes:
            mask = points_in_box(box, points[0:3, :], wlh_factor=enlarge)
            if mask.any():
                anno_points.append(points[:, mask])
                anno_boxes.append(box)
        return anno_points, anno_boxes
Example #22
def get_sensor_sample_data(nusc,
                           sample,
                           sensor_channel,
                           dtype=np.float32,
                           size=None):
    """
    This function takes the token of a sample and a sensor sensor_channel and returns the according data
    :param sample: the nuscenes sample dict
    :param sensor_channel: the target sensor channel of the given sample to load the data from
    :param dtype: the target numpy type
    :param size: for resizing the image
    Radar Format:
        - Shape: 19 x n
        - Semantics: 
            [0]: x (1)
            [1]: y (2)
            [2]: z (3)
            [3]: recflance (4)


    """
    # Get filepath
    sd_rec = nusc.get('sample_data', sample['data'][sensor_channel])
    file_name = osp.join(nusc.dataroot, sd_rec['filename'])

    # Check conditions
    if not osp.exists(file_name):
        raise FileNotFoundError("nuscenes data must be located in %s" %
                                file_name)

    # Read the data
    if "LIDAR" in sensor_channel:
        pcs = LidarPointCloud.from_file(file_name)  # Load Lidar points
        data = pcs.points.astype(dtype)

    else:
        raise Exception("\"%s\" is not supported" % sensor_channel)

    return data
Example #23
def main():
    nusc = NuScenes(version='v1.0-mini',
                    dataroot='../../data/datasets/nuscenes',
                    verbose=True)

    my_sample = nusc.sample[10]
    my_sample_token = my_sample['token']
    sample_record = nusc.get('sample', my_sample_token)

    pointsensor_token = sample_record['data']['LIDAR_TOP']
    camera_token = sample_record['data']['CAM_FRONT']

    # Get point cloud
    pointsensor = nusc.get('sample_data', pointsensor_token)
    pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])
    if pointsensor['sensor_modality'] == 'lidar':
        pc_data = LidarPointCloud.from_file(pcl_path)
    else:
        raise NotImplementedError()

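    # Keep only the x, y, z rows; intensity is not needed for the visualization below.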
    pc = pc_data.points[0:3]

    vtk_window_size = (1280, 720)
    vtk_renderer = demo_utils.setup_vtk_renderer()
    vtk_render_window = demo_utils.setup_vtk_render_window(
        'Point Cloud', vtk_window_size, vtk_renderer)

    vtk_interactor = vtk.vtkRenderWindowInteractor()
    vtk_interactor.SetRenderWindow(vtk_render_window)
    vtk_interactor.SetInteractorStyle(
        vtk_utils.ToggleActorsInteractorStyle(None, vtk_renderer))
    vtk_interactor.Initialize()

    vtk_pc = VtkPointCloudGlyph()
    vtk_pc.set_points(pc.T)
    vtk_renderer.AddActor(vtk_pc.vtk_actor)

    vtk_render_window.Render()
    vtk_interactor.Start()  # Blocking
Example #24
def get_pcl():
	nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=True)
	explorer = NuScenesExplorer(nusc)

	imglist = []

	count = 0
	for scene in nusc.scene:
		token = scene['first_sample_token']
		while token != '':
			count += 1
			sample = nusc.get('sample',token)
			sample_data_cam = nusc.get('sample_data', sample['data']['CAM_FRONT'])
			sample_data_pcl = nusc.get('sample_data', sample['data']['LIDAR_TOP'])

			# get CAM_FRONT datapath 
			data_path, boxes, camera_intrinsic = explorer.nusc.get_sample_data(sample_data_cam['token'], box_vis_level = BoxVisibility.ANY)
			imglist += [data_path]

			# get LiDAR point cloud 
			sample_rec = explorer.nusc.get('sample', sample_data_pcl['sample_token'])
			chan = sample_data_pcl['channel']
			ref_chan = 'LIDAR_TOP'
			pc, times = LidarPointCloud.from_file_multisweep(nusc, sample_rec, chan, ref_chan)

			radius = np.sqrt((pc.points[0])**2 + (pc.points[1])**2 + (pc.points[2])**2)
			pc = pc.points.transpose() 
			pc = pc[np.where(radius<20)]
			print(pc)
			display(pc.transpose(), count)

			token = sample['next']   
			if count == 2:
				break
		if count == 2:
			break
	plt.show()
	print(len(imglist))
Example #25
    def get_each_sweep_lidar_pointcloud(self, nsweeps=5):
        """
        Extracting lidar pointcloud for current timestep in current scene
        :return
            Point cloud [[Position(x,y,z, a) X n_points] * timestamps]
            Time stamps a list of int for timestamps in microsecond
        """

        # Select sensor data record
        channel = 'LIDAR_TOP'
        sample_data_token = self.sample['data'][channel]
        sd_record = self.nusc.get('sample_data', sample_data_token)
        # Get aggregated point cloud in lidar frame.
        chan = sd_record['channel']
        pc, times = LidarPointCloud.from_file_multisweep(
            self.nusc, self.sample, chan, channel, nsweeps)
        times_interval = np.unique(times)[::-1]
        unique_times = self.get_timestamp() - (times_interval * 1e6).astype(np.int64)
        points = []
        for t in times_interval:
            points.append(pc.points[:, times[0] == t])
        return points, unique_times
Example #26
def get_lidar_points(lyftdata, lidar_token):
    '''Get lidar point cloud in the frame of the ego vehicle'''
    sd_record = lyftdata.get("sample_data", lidar_token)
    sensor_modality = sd_record["sensor_modality"]

    # Get aggregated point cloud in lidar frame.
    sample_rec = lyftdata.get("sample", sd_record["sample_token"])
    chan = sd_record["channel"]
    ref_chan = "LIDAR_TOP"
    pc, times = LidarPointCloud.from_file_multisweep(lyftdata,
                                                     sample_rec,
                                                     chan,
                                                     ref_chan,
                                                     nsweeps=1)
    # Compute transformation matrices for lidar point cloud
    cs_record = lyftdata.get("calibrated_sensor",
                             sd_record["calibrated_sensor_token"])
    pose_record = lyftdata.get("ego_pose", sd_record["ego_pose_token"])
    vehicle_from_sensor = np.eye(4)
    vehicle_from_sensor[:3, :3] = Quaternion(
        cs_record["rotation"]).rotation_matrix
    vehicle_from_sensor[:3, 3] = cs_record["translation"]

    ego_yaw = Quaternion(pose_record["rotation"]).yaw_pitch_roll[0]
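    # Rotation between the 3D vehicle pose and a "flat" vehicle pose (parallel to the global z-plane), i.e. keep only the ego yaw.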
    rot_vehicle_flat_from_vehicle = np.dot(
        Quaternion(scalar=np.cos(ego_yaw / 2),
                   vector=[0, 0, np.sin(ego_yaw / 2)]).rotation_matrix,
        Quaternion(pose_record["rotation"]).inverse.rotation_matrix,
    )
    vehicle_flat_from_vehicle = np.eye(4)
    vehicle_flat_from_vehicle[:3, :3] = rot_vehicle_flat_from_vehicle
    points = view_points(pc.points[:3, :],
                         np.dot(vehicle_flat_from_vehicle,
                                vehicle_from_sensor),
                         normalize=False)
    return points
Example #27
def from_file_multisweep(
    nusc: "NuScenes",
    sample_data_token: str,
):
    """
    Return a point cloud that aggregates multiple sweeps.
    As every sweep is in a different coordinate frame, we need to map the coordinates to a single reference frame.
    As every sweep has a different timestamp, we need to account for that in the transformations and timestamps.
    :param nusc: A NuScenes instance.
    :param sample_rec: The current sample.
    :param chan: The radar channel from which we track back n sweeps to aggregate the point cloud.
    :param ref_chan: The reference channel of the current sample_rec that the point clouds are mapped to.
    :param nsweeps: Number of sweeps to aggregated.
    :param min_distance: Distance below which points are discarded.
    :return: (all_pc, all_times). The aggregated point cloud and timestamps.
    """

    # Load only the current sweep (no aggregation of previous sweeps).
    current_sd_rec = nusc.get("sample_data", sample_data_token)
    current_pc = LidarPointCloud.from_file(
        osp.join(nusc.dataroot, current_sd_rec["filename"]))

    return current_pc
Example #28
def create_dataset(dataroot,
                   save_dir,
                   width=672,
                   height=672,
                   grid_range=70.,
                   nusc_version='v1.0-mini',
                   use_constant_feature=True,
                   use_intensity_feature=True,
                   end_id=None):

    os.makedirs(os.path.join(save_dir, 'in_feature'), exist_ok=True)
    os.makedirs(os.path.join(save_dir, 'out_feature'), exist_ok=True)

    nusc = NuScenes(version=nusc_version, dataroot=dataroot, verbose=True)
    ref_chan = 'LIDAR_TOP'

    if width == height:
        size = width
    else:
        raise Exception(
            'Currently only supported if width and height are equal')

    grid_length = 2. * grid_range / size
    label_half_length = 10

    data_id = 0
    grid_ticks = np.arange(-grid_range, grid_range + grid_length, grid_length)
    grid_centers = (grid_ticks + grid_length / 2)[:len(grid_ticks) - 1]

    for my_scene in nusc.scene:
        first_sample_token = my_scene['first_sample_token']
        token = first_sample_token

        while (token != ''):
            print('--- {} '.format(data_id) + token + ' ---')
            my_sample = nusc.get('sample', token)
            sd_record = nusc.get('sample_data', my_sample['data'][ref_chan])
            sample_rec = nusc.get('sample', sd_record['sample_token'])
            chan = sd_record['channel']

            pc, times = LidarPointCloud.from_file_multisweep(nusc,
                                                             sample_rec,
                                                             chan,
                                                             ref_chan,
                                                             nsweeps=10)
            _, boxes, _ = nusc.get_sample_data(sd_record['token'],
                                               box_vis_level=0)

            out_feature = np.zeros((size, size, 7), dtype=np.float32)
            for box_idx, box in enumerate(boxes):
                label = 0
                if box.name.split('.')[0] == 'vehicle':
                    if box.name.split('.')[1] == 'car':
                        label = 1
                    elif (box.name.split('.')[1]
                          in ['bus', 'construction', 'trailer', 'truck']):
                        label = 2
                    elif box.name.split('.')[1] == 'emergency':
                        if box.name.split('.')[2] == 'ambulance':
                            label = 2
                        # police
                        else:
                            label = 1
                    elif box.name.split('.')[1] in ['bicycle', 'motorcycle']:
                        label = 3
                elif box.name.split('.')[0] == 'human':
                    label = 4
                elif (box.name.split('.')[0]
                      in ['animal', 'movable_object', 'static_object']):
                    label = 5
                else:
                    continue

                height_pt = np.linalg.norm(box.corners().T[0] -
                                           box.corners().T[3])
                corners2d = box.corners()[:2, :]
                box2d = corners2d.T[[2, 3, 7, 6]]

                box2d_center = box2d.mean(axis=0)
                generate_out_feature(width, height, size, grid_centers, box2d,
                                     box2d_center, height_pt, label,
                                     label_half_length, out_feature)

            out_feature = out_feature.astype(np.float16)
            feature_generator = fg.Feature_generator(grid_range, width, height,
                                                     use_constant_feature,
                                                     use_intensity_feature)
            feature_generator.generate(pc.points.T)
            in_feature = feature_generator.feature
            if use_constant_feature and use_intensity_feature:
                in_feature = in_feature.reshape(size, size, 8)
            elif use_constant_feature or use_intensity_feature:
                in_feature = in_feature.reshape(size, size, 6)
            else:
                in_feature = in_feature.reshape(size, size, 4)

            # The feature map is flipped below, so the instance offset channels (1:3) are negated to match.
            out_feature = np.flip(np.flip(out_feature, axis=0), axis=1)
            out_feature[:, :, 1:3] *= -1

            np.save(os.path.join(save_dir, 'in_feature/{:05}'.format(data_id)),
                    in_feature)
            np.save(
                os.path.join(save_dir, 'out_feature/{:05}'.format(data_id)),
                out_feature)

            token = my_sample['next']
            data_id += 1
            if data_id == end_id:
                return
Example #29
    def map_pointcloud_to_image(self,
                                nusc,
                                pointsensor_token: str,
                                camera_token: str,
                                min_dist: float = 1.0,
                                render_intensity: bool = False,
                                show_lidarseg: bool = False):
        """
        Given a point sensor (lidar/radar) token and camera sample_data token, load pointcloud and map it to the image
        plane.
        :param pointsensor_token: Lidar/radar sample_data token.
        :param camera_token: Camera sample_data token.
        :param min_dist: Distance from the camera below which points are discarded.
        :param render_intensity: Whether to render lidar intensity instead of point depth.
        :param show_lidarseg: Whether to render lidar segmentation labels instead of point depth.
        :return: (pointcloud <np.float: 2, n>, coloring <np.float: n>, image <Image>).
        """

        cam = nusc.get('sample_data', camera_token)
        pointsensor = nusc.get('sample_data', pointsensor_token)
        pcl_path = osp.join(nusc.dataroot, pointsensor['filename'])
        if pointsensor['sensor_modality'] == 'lidar':
            # Ensure that lidar pointcloud is from a keyframe.
            assert pointsensor['is_key_frame'], \
                'Error: Only pointclouds which are keyframes have lidar segmentation labels. Rendering aborted.'
            pc = LidarPointCloud.from_file(pcl_path)
        else:
            pc = RadarPointCloud.from_file(pcl_path)
        im = Image.open(osp.join(nusc.dataroot, cam['filename']))

        # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
        # First step: transform the pointcloud to the ego vehicle frame for the timestamp of the sweep.
        cs_record = nusc.get('calibrated_sensor',
                             pointsensor['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Second step: transform from ego to the global frame.
        poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
        pc.translate(np.array(poserecord['translation']))

        # Third step: transform from global into the ego vehicle frame for the timestamp of the image.
        poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
        pc.translate(-np.array(poserecord['translation']))
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

        # Fourth step: transform from ego into the camera.
        cs_record = nusc.get('calibrated_sensor',
                             cam['calibrated_sensor_token'])
        pc.translate(-np.array(cs_record['translation']))
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

        # Fifth step: actually take a "picture" of the point cloud.
        # Grab the depths (camera frame z axis points away from the camera).
        depths = pc.points[2, :]

        if render_intensity:
            assert pointsensor['sensor_modality'] == 'lidar', 'Error: Can only render intensity for lidar, ' \
                                                              'not %s!' % pointsensor['sensor_modality']
            # Retrieve the color from the intensities.
            # Perform arbitrary scaling to achieve more visually pleasing results.
            intensities = pc.points[3, :]
            intensities = (intensities - np.min(intensities)) / (
                np.max(intensities) - np.min(intensities))
            intensities = intensities**0.1
            intensities = np.maximum(0, intensities - 0.5)
            coloring = intensities

        else:
            # Retrieve the color from the depth.
            coloring = depths

        # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
        points = view_points(pc.points[:3, :],
                             np.array(cs_record['camera_intrinsic']),
                             normalize=True)

        # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
        # Also make sure points are at least 1m in front of the camera to avoid seeing the lidar points on the camera
        # casing for non-keyframes which are slightly out of sync.
        mask = np.ones(depths.shape[0], dtype=bool)
        mask = np.logical_and(mask, depths > min_dist)
        mask = np.logical_and(mask, points[0, :] > 1)
        mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
        mask = np.logical_and(mask, points[1, :] > 1)
        mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
        points = points[:, mask]
        coloring = coloring[mask]

        return points, coloring, im
Example #30
    def render_sample_data(self,
                           sample_data_token: str,
                           with_anns: bool = True,
                           box_vis_level: BoxVisibility = BoxVisibility.ANY,
                           axes_limit: float = 40,
                           ax: Axes = None,
                           nsweeps: int = 1) -> None:
        """
        Render sample data onto axis.
        :param sample_data_token: Sample_data token.
        :param with_anns: Whether to draw annotations.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param axes_limit: Axes limit for lidar and radar (measured in meters).
        :param ax: Axes onto which to render.
        :param nsweeps: Number of sweeps for lidar and radar.
        """

        # Get sensor modality.
        sd_record = self.nusc.get('sample_data', sample_data_token)
        sensor_modality = sd_record['sensor_modality']

        if sensor_modality == 'lidar':
            # Get boxes in lidar frame.
            _, boxes, _ = self.nusc.get_sample_data(sample_data_token, box_vis_level=box_vis_level)

            # Get aggregated point cloud in lidar frame.
            sample_rec = self.nusc.get('sample', sd_record['sample_token'])
            chan = sd_record['channel']
            ref_chan = 'LIDAR_TOP'
            pc, times = LidarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, nsweeps=nsweeps)

            # Init axes.
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))

            # Show point cloud.
            points = view_points(pc.points[:3, :], np.eye(4), normalize=False)
            dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
            colors = np.minimum(1, dists/axes_limit/np.sqrt(2))
            ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)

            # Show ego vehicle.
            ax.plot(0, 0, 'x', color='black')

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'radar':
            # Get boxes in lidar frame.
            sample_rec = self.nusc.get('sample', sd_record['sample_token'])
            lidar_token = sample_rec['data']['LIDAR_TOP']
            _, boxes, _ = self.nusc.get_sample_data(lidar_token, box_vis_level=box_vis_level)

            # Get aggregated point cloud in lidar frame.
            # The point cloud is transformed to the lidar frame for visualization purposes.
            chan = sd_record['channel']
            ref_chan = 'LIDAR_TOP'
            pc, times = RadarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, nsweeps=nsweeps)

            # Transform radar velocities (x is front, y is left), as these are not transformed when loading the point
            # cloud.
            radar_cs_record = self.nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
            lidar_sd_record = self.nusc.get('sample_data', lidar_token)
            lidar_cs_record = self.nusc.get('calibrated_sensor', lidar_sd_record['calibrated_sensor_token'])
            velocities = pc.points[8:10, :]  # Compensated velocity
            velocities = np.vstack((velocities, np.zeros(pc.points.shape[1])))
            velocities = np.dot(Quaternion(radar_cs_record['rotation']).rotation_matrix, velocities)
            velocities = np.dot(Quaternion(lidar_cs_record['rotation']).rotation_matrix.T, velocities)
            velocities[2, :] = np.zeros(pc.points.shape[1])

            # Init axes.
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))

            # Show point cloud.
            points = view_points(pc.points[:3, :], np.eye(4), normalize=False)
            dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
            colors = np.minimum(1, dists / axes_limit / np.sqrt(2))
            sc = ax.scatter(points[0, :], points[1, :], c=colors, s=3)

            # Show velocities.
            points_vel = view_points(pc.points[:3, :] + velocities, np.eye(4), normalize=False)
            max_delta = 10
            deltas_vel = points_vel - points
            deltas_vel = 3 * deltas_vel  # Arbitrary scaling
            deltas_vel = np.clip(deltas_vel, -max_delta, max_delta)  # Arbitrary clipping
            colors_rgba = sc.to_rgba(colors)
            for i in range(points.shape[1]):
                ax.arrow(points[0, i], points[1, i], deltas_vel[0, i], deltas_vel[1, i], color=colors_rgba[i])

            # Show ego vehicle.
            ax.plot(0, 0, 'x', color='black')

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'camera':
            # Load boxes and image.
            data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(sample_data_token,
                                                                           box_vis_level=box_vis_level)
            data = Image.open(data_path)

            # Init axes.
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))

            # Show image.
            ax.imshow(data)

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=camera_intrinsic, normalize=True, colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(0, data.size[0])
            ax.set_ylim(data.size[1], 0)

        else:
            raise ValueError("Error: Unknown sensor modality!")

        ax.axis('off')
        ax.set_title(sd_record['channel'])
        ax.set_aspect('equal')
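A minimal usage sketch, assuming the method is exposed through a NuScenes instance as in the devkit (the dataroot is a placeholder):
from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)
sample = nusc.sample[0]
nusc.render_sample_data(sample['data']['LIDAR_TOP'], nsweeps=5)  # bird's-eye lidar view
nusc.render_sample_data(sample['data']['CAM_FRONT'])  # camera image with projected boxes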