Example #1
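These three examples appear to be methods of an explorer class from an early version of the nuScenes devkit. A plausible set of module-level imports for running them is sketched below; names such as PointCloud, view_points and BoxVisibility are assumed to come from the devkit's utility modules, and the exact module paths vary across devkit versions.

import os.path as osp

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from PIL import Image
from pyquaternion import Quaternion

# Assumed devkit-internal imports; module paths differ between devkit versions.
from nuscenes.utils.data_classes import PointCloud
from nuscenes.utils.geometry_utils import view_points, BoxVisibility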
    def render_sample_data(self,
                           sample_data_token: str,
                           with_anns: bool = True,
                           box_vis_level: BoxVisibility = 3,  # bare int stands in for a BoxVisibility member (IntEnum)
                           axes_limit: float = 40,
                           ax: Axes = None) -> None:
        """
        Render sample data onto axis.
        :param sample_data_token: Sample_data token.
        :param with_anns: Whether to draw annotations.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param axes_limit: Axes limit for lidar data (measured in meters).
        :param ax: Axes onto which to render.
        """

        sd_record = self.nusc.get('sample_data', sample_data_token)
        sensor_modality = sd_record['sensor_modality']

        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            sample_data_token, box_vis_level=box_vis_level)

        if sensor_modality == 'lidar':
            data = PointCloud.from_file(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))
            points = view_points(data.points[:3, :],
                                 np.eye(4),
                                 normalize=False)
            ax.scatter(points[0, :], points[1, :], c=points[2, :], s=1)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=[c, c, c])
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'camera':
            data = Image.open(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))
            ax.imshow(data)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax,
                               view=camera_intrinsic,
                               normalize=True,
                               colors=[c, c, c])
            ax.set_xlim(0, data.size[0])
            ax.set_ylim(data.size[1], 0)

        else:
            raise ValueError("RADAR rendering not implemented yet.")

        ax.axis('off')
        ax.set_title(sd_record['channel'])
        ax.set_aspect('equal')
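A minimal usage sketch for render_sample_data, assuming a NuScenes instance and an explorer object exposing this method (the variable names nusc and explorer are illustrative, not from the source):

# Hypothetical usage: render the LIDAR_TOP sweep of the first sample.
sample = nusc.get('sample', nusc.sample[0]['token'])
explorer.render_sample_data(sample['data']['LIDAR_TOP'], axes_limit=40)
plt.show()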
Example #2
    def render_annotation(self,
                          anntoken: str,
                          margin: float = 10,
                          view: np.ndarray = np.eye(4),
                          box_vis_level: BoxVisibility = 3) -> None:  # bare int stands in for a BoxVisibility member (IntEnum)
        """
        Render selected annotation.
        :param anntoken: Sample_annotation token.
        :param margin: How many meters in each direction to include in LIDAR view.
        :param view: LIDAR view point.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        """

        ann_record = self.nusc.get('sample_annotation', anntoken)
        sample_record = self.nusc.get('sample', ann_record['sample_token'])

        assert 'LIDAR_TOP' in sample_record['data'].keys(), "No LIDAR_TOP in data, can't render"

        fig, axes = plt.subplots(1, 2, figsize=(18, 9))

        # Figure out which camera the object is fully visible in (this may return nothing)
        boxes, cam = [], []
        cams = [key for key in sample_record['data'].keys() if 'CAM' in key]
        for cam in cams:
            _, boxes, _ = self.nusc.get_sample_data(
                sample_record['data'][cam],
                box_vis_level=box_vis_level,
                selected_anntokens=[anntoken])
            if len(boxes) > 0:
                break  # We found an image that matches. Let's abort.
        assert len(boxes) > 0, "Could not find image where annotation is visible. Try using e.g. BoxVisibility.ANY."
        assert len(boxes) < 2, "Found multiple annotations. Something is wrong!"

        cam = sample_record['data'][cam]

        # Plot LIDAR view
        lidar = sample_record['data']['LIDAR_TOP']
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            lidar, selected_anntokens=[anntoken])

        PointCloud.from_file(data_path).render_height(axes[0], view=view)
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[0], view=view, colors=[c, c, c])
            corners = view_points(box.corners(), view, False)[:2, :]
            axes[0].set_xlim([
                np.min(corners[0, :]) - margin,
                np.max(corners[0, :]) + margin
            ])
            axes[0].set_ylim([
                np.min(corners[1, :]) - margin,
                np.max(corners[1, :]) + margin
            ])
            axes[0].axis('off')
            axes[0].set_aspect('equal')

        # Plot CAMERA view
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            cam, selected_anntokens=[anntoken])
        im = Image.open(data_path)
        axes[1].imshow(im)
        axes[1].set_title(self.nusc.get('sample_data', cam)['channel'])
        axes[1].axis('off')
        axes[1].set_aspect('equal')
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[1],
                       view=camera_intrinsic,
                       normalize=True,
                       colors=[c, c, c])
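render_annotation can then be pointed at any sample_annotation token; a sketch under the same assumed nusc/explorer setup (the token lookup is illustrative):

# Hypothetical usage: render the first annotation of the first sample.
sample = nusc.get('sample', nusc.sample[0]['token'])
anntoken = sample['anns'][0]
explorer.render_annotation(anntoken, margin=10)
plt.show()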
Example #3
    def map_pointcloud_to_image(self, lidar_token: str, camera_token: str):
        """
        Given a lidar and camera sample_data token, load point-cloud and map it to the image plane.
        :param lidar_token: Lidar sample data token.
        :param camera_token: Camera sample data token.
        :return: (pointcloud <np.float: 2, n>, coloring <np.float: n>, image <Image>).
        """

        cam = self.nusc.get('sample_data', camera_token)
        top_lidar = self.nusc.get('sample_data', lidar_token)

        pc = PointCloud.from_file(
            osp.join(self.nusc.dataroot, top_lidar['filename']))
        im = Image.open(osp.join(self.nusc.dataroot, cam['filename']))

        # LIDAR points live in the lidar frame. So they need to be transformed via global to the image plane.

        # First step: transform the point cloud to ego vehicle frame for the timestamp of the LIDAR sweep.
        cs_record = self.nusc.get('calibrated_sensor',
                                  top_lidar['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Second step: transform to the global frame.
        poserecord = self.nusc.get('ego_pose', top_lidar['ego_pose_token'])
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
        pc.translate(np.array(poserecord['translation']))

        # Third step: transform into the ego vehicle frame for the timestamp of the image.
        poserecord = self.nusc.get('ego_pose', cam['ego_pose_token'])
        pc.translate(-np.array(poserecord['translation']))
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

        # Fourth step: transform into the camera.
        cs_record = self.nusc.get('calibrated_sensor',
                                  cam['calibrated_sensor_token'])
        pc.translate(-np.array(cs_record['translation']))
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

        # Fifth step: actually take a "picture" of the point cloud.

        # Grab the depths (camera frame z axis points away from the camera).
        depths = pc.points[2, :]

        # Use the depth as the coloring (the cloud is in the camera frame here, so axis 2 is depth, not height).
        coloring = depths

        # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
        points = view_points(pc.points[:3, :],
                             np.array(cs_record['camera_intrinsic']),
                             normalize=True)

        # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
        mask = np.ones(depths.shape[0], dtype=bool)
        mask = np.logical_and(mask, depths > 0)
        mask = np.logical_and(mask, points[0, :] > 1)
        mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
        mask = np.logical_and(mask, points[1, :] > 1)
        mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
        points = points[:, mask]
        coloring = coloring[mask]

        return points, coloring, im
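Since map_pointcloud_to_image returns projected pixel coordinates, a per-point depth coloring, and the image, overlaying them is a single scatter call; a sketch under the same assumed nusc/explorer setup:

# Hypothetical usage: project a LIDAR sweep onto the front camera and plot it.
sample = nusc.get('sample', nusc.sample[0]['token'])
points, coloring, im = explorer.map_pointcloud_to_image(
    sample['data']['LIDAR_TOP'], sample['data']['CAM_FRONT'])
plt.imshow(im)
plt.scatter(points[0, :], points[1, :], c=coloring, s=1)
plt.axis('off')
plt.show()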