Example no. 1
    def render_sample_data(self,
                           sample_data_token: str,
                           with_anns: bool = True,
                           box_vis_level: BoxVisibility = 3,
                           axes_limit: float = 40,
                           ax: Axes = None) -> None:
        """
        Render sample data onto axis.
        :param sample_data_token: Sample_data token.
        :param with_anns: Whether to draw annotations.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param axes_limit: Axes limit for lidar data (measured in meters).
        :param ax: Axes onto which to render.
        """

        sd_record = self.nusc.get('sample_data', sample_data_token)
        sensor_modality = sd_record['sensor_modality']

        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            sample_data_token, box_vis_level=box_vis_level)

        if sensor_modality == 'lidar':
            data = PointCloud.from_file(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))
            points = view_points(data.points[:3, :],
                                 np.eye(4),
                                 normalize=False)
            ax.scatter(points[0, :], points[1, :], c=points[2, :], s=1)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=[c, c, c])
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'camera':
            data = Image.open(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))
            ax.imshow(data)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax,
                               view=camera_intrinsic,
                               normalize=True,
                               colors=[c, c, c])
            ax.set_xlim(0, data.size[0])
            ax.set_ylim(data.size[1], 0)

        else:
            raise ValueError("RADAR rendering not implemented yet.")

        ax.axis('off')
        ax.set_title(sd_record['channel'])
        ax.set_aspect('equal')
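
A minimal usage sketch for render_sample_data. Here nusc (an initialized NuScenes dataset object) and explorer (an instance of the class this method belongs to) are assumptions, not defined in the snippet above.

# Hypothetical usage: render the LIDAR_TOP and CAM_FRONT readings of the first sample.
import matplotlib.pyplot as plt

my_sample = nusc.sample[0]
fig, (ax_lidar, ax_cam) = plt.subplots(1, 2, figsize=(18, 9))
explorer.render_sample_data(my_sample['data']['LIDAR_TOP'], ax=ax_lidar)
explorer.render_sample_data(my_sample['data']['CAM_FRONT'], ax=ax_cam)
plt.show()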
Example no. 2
def getModel(PCs, boxes, offset=0, scale=1.0, normalize=False):
    """Aggregate the cropped, box-centered points of several (PC, box) pairs into one model PointCloud."""

    if len(PCs) == 0:
        return PointCloud(np.ones((3, 0)))
    points = np.ones((PCs[0].points.shape[0], 0))

    for PC, box in zip(PCs, boxes):
        cropped_PC = cropAndCenterPC(PC,
                                     box,
                                     offset=offset,
                                     scale=scale,
                                     normalize=normalize)
        if cropped_PC.points.shape[1] > 0:
            points = np.concatenate([points, cropped_PC.points], axis=1)

    PC = PointCloud(points)

    return PC
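
As an illustration, the aggregation above can be exercised on synthetic data. The sketch below is an assumption-laden example: it presumes the same PointCloud, Box, and Quaternion classes used throughout these examples, plus the cropAndCenterPC helper that getModel calls.

# Illustrative only: build a model cloud for a static object observed in three frames.
import numpy as np
from pyquaternion import Quaternion

frames = [PointCloud(np.random.uniform(-3, 3, size=(3, 500))) for _ in range(3)]
boxes = [Box([0.0, 0.0, 0.0],                        # center
             [1.8, 4.2, 1.6],                        # width, length, height
             Quaternion(axis=[0, 0, 1], angle=0.0))  # orientation
         for _ in range(3)]

model_PC = getModel(frames, boxes, offset=0.1, scale=1.25)
print(model_PC.points.shape)  # (3, n): all cropped points concatenated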
Example no. 3
def getlabelPC(PC, box, offset=0, scale=1.0):
    """Return a per-point 0/1 label marking which points of PC lie inside the (scaled) box, enlarged by offset."""
    box_tmp = copy.deepcopy(box)
    new_PC = PointCloud(PC.points.copy())
    rot_mat = np.transpose(box_tmp.rotation_matrix)
    trans = -box_tmp.center

    # align data
    new_PC.translate(trans)
    box_tmp.translate(trans)
    new_PC.rotate(rot_mat)
    box_tmp.rotate(Quaternion(matrix=rot_mat))

    box_tmp.wlh = box_tmp.wlh * scale
    maxi = np.max(box_tmp.corners(), 1) + offset
    mini = np.min(box_tmp.corners(), 1) - offset

    x_filt_max = new_PC.points[0, :] < maxi[0]
    x_filt_min = new_PC.points[0, :] > mini[0]
    y_filt_max = new_PC.points[1, :] < maxi[1]
    y_filt_min = new_PC.points[1, :] > mini[1]
    z_filt_max = new_PC.points[2, :] < maxi[2]
    z_filt_min = new_PC.points[2, :] > mini[2]

    close = np.logical_and(x_filt_min, x_filt_max)
    close = np.logical_and(close, y_filt_min)
    close = np.logical_and(close, y_filt_max)
    close = np.logical_and(close, z_filt_min)
    close = np.logical_and(close, z_filt_max)

    new_label = np.zeros(new_PC.points.shape[1])
    new_label[close] = 1
    return new_label
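
A hedged sketch of how the labelling above might be used to split foreground from background points; scan_PC (a PointCloud) and gt_box (a Box) are placeholder inputs, not defined here.

# Illustrative only: mark which points of a scan fall inside a slightly enlarged ground-truth box.
labels = getlabelPC(scan_PC, gt_box, offset=0.2, scale=1.25)  # one 0/1 entry per point
inside_points = scan_PC.points[:, labels.astype(bool)]
print(f"{int(labels.sum())} of {labels.size} points lie inside the box")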
Example no. 4
def cropPC(PC, box, offset=0, scale=1.0):
    """Return a new PointCloud keeping only the points inside the axis-aligned bounds of the (scaled) box corners, enlarged by offset."""
    box_tmp = copy.deepcopy(box)
    box_tmp.wlh = box_tmp.wlh * scale
    maxi = np.max(box_tmp.corners(), 1) + offset
    mini = np.min(box_tmp.corners(), 1) - offset

    x_filt_max = PC.points[0, :] < maxi[0]
    x_filt_min = PC.points[0, :] > mini[0]
    y_filt_max = PC.points[1, :] < maxi[1]
    y_filt_min = PC.points[1, :] > mini[1]
    z_filt_max = PC.points[2, :] < maxi[2]
    z_filt_min = PC.points[2, :] > mini[2]

    close = np.logical_and(x_filt_min, x_filt_max)
    close = np.logical_and(close, y_filt_min)
    close = np.logical_and(close, y_filt_max)
    close = np.logical_and(close, z_filt_min)
    close = np.logical_and(close, z_filt_max)

    new_PC = PointCloud(PC.points[:, close])
    return new_PC
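
cropPC is the counterpart of the labelling function above: it returns a cropped PointCloud rather than a per-point mask. A brief sketch with the same placeholder inputs (scan_PC, gt_box):

# Illustrative only: keep a search region of roughly 2 m around the box.
search_region = cropPC(scan_PC, gt_box, offset=2.0, scale=1.0)
print(search_region.points.shape[1], "of", scan_PC.points.shape[1], "points kept")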
Example no. 5
    def getPCandBBfromPandas(self, box, calib):
        center = [box["x"], box["y"] - box["height"] / 2, box["z"]]
        size = [box["width"], box["length"], box["height"]]
        orientation = Quaternion(axis=[0, 1, 0],
                                 radians=box["rotation_y"]) * Quaternion(
                                     axis=[1, 0, 0], radians=np.pi / 2)
        BB = Box(center, size, orientation)

        try:
            # VELODYNE PointCloud
            velodyne_path = os.path.join(self.KITTI_velo, box["scene"],
                                         f'{box["frame"]:06}.bin')
            PC = PointCloud(
                np.fromfile(velodyne_path, dtype=np.float32).reshape(-1, 4).T)
            PC.transform(calib)
        except FileNotFoundError:
            # in case the Point cloud is missing
            # (0001/[000177-000180].bin)
            PC = PointCloud(np.array([[0, 0, 0]]).T)

        return PC, BB
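
A possible caller for the KITTI loader above. The dataset object (owning KITTI_velo and this method), the pandas DataFrame with the columns read above (scene, frame, x, y, z, width, length, height, rotation_y), and the 4x4 calib matrix are all assumptions in this sketch.

# Hypothetical usage: load one labelled object and crop its point cloud.
row = annotations.iloc[0]          # one row per labelled object
PC, BB = dataset.getPCandBBfromPandas(row, calib)
cropped = cropPC(PC, BB, offset=2.0)  # reuse the helper from Example no. 4
print(PC.points.shape, BB.center, BB.wlh)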
Example no. 6
    def render_annotation(self,
                          anntoken: str,
                          margin: float = 10,
                          view: np.ndarray = np.eye(4),
                          box_vis_level: BoxVisibility = 3) -> None:
        """
        Render selected annotation.
        :param anntoken: Sample_annotation token.
        :param margin: How many meters in each direction to include in LIDAR view.
        :param view: LIDAR view point.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        """

        ann_record = self.nusc.get('sample_annotation', anntoken)
        sample_record = self.nusc.get('sample', ann_record['sample_token'])

        assert 'LIDAR_TOP' in sample_record['data'], "No LIDAR_TOP in data, can't render."

        fig, axes = plt.subplots(1, 2, figsize=(18, 9))

        # Figure out which camera the object is fully visible in (this may return nothing)
        boxes, cam = [], []
        cams = [key for key in sample_record['data'].keys() if 'CAM' in key]
        for cam in cams:
            _, boxes, _ = self.nusc.get_sample_data(
                sample_record['data'][cam],
                box_vis_level=box_vis_level,
                selected_anntokens=[anntoken])
            if len(boxes) > 0:
                break  # We found an image that matches. Let's abort.
        assert len(boxes) > 0, "Could not find image where annotation is visible. Try using e.g. BoxVisibility.ANY."
        assert len(boxes) < 2, "Found multiple annotations. Something is wrong!"

        cam = sample_record['data'][cam]

        # Plot LIDAR view
        lidar = sample_record['data']['LIDAR_TOP']
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            lidar, selected_anntokens=[anntoken])

        PointCloud.from_file(data_path).render_height(axes[0], view=view)
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[0], view=view, colors=[c, c, c])
            corners = view_points(boxes[0].corners(), view, False)[:2, :]
            axes[0].set_xlim([
                np.min(corners[0, :]) - margin,
                np.max(corners[0, :]) + margin
            ])
            axes[0].set_ylim([
                np.min(corners[1, :]) - margin,
                np.max(corners[1, :]) + margin
            ])
            axes[0].axis('off')
            axes[0].set_aspect('equal')

        # Plot CAMERA view
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            cam, selected_anntokens=[anntoken])
        im = Image.open(data_path)
        axes[1].imshow(im)
        axes[1].set_title(self.nusc.get('sample_data', cam)['channel'])
        axes[1].axis('off')
        axes[1].set_aspect('equal')
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[1],
                       view=camera_intrinsic,
                       normalize=True,
                       colors=[c, c, c])
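
A minimal usage sketch for render_annotation, with the same assumed nusc and explorer objects as in Example no. 1.

# Hypothetical usage: render the first annotation of the first sample in LIDAR and camera views.
import matplotlib.pyplot as plt

anntoken = nusc.sample[0]['anns'][0]
explorer.render_annotation(anntoken, margin=10)
plt.show()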
Example no. 7
    def map_pointcloud_to_image(self, lidar_token: str, camera_token: str):
        """
        Given a lidar and camera sample_data token, load point-cloud and map it to the image plane.
        :param lidar_token: Lidar sample data token.
        :param camera_token: Camera sample data token.
        :return: (pointcloud <np.float: 2, n>, coloring <np.float: n>, image <Image>).
        """

        cam = self.nusc.get('sample_data', camera_token)
        top_lidar = self.nusc.get('sample_data', lidar_token)

        pc = PointCloud.from_file(
            osp.join(self.nusc.dataroot, top_lidar['filename']))
        im = Image.open(osp.join(self.nusc.dataroot, cam['filename']))

        # LIDAR points live in the lidar frame. So they need to be transformed via global to the image plane.

        # First step: transform the point cloud to ego vehicle frame for the timestamp of the LIDAR sweep.
        cs_record = self.nusc.get('calibrated_sensor',
                                  top_lidar['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Second step: transform to the global frame.
        poserecord = self.nusc.get('ego_pose', top_lidar['ego_pose_token'])
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
        pc.translate(np.array(poserecord['translation']))

        # Third step: transform into the ego vehicle frame for the timestamp of the image.
        poserecord = self.nusc.get('ego_pose', cam['ego_pose_token'])
        pc.translate(-np.array(poserecord['translation']))
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

        # Fourth step: transform into the camera.
        cs_record = self.nusc.get('calibrated_sensor',
                                  cam['calibrated_sensor_token'])
        pc.translate(-np.array(cs_record['translation']))
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

        # Fifth step: actually take a "picture" of the point cloud.

        # Grab the depths (camera frame z axis points away from the camera).
        depths = pc.points[2, :]

        # Use the depth (camera-frame z) as the coloring.
        coloring = depths

        # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
        points = view_points(pc.points[:3, :],
                             np.array(cs_record['camera_intrinsic']),
                             normalize=True)

        # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
        mask = np.ones(depths.shape[0], dtype=bool)
        mask = np.logical_and(mask, depths > 0)
        mask = np.logical_and(mask, points[0, :] > 1)
        mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
        mask = np.logical_and(mask, points[1, :] > 1)
        mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
        points = points[:, mask]
        coloring = coloring[mask]

        return points, coloring, im
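
A sketch of how the projection above is typically consumed: overlay the projected points on the image, coloured by depth. The two tokens and the explorer object are placeholders; the rest uses only the documented return values.

# Hypothetical usage: scatter the projected lidar points over the camera image.
import matplotlib.pyplot as plt

points, coloring, im = explorer.map_pointcloud_to_image(lidar_token, camera_token)
plt.figure(figsize=(9, 16))
plt.imshow(im)
plt.scatter(points[0, :], points[1, :], c=coloring, s=1)
plt.axis('off')
plt.show()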