Example #1
    def render_annotation(self, anntoken: str, margin: float = 10, view: np.ndarray = np.eye(4),
                          box_vis_level: BoxVisibility = 3) -> None:
        """
        Render selected annotation.
        :param anntoken: Sample_annotation token.
        :param margin: How many meters in each direction to include in LIDAR view.
        :param view: LIDAR view point.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        """

        ann_record = self.nusc.get('sample_annotation', anntoken)
        sample_record = self.nusc.get('sample', ann_record['sample_token'])

        assert 'LIDAR_TOP' in sample_record['data'].keys(), 'No LIDAR_TOP in data, cannot render.'

        fig, axes = plt.subplots(1, 2, figsize=(18, 9))

        # Figure out which camera the object is fully visible in (this may return nothing)
        boxes, cam = [], []
        cams = [key for key in sample_record['data'].keys() if 'CAM' in key]
        for cam in cams:
            _, boxes, _ = self.nusc.get_sample_data(sample_record['data'][cam], box_vis_level=box_vis_level,
                                                    selected_anntokens=[anntoken])
            if len(boxes) > 0:
                break  # We found an image that matches. Let's abort.
        assert len(boxes) > 0, "Could not find image where annotation is visible. Try using e.g. BoxVisibility.ANY."
        assert len(boxes) < 2, "Found multiple annotations. Something is wrong!"

        cam = sample_record['data'][cam]

        # Plot LIDAR view
        lidar = sample_record['data']['LIDAR_TOP']
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(lidar, selected_anntokens=[anntoken])

        PointCloud.from_file(data_path).render_height(axes[0], view=view)
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[0], view=view, colors=[c, c, c])
            corners = view_points(box.corners(), view, normalize=False)[:2, :]
            axes[0].set_xlim([np.min(corners[0, :]) - margin, np.max(corners[0, :]) + margin])
            axes[0].set_ylim([np.min(corners[1, :]) - margin, np.max(corners[1, :]) + margin])
            axes[0].axis('off')
            axes[0].set_aspect('equal')

        # Plot CAMERA view
        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(cam, selected_anntokens=[anntoken])
        im = Image.open(data_path)
        axes[1].imshow(im)
        axes[1].set_title(self.nusc.get('sample_data', cam)['channel'])
        axes[1].axis('off')
        axes[1].set_aspect('equal')
        for box in boxes:
            c = np.array(self.get_color(box.name)) / 255.0
            box.render(axes[1], view=camera_intrinsic, normalize=True, colors=[c, c, c])
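
# Usage sketch for render_annotation: a minimal, hedged example. It assumes `explorer` is an
# instance of the surrounding NuScenesExplorer-style class (e.g. explorer = NuScenesExplorer(nusc)
# with `nusc` initialised as in the setup further below) and picks an arbitrary annotation token
# straight from the sample_annotation table.
import matplotlib.pyplot as plt

anntoken = explorer.nusc.sample_annotation[0]['token']
explorer.render_annotation(anntoken, margin=15)
plt.show()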
Example #2
    def map_pointcloud_to_image(self, lidar_token: str, camera_token: str):
        """
        Given a lidar and camera sample_data token, load point-cloud and map it to the image plane.
        :param lidar_token: Lidar sample data token.
        :param camera_token: Camera sample data token.
        :return: (pointcloud <np.float: 2, n>, coloring <np.float: n>, image <Image>).
        """

        cam = self.nusc.get('sample_data', camera_token)
        top_lidar = self.nusc.get('sample_data', lidar_token)

        pc = PointCloud.from_file(osp.join(self.nusc.dataroot, top_lidar['filename']))
        im = Image.open(osp.join(self.nusc.dataroot, cam['filename']))

        # LIDAR points live in the lidar frame. So they need to be transformed via global to the image plane.

        # First step: transform the point cloud to ego vehicle frame for the timestamp of the LIDAR sweep.
        cs_record = self.nusc.get('calibrated_sensor', top_lidar['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Second step: transform to the global frame.
        poserecord = self.nusc.get('ego_pose', top_lidar['ego_pose_token'])
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
        pc.translate(np.array(poserecord['translation']))

        # Third step: transform into the ego vehicle frame for the timestamp of the image.
        poserecord = self.nusc.get('ego_pose', cam['ego_pose_token'])
        pc.translate(-np.array(poserecord['translation']))
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

        # Fourth step: transform into the camera.
        cs_record = self.nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
        pc.translate(-np.array(cs_record['translation']))
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

        # Fifth step: actually take a "picture" of the point cloud.

        # Grab the depths (camera frame z axis points away from the camera).
        depths = pc.points[2, :]

        # Use the depth for the coloring (the cloud is already in the camera frame, so z equals depth).
        coloring = depths

        # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
        points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)

        # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
        mask = np.ones(depths.shape[0], dtype=bool)
        mask = np.logical_and(mask, depths > 0)
        mask = np.logical_and(mask, points[0, :] > 1)
        mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
        mask = np.logical_and(mask, points[1, :] > 1)
        mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
        points = points[:, mask]
        coloring = coloring[mask]

        return points, coloring, im
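
# Usage sketch for map_pointcloud_to_image: a minimal, hedged example that projects the LIDAR_TOP
# sweep of the first sample into CAM_FRONT and overlays the points, coloured by depth. It assumes
# `explorer` is an instance of the surrounding NuScenesExplorer-style class.
import matplotlib.pyplot as plt

sample_record = explorer.nusc.sample[0]
points, coloring, im = explorer.map_pointcloud_to_image(sample_record['data']['LIDAR_TOP'],
                                                        sample_record['data']['CAM_FRONT'])
plt.figure(figsize=(9, 16))
plt.imshow(im)
plt.scatter(points[0, :], points[1, :], c=coloring, s=1)
plt.axis('off')
plt.show()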
Example #3
    def render_sample_data(self,
                           sample_data_token: str,
                           with_anns: bool = True,
                           box_vis_level: BoxVisibility = 3,
                           axes_limit: float = 40,
                           ax: Axes = None) -> None:
        """
        Render sample data onto axis.
        :param sample_data_token: Sample_data token.
        :param with_anns: Whether to draw annotations.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param axes_limit: Axes limit for lidar data (measured in meters).
        :param ax: Axes onto which to render.
        """

        sd_record = self.nusc.get('sample_data', sample_data_token)
        sensor_modality = sd_record['sensor_modality']

        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            sample_data_token, box_vis_level=box_vis_level)

        if sensor_modality == 'lidar':
            data = PointCloud.from_file(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))
            points = view_points(data.points[:3, :],
                                 np.eye(4),
                                 normalize=False)
            ax.scatter(points[0, :], points[1, :], c=points[2, :], s=1)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=[c, c, c])
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'camera':
            data = Image.open(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))
            ax.imshow(data)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax,
                               view=camera_intrinsic,
                               normalize=True,
                               colors=[c, c, c])
            ax.set_xlim(0, data.size[0])
            ax.set_ylim(data.size[1], 0)

        else:
            raise ValueError("RADAR rendering not implemented yet.")

        ax.axis('off')
        ax.set_title(sd_record['channel'])
        ax.set_aspect('equal')
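
# Usage sketch for render_sample_data: a minimal, hedged example that renders the LIDAR_TOP and
# CAM_FRONT sample_data of the first sample side by side. It assumes `explorer` is an instance of
# the surrounding NuScenesExplorer-style class.
import matplotlib.pyplot as plt

sample_record = explorer.nusc.sample[0]
fig, (ax_lidar, ax_cam) = plt.subplots(1, 2, figsize=(18, 9))
explorer.render_sample_data(sample_record['data']['LIDAR_TOP'], ax=ax_lidar)
explorer.render_sample_data(sample_record['data']['CAM_FRONT'], ax=ax_cam)
plt.show()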
Example #4
def export_scene_pointcloud(explorer: NuScenesExplorer, out_path: str, scene_token: str, channel: str = 'LIDAR_TOP',
                            min_dist: float = 3.0, max_dist: float = 30.0, verbose: bool = True) -> None:
    """
    Export the fused point clouds of a scene to an ASCII PCD file.
    This point-cloud can be viewed in your favorite 3D rendering tool, e.g. Meshlab or Maya.
    :param explorer: NuScenesExplorer instance.
    :param out_path: Output path to write the point-cloud to.
    :param scene_token: Unique identifier of scene to render.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    :return: <None>
    """

    # Check inputs.
    valid_channels = ['LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT', 'RADAR_BACK_LEFT',
                      'RADAR_BACK_RIGHT']
    camera_channels = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
    assert channel in valid_channels, 'Input channel {} not valid.'.format(channel)

    # Get records from DB.
    scene_rec = explorer.nusc.get('scene', scene_token)
    start_sample_rec = explorer.nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = explorer.nusc.get('sample_data', start_sample_rec['data'][channel])

    # Make list of frames
    cur_sd_rec = sd_rec
    sd_tokens = []
    while cur_sd_rec['next'] != '':
        cur_sd_rec = explorer.nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    # Write point-cloud.
    with open(out_path, 'w') as f:
        num_points_total = 0
        for sd_token in tqdm(sd_tokens):
            sc_rec = explorer.nusc.get('sample_data', sd_token)
            if verbose:
                print('Processing {}'.format(sc_rec['filename']))
            sample_rec = explorer.nusc.get('sample', sc_rec['sample_token'])
            lidar_token = sc_rec['token']
            lidar_rec = sc_rec
            filename = osp.join(explorer.nusc.dataroot, lidar_rec['filename'])
            pc = PointCloud.from_file(filename)

            # Get point cloud colors.
            coloring = np.ones((3, pc.points.shape[1])) * -1
            for cam_channel in camera_channels:
                camera_token = sample_rec['data'][cam_channel]
                cam_coloring, cam_mask = pointcloud_color_from_image(explorer.nusc, lidar_token, camera_token)
                coloring[:, cam_mask] = cam_coloring

            # Points live in their own reference frame. So they need to be transformed via global to the image plane.
            # First step: transform the point cloud to the ego vehicle frame for the timestamp of the sweep.
            cs_record = explorer.nusc.get('calibrated_sensor', lidar_rec['calibrated_sensor_token'])
            pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
            pc.translate(np.array(cs_record['translation']))

            # Optional: filter by distance to remove the ego vehicle and far-away points.
            dists_origin = np.sqrt(np.sum(pc.points[:3, :] ** 2, axis=0))
            keep = np.logical_and(min_dist <= dists_origin, dists_origin <= max_dist)
            pc.points = pc.points[:, keep]

            if verbose:
                print('Distance filter: Keeping %d of %d points...' % (keep.sum(), len(keep)))

            # Second step: transform to the global frame.
            poserecord = explorer.nusc.get('ego_pose', lidar_rec['ego_pose_token'])
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

            # Debug scratch (disabled): shift the points by a fixed offset, compute each point's
            # elevation angle and plot a histogram of those angles against hard-coded Velodyne
            # laser elevation angles.

            # Write every 10th point to file.
            j = 0
            num_points_sweep = 0
            for v in pc.points.transpose():
                if j % 10 == 0:
                    # TODO: find out how to extract intensity values from the binary data;
                    # temporarily set to a random number.
                    i = randint(0, 255)
                    # TODO: center the data by subtracting the mean instead of these hard-coded offsets.
                    v[0] = v[0] - 1000
                    v[1] = v[1] - 600
                    f.write("{v[0]:.8f} {v[1]:.8f} {v[2]:.8f} {i}\n".format(v=v, i=i))
                    num_points_sweep += 1
                j += 1
            num_points_total += num_points_sweep

    # Prepend the PCD header, now that the total number of points is known.
    with open(out_path, 'r+') as f:
        content = f.read()
        f.seek(0, 0)
        f.write("# .PCD v0.7 - Point Cloud Data file format\n"
                + "VERSION 0.7\n"
                + "FIELDS x y z intensity\n"
                + "SIZE 4 4 4 4\n"
                + "TYPE F F F F\n"
                + "COUNT 1 1 1 1\n"
                + "WIDTH " + str(num_points_total) + "\n"
                + "HEIGHT 1\n"
                + "VIEWPOINT 0 0 0 1 0 0 0\n"
                + "POINTS " + str(num_points_total) + "\n"
                + "DATA ascii\n"
                + content)
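
# Usage sketch for the PCD export above: a minimal, hedged example that fuses the LIDAR_TOP sweeps
# of the first scene into a single ASCII PCD file. It assumes `explorer` wraps an initialised
# NuScenes object (e.g. explorer = NuScenesExplorer(nusc)); 'scene0.pcd' is just an illustrative
# output path.
scene_rec = explorer.nusc.scene[0]
export_scene_pointcloud(explorer, 'scene0.pcd', scene_rec['token'],
                        channel='LIDAR_TOP', min_dist=3.0, max_dist=30.0, verbose=False)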
Example #5
def pointcloud_color_from_image(nusc, pointsensor_token: str, camera_token: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Given a point sensor (lidar/radar) token and camera sample_data token, load point-cloud and map it to the image
    plane, then retrieve the colors of the closest image pixels.
    :param pointsensor_token: Lidar/radar sample_data token.
    :param camera_token: Camera sample data token.
    :return (coloring <np.float: 3, n>, mask <np.bool: m>). Returns the colors for n points that reproject into the
        image out of m total points. The mask indicates which points are selected.
    """

    cam = nusc.get('sample_data', camera_token)
    pointsensor = nusc.get('sample_data', pointsensor_token)
    pc = PointCloud.from_file(osp.join(nusc.dataroot, pointsensor['filename']))
    im = Image.open(osp.join(nusc.dataroot, cam['filename']))

    # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
    # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
    cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform to the global frame.
    poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # Third step: transform into the ego vehicle frame for the timestamp of the image.
    poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
    pc.translate(-np.array(poserecord['translation']))
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

    # Fourth step: transform into the camera.
    cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
    pc.translate(-np.array(cs_record['translation']))
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

    # Fifth step: actually take a "picture" of the point cloud.
    # Grab the depths (camera frame z axis points away from the camera).
    depths = pc.points[2, :]

    # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
    points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)

    # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
    mask = np.ones(depths.shape[0], dtype=bool)
    mask = np.logical_and(mask, depths > 0)
    mask = np.logical_and(mask, points[0, :] > 1)
    mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
    mask = np.logical_and(mask, points[1, :] > 1)
    mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
    points = points[:, mask]

    # Pick the colors of the points
    im_data = np.array(im)
    coloring = np.zeros(points.shape)
    for i, p in enumerate(points.transpose()):
        point = p[:2].round().astype(np.int32)
        coloring[:, i] = im_data[point[1], point[0], :]

    return coloring, mask
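
# Usage sketch for pointcloud_color_from_image: a minimal, hedged example that colours the
# LIDAR_TOP points of the first sample with the CAM_FRONT pixels they project onto. It assumes
# `nusc` is an initialised NuScenes instance as in the setup shown further below.
sample_record = nusc.sample[0]
coloring, mask = pointcloud_color_from_image(nusc,
                                             sample_record['data']['LIDAR_TOP'],
                                             sample_record['data']['CAM_FRONT'])
print('Coloured {} of {} points'.format(coloring.shape[1], mask.shape[0]))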
Example #6
def export_scene_pointcloud(explorer: NuScenesExplorer,
                            out_path: str,
                            scene_token: str,
                            channel: str = 'LIDAR_TOP',
                            min_dist: float = 3.0,
                            max_dist: float = 30.0,
                            verbose: bool = True) -> None:
    """
    Export the fused point clouds of a scene to a plain ASCII file with one "x y z" line per point.
    This point-cloud can be viewed in your favorite 3D rendering tool, e.g. Meshlab or Maya.
    :param explorer: NuScenesExplorer instance.
    :param out_path: Output path to write the point-cloud to.
    :param scene_token: Unique identifier of scene to render.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    :return: <None>
    """

    # Check inputs.
    valid_channels = [
        'LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT',
        'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT'
    ]
    camera_channels = [
        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT',
        'CAM_BACK', 'CAM_BACK_RIGHT'
    ]
    assert channel in valid_channels, 'Input channel {} not valid.'.format(
        channel)

    # Get records from DB.
    scene_rec = explorer.nusc.get('scene', scene_token)
    start_sample_rec = explorer.nusc.get('sample',
                                         scene_rec['first_sample_token'])
    sd_rec = explorer.nusc.get('sample_data',
                               start_sample_rec['data'][channel])

    # Make list of frames
    cur_sd_rec = sd_rec
    sd_tokens = []
    while cur_sd_rec['next'] != '':
        cur_sd_rec = explorer.nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    # Write point-cloud.
    with open(out_path, 'w') as f:
        for sd_token in tqdm(sd_tokens):
            sc_rec = explorer.nusc.get('sample_data', sd_token)
            if verbose:
                print('Processing {}'.format(sc_rec['filename']))
            sample_rec = explorer.nusc.get('sample', sc_rec['sample_token'])
            lidar_token = sc_rec['token']
            lidar_rec = sc_rec
            pc = PointCloud.from_file(
                osp.join(explorer.nusc.dataroot, lidar_rec['filename']))

            # Get point cloud colors.
            coloring = np.ones((3, pc.points.shape[1])) * -1
            for cam_channel in camera_channels:
                camera_token = sample_rec['data'][cam_channel]
                cam_coloring, cam_mask = pointcloud_color_from_image(
                    explorer.nusc, lidar_token, camera_token)
                coloring[:, cam_mask] = cam_coloring

            # Points live in their own reference frame. So they need to be transformed via global to the image plane.
            # First step: transform the point cloud to the ego vehicle frame for the timestamp of the sweep.
            cs_record = explorer.nusc.get('calibrated_sensor',
                                          lidar_rec['calibrated_sensor_token'])
            pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
            pc.translate(np.array(cs_record['translation']))

            # Optional: filter by distance to remove the ego vehicle and far-away points.
            dists_origin = np.sqrt(np.sum(pc.points[:3, :]**2, axis=0))
            keep = np.logical_and(min_dist <= dists_origin,
                                  dists_origin <= max_dist)
            pc.points = pc.points[:, keep]
            if verbose:
                print('Distance filter: Keeping %d of %d points...' %
                      (keep.sum(), len(keep)))

            # Second step: transform to the global frame.
            poserecord = explorer.nusc.get('ego_pose',
                                           lidar_rec['ego_pose_token'])
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

            # Write points to file.
            # TODO: also write intensity values.
            for v in pc.points.transpose():
                f.write("{v[0]:.8f} {v[1]:.8f} {v[2]:.8f}\n".format(v=v))

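
# Sketch: the exporter above writes plain whitespace-separated "x y z" lines, so the result can be
# loaded back for a quick sanity check with numpy. This is an illustrative snippet; 'scene0.xyz'
# stands in for whatever out_path was used.
import numpy as np

fused = np.loadtxt('scene0.xyz')  # shape: (num_points, 3)
print('Loaded {} fused points; z range {:.2f} m to {:.2f} m'.format(
    fused.shape[0], fused[:, 2].min(), fused[:, 2].max()))
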
from nuscenes_utils.nuscenes import NuScenes

nusc = NuScenes(version='v0.1',
                dataroot='datasets/nuscenes/nuscenes_teaser_meta_v1',
                verbose=True)
pointsensor_channel = 'LIDAR_TOP'
camera_channel = 'CAM_FRONT'
my_sample = nusc.sample[0]
sample_token = my_sample['token']
sample_record = nusc.get('sample', sample_token)
pointsensor_token = sample_record['data'][pointsensor_channel]
camera_token = sample_record['data'][camera_channel]
cam = nusc.get('sample_data', camera_token)
img = cv2.imread(os.path.join(nusc.dataroot, cam['filename']))
pointsensor = nusc.get('sample_data', pointsensor_token)
point_cloud = PointCloud.from_file(os.path.join(nusc.dataroot, pointsensor['filename']))
# First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
point_cloud.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
point_cloud.translate(np.array(cs_record['translation']))
# Fourth step: transform the point-cloud into the camera frame (the global-frame steps are skipped here).
cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
point_cloud.translate(-np.array(cs_record['translation']))
point_cloud.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)
depths = point_cloud.points[2, :]
distances = []
for i in range(len(point_cloud.points[0, :])):
    point = point_cloud.points[:, i]
    distance = sqrt((point[0] ** 2) + (point[1] ** 2) + (point[2] ** 2))
    distances.append(distance)
points = view_points(point_cloud.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)
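
# Sketch of a possible continuation: the script above stops after the view_points projection, so
# `points` still contains points behind the camera or outside the image. The mask below mirrors the
# filtering used in map_pointcloud_to_image above; `img` and `depths` come from the script itself.
height, width = img.shape[:2]
mask = (depths > 0) \
       & (points[0, :] > 1) & (points[0, :] < width - 1) \
       & (points[1, :] > 1) & (points[1, :] < height - 1)
points_in_image = points[:, mask]
print('{} of {} points project into the image'.format(points_in_image.shape[1], mask.shape[0]))
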
def create_tf_example(nusc, sample_token, **kwargs):
    """Creates a tf.Example proto from sample image+.

    :param sample_token: [str] the sample token pointing to a specific nuscenes sample

    :returns: [tf.train.Example] The created tf.Example containing image plus
    """

    # Get the sensor_data from nuscenes data for a specific sample
    sample = nusc.get('sample', sample_token)

    # Get Radar Sensor
    radar_token = sample['data'][RADAR_CHANNEL]
    radar_sample_data = nusc.get('sample_data', radar_token)
    radar_calibrated_sensor = nusc.get('calibrated_sensor', radar_sample_data['calibrated_sensor_token'])
    radar_rotation = radar_calibrated_sensor['rotation']
    radar_translation = radar_calibrated_sensor['translation']
    # Get radar sensor data
    radar_file_path = os.path.join(nusc.dataroot, radar_sample_data['filename'])
    radar_data = PointCloud.load_pcd_bin(radar_file_path)  # Load radar points
    radar_data = np.array(radar_data, dtype=DATATYPE)  
    radar_data = np.reshape(radar_data, newshape=[radar_data.size,]) # tf_record only takes 1-D array
     

    # Get Camera Data
    camera_token = sample['data'][CAM_CHANNEL]
    camera_sample_data = nusc.get('sample_data', camera_token)
    camera_calibrated_sensor = nusc.get('calibrated_sensor', camera_sample_data['calibrated_sensor_token'])
    camera_intrinsic = np.array(camera_calibrated_sensor['camera_intrinsic'], dtype=DATATYPE)
    camera_intrinsic = np.reshape(camera_intrinsic, newshape=[camera_intrinsic.size,])# tf_record only takes 1-D array

    camera_rotation = camera_calibrated_sensor['rotation']
    camera_translation = camera_calibrated_sensor['translation']
    # Get camera sensor data
    camera_file_path = os.path.join(nusc.dataroot, camera_sample_data['filename'])
    with tf.gfile.GFile(camera_file_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image_format = 'jpeg'
    # image = PIL.Image.open(encoded_jpg_io)
    key = hashlib.sha256(encoded_jpg).hexdigest()


    # Get the labels
    boxes, class_names, class_indices = get_label_data_2D_bounding_boxes(nusc, sample_token, sensor_channels=SENSOR_CHANNELS)
    
    # Box Dimensions
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []

    if len(boxes) > 0:
        boxes = np.array(boxes)
        xmins = boxes[:, 0]  # List of normalized left x coordinates in bounding box (1 per box).
        xmaxs = boxes[:, 2]  # List of normalized right x coordinates in bounding box (1 per box).
        ymins = boxes[:, 1]  # List of normalized top y coordinates in bounding box (1 per box).
        ymaxs = boxes[:, 3]  # List of normalized bottom y coordinates in bounding box (1 per box).

    # Classification data
    category_names = [c.encode('utf8') for c in class_names] # List of string class name of bounding box (1 per box)
    category_ids = class_indices # List of integer class id of bounding box (1 per box)

    feature_dict = {
        'image/calibration/intrinsic':
            dataset_util.float_list_feature(camera_intrinsic),
        'image/calibration/rotation':
            dataset_util.float_list_feature(camera_rotation),
        'image/calibration/translation':
            dataset_util.float_list_feature(camera_translation),
        'image/key/sha256':
            dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded':
            dataset_util.bytes_feature(encoded_jpg),
        'image/format':
            dataset_util.bytes_feature(image_format.encode('utf8')),
        'image/object/bbox/xmin':
            dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax':
            dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin':
            dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax':
            dataset_util.float_list_feature(ymaxs),
        'image/object/class/text':
            dataset_util.bytes_list_feature(category_names),
        'image/object/class/label': 
            dataset_util.int64_list_feature(category_ids),
        'radar/raw':
            dataset_util.float_list_feature(radar_data),
        'radar/calibration/rotation':
            dataset_util.float_list_feature(radar_rotation),
        'radar/calibration/translation':
            dataset_util.float_list_feature(radar_translation),
        'sample_token':
            dataset_util.bytes_feature(str(sample_token).encode('utf8')),

    }
        
    example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
    return key, example
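
# Usage sketch for create_tf_example: a minimal, hedged example that serialises a few samples into
# a TFRecord file. It assumes the module-level constants used above (RADAR_CHANNEL, CAM_CHANNEL,
# SENSOR_CHANNELS, DATATYPE), the dataset_util / get_label_data_2D_bounding_boxes helpers and an
# initialised `nusc` are available, and uses the TF1-style writer to match the tf.gfile call above.
with tf.python_io.TFRecordWriter('nuscenes_train.tfrecord') as writer:
    for sample in nusc.sample[:10]:
        key, tf_example = create_tf_example(nusc, sample['token'])
        writer.write(tf_example.SerializeToString())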
Example #9
from random import randint

from nuscenes_utils.data_classes import PointCloud
import os
from nuscenes_utils.nuscenes import NuScenes

nusc = NuScenes()
input_path = os.path.join(nusc.dataroot, 'samples/LIDAR_TOP/')
output_path = os.path.join(nusc.dataroot, 'samples/LIDAR_TOP_ASCII/')
for idx, filename in enumerate(sorted(os.listdir(input_path))):
    if idx == 0:
        continue
    pc = PointCloud.from_file(input_path + filename)
    num_points = len(pc.points.transpose())
    with open(output_path + str(idx).zfill(6) + '.pcd', 'w') as f:
        # mean_x = sum(pc.points[0])/num_points
        # mean_y = sum(pc.points[1])/num_points
        # mean_z = sum(pc.points[2])/num_points
        # pc.points[0] -= mean_x
        # pc.points[1] -= mean_y
        # pc.points[2] -= mean_z

        # min and max values
        print('xmin: ', str(min(pc.points[0])))
        print('xmax: ', str(max(pc.points[0])))
        print('ymin: ', str(min(pc.points[1])))
        print('ymax: ', str(max(pc.points[1])))
        print('zmin: ', str(min(pc.points[2])))
        print('zmax: ', str(max(pc.points[2])))

        for v in pc.points.transpose():
            # Assumed completion (the snippet is truncated here): write ASCII "x y z intensity"
            # lines, mirroring the PCD export above, with a random placeholder intensity.
            i = randint(0, 255)
            f.write("{v[0]:.8f} {v[1]:.8f} {v[2]:.8f} {i}\n".format(v=v, i=i))