Example #1
    parser.add_argument('--nuscene_root', dest='dataroot',
                        help='NuScenes dataroot',
                        default='../data/nuscenes')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    nusc = NuScenes(version='v0.1', dataroot=args.dataroot, verbose=True)
    fig = plt.figure(figsize=(16, 6))

    for i in tqdm(range(88, len(nusc.scene))):
        scene = nusc.scene[i]
        scene_rec = nusc.get('scene', scene['token'])
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])

        f_rad_rec = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])
        f_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT'])

        has_more_frames = True
        while has_more_frames:
            bboxes = []
            ax1 = fig.add_subplot(1, 1, 1)
            # ax2 = fig.add_subplot(1, 2, 2)
            camera_token = f_cam_rec['token']
            radar_token = f_rad_rec['token']

            ## FRONT CAM + RADAR
            impath, boxes, camera_intrinsic = nusc.get_sample_data(camera_token)
Example #2
"""
Exports a video of each scene (with annotations) to disk.
"""
import os

from nuscenes_utils.nuscenes import NuScenes

# Load NuScenes class
nusc = NuScenes()
scene_tokens = [s['token'] for s in nusc.scene]

# Create output directory
out_dir = os.path.expanduser('~/nuscenes-visualization/scene-videos')
if not os.path.isdir(out_dir):
    os.makedirs(out_dir)

# Write videos to disk
for scene_token in scene_tokens:
    scene = nusc.get('scene', scene_token)
    print('Writing scene %s' % scene['name'])
    out_path = os.path.join(out_dir, scene['name']) + '.avi'
    if not os.path.exists(out_path):
        nusc.render_scene(scene['token'], out_path=out_path)
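The script above exports every scene in the loaded split. A minimal variation, sketched below, restricts the export to an explicit allow-list of scene names; the names in wanted are placeholders, and only calls already used in this example (nusc.get, nusc.render_scene) are assumed.

wanted = {'scene-0001', 'scene-0002'}  # placeholder scene names

for scene_token in scene_tokens:
    scene = nusc.get('scene', scene_token)
    if scene['name'] not in wanted:
        continue
    out_path = os.path.join(out_dir, scene['name'] + '.avi')
    if not os.path.exists(out_path):
        nusc.render_scene(scene['token'], out_path=out_path)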
Example #3
from nuscenes_utils.geometry_utils import BoxVisibility

#-------------------------------------------------------------------------------
if __name__ == '__main__':

    nusc = NuScenes(version='v0.1', dataroot='../data/nuscenes', verbose=True)
    # fig, axes = plt.subplots(1, 1, figsize=(16, 9))
    fig = plt.figure(figsize=(16, 6))

    skip_scenes = 30
    for scene in nusc.scene:
        skip_scenes -= 1
        if skip_scenes > 0:
            continue

        scene_rec = nusc.get('scene', scene['token'])
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])

        f_rad_rec = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])
        br_rad_rec = nusc.get('sample_data',
                              sample_rec['data']['RADAR_BACK_RIGHT'])
        bl_rad_rec = nusc.get('sample_data',
                              sample_rec['data']['RADAR_BACK_LEFT'])
        fr_rad_rec = nusc.get('sample_data',
                              sample_rec['data']['RADAR_FRONT_RIGHT'])
        fl_rad_rec = nusc.get('sample_data',
                              sample_rec['data']['RADAR_FRONT_LEFT'])

        f_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT'])
        b_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_BACK'])
        fr_cam_rec = nusc.get('sample_data',
                              sample_rec['data']['CAM_FRONT_RIGHT'])
Example #4
import cv2
import numpy as np
from pyquaternion import Quaternion
import os
from nuscenes_utils.data_classes import PointCloud
from nuscenes_utils.geometry_utils import view_points
from nuscenes_utils.nuscenes import NuScenes

nusc = NuScenes(version='v0.1',
                dataroot='datasets/nuscenes/nuscenes_teaser_meta_v1',
                verbose=True)
pointsensor_channel = 'LIDAR_TOP'
camera_channel = 'CAM_FRONT'
my_sample = nusc.sample[0]
sample_token = my_sample['token']
sample_record = nusc.get('sample', sample_token)
pointsensor_token = sample_record['data'][pointsensor_channel]
camera_token = sample_record['data'][camera_channel]
cam = nusc.get('sample_data', camera_token)
img = cv2.imread(os.path.join(nusc.dataroot, cam['filename']))
pointsensor = nusc.get('sample_data', pointsensor_token)
point_cloud = PointCloud.from_file(os.path.join(nusc.dataroot, pointsensor['filename']))
# First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
point_cloud.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
point_cloud.translate(np.array(cs_record['translation']))
# Fourth step: transform the point-cloud into the camera frame.
# (The intermediate transforms via the global frame are shown in full in Example #8.)
cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
point_cloud.translate(-np.array(cs_record['translation']))
point_cloud.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)
depths = point_cloud.points[2, :]
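The snippet above stops after computing the camera-frame depths. As a hedged continuation, modeled on the full projection pipeline in Example #8, the sketch below maps the points onto the image with view_points, keeps those that fall inside the frame, and draws them on the OpenCV image; the output filename is a placeholder.

# Project camera-frame points onto the image plane (3 x N -> pixel coordinates).
points = view_points(point_cloud.points[:3, :],
                     np.array(cs_record['camera_intrinsic']), normalize=True)

# Keep points in front of the camera and inside the image (1-pixel margin).
mask = depths > 0
mask &= (points[0, :] > 1) & (points[0, :] < img.shape[1] - 1)
mask &= (points[1, :] > 1) & (points[1, :] < img.shape[0] - 1)
points = points[:, mask]

# Draw the projected points and save the overlay (placeholder output path).
for u, v in points[:2, :].T:
    cv2.circle(img, (int(round(u)), int(round(v))), 2, (0, 0, 255), -1)
cv2.imwrite('lidar_on_cam_front.jpg', img)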
Example #5
import os

import numpy as np

from nuscenes_utils.data_classes import PointCloud
from nuscenes_utils.geometry_utils import BoxVisibility, view_points
from nuscenes_utils.nuscenes import NuScenes

nusc = NuScenes(version='v0.1',
                dataroot='nuscenes/nuscenes_teaser_meta_v1/',
                verbose=True)

# output_path = 'input/NuScenes/Annotations/'
output_path = 'tmp/'

# iterate over all 3977 samples
for idx, sample in enumerate(nusc.sample):
    sample_token = sample['token']
    sample_record = nusc.get('sample', sample_token)
    selected_data = {}
    for channel, sample_data_token in sample_record['data'].items():
        sample_data_record = nusc.get('sample_data', sample_data_token)
        sensor_modality = sample_data_record['sensor_modality']
        if sensor_modality in ['lidar']:
            # create output directory
            os.makedirs(output_path + channel, exist_ok=True)
            selected_data[channel] = sample_data_token
            # boxes returned are transformed from global to camera/LIDAR space within get_sample_data
            data_path, boxes, camera_intrinsic = nusc.get_sample_data(
                sample_data_token, box_vis_level=BoxVisibility.IN_FRONT)
            with open(output_path + channel + '/' + str(idx).zfill(6) + '.txt',
                      'w') as file_writer:
                for box in boxes:
                    category = box.name
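The listing is cut off inside the box loop. A hedged sketch of how the body might continue is given below: one line per box with its category, center, size and yaw, using the Box attributes center, wlh and orientation from the devkit's data classes. The column layout is an assumption, not the original script's format.

for box in boxes:
    category = box.name
    x, y, z = box.center                        # box center in the sensor frame
    w, l, h = box.wlh                           # width, length, height in meters
    yaw, _, _ = box.orientation.yaw_pitch_roll  # heading from the box quaternion
    file_writer.write('%s %.2f %.2f %.2f %.2f %.2f %.2f %.2f\n'
                      % (category, x, y, z, w, l, h, yaw))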
Example #6
# Function in helpers.py to load all the data
map_data_all = load_traj_data(nusc, only_vehicles=False)

instance_id = 0
for scene_ix in range(len(map_data_all)):
    textFile = open('traj_format/' + str(scene_ix) + '.txt', 'w')
    # First iterate over all frames (t is the FRAME_ID).
    for t in range(map_data_all[scene_ix]['pos'].shape[1]):
        # For each sample, iterate over all its annotations.
        for i in range(map_data_all[scene_ix]['pos'].shape[0]):
            # Because of how we built the map_data_all matrix, the first
            # annotation always corresponds to the ego vehicle.

            if (i != 0):
                ins_token = map_data_all[scene_ix]['instance_tokens'][i]
                instance = nusc.get('instance', ins_token)
                ann = nusc.get('sample_annotation',
                               instance['first_annotation_token'])
                attribute = nusc.get('attribute', ann['attribute_tokens'][0])
                name = ann['category_name']

            else:
                name = 'vehicle.ego'

            x = np.round(map_data_all[scene_ix]['pos'][i, t, 0])
            y = np.round(map_data_all[scene_ix]['pos'][i, t, 1])

            #t is the frame id
            if (x > 0 and y > 0):
                sentence = str(t) + ' ' + str(i) + ' ' + str(x) + ' ' + str(
                    y) + ' ' + name + '\n'
Example #7
def export_scene_pointcloud(nusc: NuScenes, out_path: str, scene_token: str, channel: str='LIDAR_TOP',
                            min_dist: float=3.0, max_dist: float=30.0, verbose: bool=True) -> None:
    """
    :param nusc: NuScenes instance.
    :param out_path: Output path to write the point-cloud to.
    :param scene_token: Unique identifier of scene to render.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    """

    # Check inputs.
    valid_channels = ['LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT', 'RADAR_BACK_LEFT',
                      'RADAR_BACK_RIGHT']
    camera_channels = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
    assert channel in valid_channels, 'Input channel {} not valid.'.format(channel)

    # Get records from DB.
    scene_rec = nusc.get('scene', scene_token)
    start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = nusc.get('sample_data', start_sample_rec['data'][channel])

    # Make list of frames
    cur_sd_rec = sd_rec
    sd_tokens = []
    while cur_sd_rec['next'] != '':
        cur_sd_rec = nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    # Write point-cloud.
    with open(out_path, 'w') as f:
        f.write("OBJ File:\n")

        for sd_token in tqdm(sd_tokens):
            if verbose:
                print('Processing {}'.format(sd_rec['filename']))
            sc_rec = nusc.get('sample_data', sd_token)
            sample_rec = nusc.get('sample', sc_rec['sample_token'])
            lidar_token = sd_rec['token']
            lidar_rec = nusc.get('sample_data', lidar_token)
            pc = PointCloud.from_file(osp.join(nusc.dataroot, lidar_rec['filename']))

            # Get point cloud colors.
            coloring = np.ones((3, pc.points.shape[1])) * -1
            for channel in camera_channels:
                camera_token = sample_rec['data'][channel]
                cam_coloring, cam_mask = pointcloud_color_from_image(nusc, lidar_token, camera_token)
                coloring[:, cam_mask] = cam_coloring

            # Points live in the sensor frame, so they need to be transformed into the global frame before writing.
            # First step: transform the point cloud to the ego vehicle frame for the timestamp of the sweep.
            cs_record = nusc.get('calibrated_sensor', lidar_rec['calibrated_sensor_token'])
            pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
            pc.translate(np.array(cs_record['translation']))

            # Optional: filter by distance to drop returns from the ego vehicle (< min_dist) and far-away points (> max_dist).
            dists_origin = np.sqrt(np.sum(pc.points[:3, :] ** 2, axis=0))
            keep = np.logical_and(min_dist <= dists_origin, dists_origin <= max_dist)
            pc.points = pc.points[:, keep]
            coloring = coloring[:, keep]
            if verbose:
                print('Distance filter: Keeping %d of %d points...' % (keep.sum(), len(keep)))

            # Second step: transform to the global frame.
            poserecord = nusc.get('ego_pose', lidar_rec['ego_pose_token'])
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

            # Write points to file
            for (v, c) in zip(pc.points.transpose(), coloring.transpose()):
                if (c == -1).any():
                    # Ignore points without a color.
                    pass
                else:
                    f.write("v {v[0]:.8f} {v[1]:.8f} {v[2]:.8f} {c[0]:.4f} {c[1]:.4f} {c[2]:.4f}\n".format(v=v, c=c/255.0))

            if not sd_rec['next'] == "":
                sd_rec = nusc.get('sample_data', sd_rec['next'])
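A hedged usage sketch for export_scene_pointcloud: load the dataset the same way as the other examples on this page and write the first scene's colored LIDAR_TOP point cloud to an .obj file (the output filename is a placeholder).

nusc = NuScenes(version='v0.1', dataroot='../data/nuscenes', verbose=True)
scene_token = nusc.scene[0]['token']
export_scene_pointcloud(nusc, 'scene0_lidar.obj', scene_token,
                        channel='LIDAR_TOP', min_dist=3.0, max_dist=30.0)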
Example #8
def pointcloud_color_from_image(nusc: NuScenes, pointsensor_token: str, camera_token: str) -> Tuple[np.array, np.array]:
    """
    Given a point sensor (lidar/radar) token and camera sample_data token, load point-cloud and map it to the image
    plane, then retrieve the colors of the closest image pixels.
    :param nusc: NuScenes instance.
    :param pointsensor_token: Lidar/radar sample_data token.
    :param camera_token: Camera sample data token.
    :return (coloring <np.float: 3, n>, mask <np.bool: m>). Returns the colors for n points that reproject into the
        image out of m total points. The mask indicates which points are selected.
    """

    cam = nusc.get('sample_data', camera_token)
    pointsensor = nusc.get('sample_data', pointsensor_token)

    pc = PointCloud.from_file(osp.join(nusc.dataroot, pointsensor['filename']))
    im = Image.open(osp.join(nusc.dataroot, cam['filename']))

    # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
    # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
    cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform to the global frame.
    poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # Third step: transform into the ego vehicle frame for the timestamp of the image.
    poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
    pc.translate(-np.array(poserecord['translation']))
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

    # Fourth step: transform into the camera.
    cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
    pc.translate(-np.array(cs_record['translation']))
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

    # Fifth step: actually take a "picture" of the point cloud.
    # Grab the depths (camera frame z axis points away from the camera).
    depths = pc.points[2, :]

    # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
    points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)

    # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
    mask = np.ones(depths.shape[0], dtype=bool)
    mask = np.logical_and(mask, depths > 0)
    mask = np.logical_and(mask, points[0, :] > 1)
    mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
    mask = np.logical_and(mask, points[1, :] > 1)
    mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
    points = points[:, mask]

    # Pick the colors of the points
    im_data = np.array(im)
    coloring = np.zeros(points.shape)
    for i, p in enumerate(points.transpose()):
        point = p[:2].round().astype(np.int32)
        coloring[:, i] = im_data[point[1], point[0], :]

    return coloring, mask
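A hedged usage sketch for pointcloud_color_from_image, assuming a NuScenes instance nusc as in the earlier examples: it colors the LIDAR_TOP cloud of the first sample from the front camera and reports how many points project into the image.

sample = nusc.sample[0]
lidar_token = sample['data']['LIDAR_TOP']
cam_token = sample['data']['CAM_FRONT']
coloring, mask = pointcloud_color_from_image(nusc, lidar_token, cam_token)
print('%d of %d points project into the image' % (mask.sum(), mask.shape[0]))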
Example #9
from nuscenes_utils.nuscenes import NuScenes

nusc = NuScenes()
for (sensor, sd_token) in nusc.sample[0]['data'].items():
    cs_token = nusc.get('sample_data', sd_token)['calibrated_sensor_token']
    translation = nusc.get('calibrated_sensor', cs_token)['translation']
    print("%s %s" % (sensor, translation))