def main(_):
    assert FLAGS.output_dir, '`output_dir` missing.'

    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)
    train_output_path = os.path.join(FLAGS.output_dir, 'nuscenes_train.record')
    val_output_path = os.path.join(FLAGS.output_dir, 'nuscenes_val.record')
    testdev_output_path = os.path.join(FLAGS.output_dir, 'nuscenes_testdev.record')

    # Load the NuScenes database
    nusc = NuScenes(version='v0.1', dataroot=NUSCENES_PATH, verbose=True)
    
    # Get all sample tokens
    val_scenes = 10  # hold out the last 10 scenes for validation
    train_sample_tokens = list(get_sample_tokens(nusc, range(len(nusc.scene)-val_scenes)))
    val_sample_tokens = list(get_sample_tokens(nusc, range(len(nusc.scene)-val_scenes, len(nusc.scene))))

    _create_tf_record_from_nuscenes_database(nusc, train_sample_tokens,
        train_output_path,
        num_shards=100)
    _create_tf_record_from_nuscenes_database(nusc, val_sample_tokens,
        val_output_path,
        num_shards=10)
    # _create_tf_record_from_nuscenes_database(
    #     FLAGS.testdev_annotations_file,
    #     FLAGS.test_image_dir,
    #     testdev_output_path,
    #     FLAGS.include_masks,
    #     num_shards=100)
    return 0
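
# Entry point, as a sketch: the original script presumably defines FLAGS via
# tf.app.flags and imports tensorflow as tf earlier in the file; with that in
# place the converter is launched through TF1's app wrapper.
if __name__ == '__main__':
    tf.app.run(main)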
Example #2
NUSCENES_SDK_PATH = '/python-sdk'
NUSCENES_DATABASE_PATH = '/python-sdk/data/nuscenes'

import sys
sys.path.append(NUSCENES_SDK_PATH)
import numpy as np

from nuscenes_utils.nuscenes import NuScenes
from nusc_data_extractor import NuScenesDataExtractor

""" Start """

nusc = NuScenes(dataroot=NUSCENES_DATABASE_PATH)
de_nusc = NuScenesDataExtractor(nusc=nusc)

# select scene
scene_name = 'scene-0123'
de_nusc.set_scene(scene_name)

# extract data
lidar_points = de_nusc.get_lidar_pointcloud()
radar_points, radar_velocities = de_nusc.get_radar_pointcloud()
# Separate names for the all-sensor variant so the result above is not overwritten.
all_radar_points, all_radar_velocities = de_nusc.get_all_radar_pointclouds()
camera_image = de_nusc.get_camera_image()

# advance timestep
de_nusc.advance()
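
# A hedged sketch of stepping through several frames of the scene; the frame
# count of 10 is an illustrative assumption, since advance() is the only
# stepping primitive shown by this extractor.
for _ in range(10):
    lidar_points = de_nusc.get_lidar_pointcloud()
    de_nusc.advance()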


print(" [*] Done.")
Example #3

import argparse

from nuscenes_utils.nuscenes import NuScenes

# Note: convert_point_cloud(...) is defined earlier in the original script and
# is elided from this snippet.

if __name__ == '__main__':
    # Read input parameters
    parser = argparse.ArgumentParser(description='Export a scene in Wavefront point cloud format.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--scene', default='0', type=str, help='number of a scene/frame, e.g. 1023')
    parser.add_argument('--input_dir', default='../../../datasets/nuscenes/nuscenes_teaser_meta_v1/samples/LIDAR_TOP/',
                        type=str, help='Input folder')
    parser.add_argument('--output_dir', default='../converted_point_clouds/', type=str, help='Output folder')
    parser.add_argument('--verbose', default=1, type=int, help='Whether to print outputs to stdout')
    parser.add_argument('--all', default=0, type=int,
                        help='Whether to convert all point clouds in the input folder (0=no, 1=yes)')
    args = parser.parse_args()
    input_dir = args.input_dir
    out_dir = args.output_dir
    verbose = bool(args.verbose)
    convert_all = bool(args.all)

    nusc = NuScenes()
    if convert_all:
        for scene_name_int, scene in enumerate(nusc.scene):
            convert_point_cloud(scene['name'], scene_name_int)
    else:
        scene_name_int = int(args.scene)
        scene_name = 'scene-' + str(scene_name_int + 1).zfill(4)
        convert_point_cloud(scene_name, scene_name_int)
"""
Exports a video of each scene (with annotations) to disk.
"""
import os

from nuscenes_utils.nuscenes import NuScenes

# Load NuScenes class
nusc = NuScenes()
scene_tokens = [s['token'] for s in nusc.scene]

# Create output directory
out_dir = os.path.expanduser('~/nuscenes-visualization/scene-videos')
if not os.path.isdir(out_dir):
    os.makedirs(out_dir)

# Write videos to disk
for scene_token in scene_tokens:
    scene = nusc.get('scene', scene_token)
    print('Writing scene %s' % scene['name'])
    out_path = os.path.join(out_dir, scene['name']) + '.avi'
    if not os.path.exists(out_path):
        nusc.render_scene(scene['token'], out_path=out_path)
Example #5
import argparse

import matplotlib.pyplot as plt
from tqdm import tqdm

from nuscenes_utils.nuscenes import NuScenes
from visualization import draw_xywh_bbox

def parse_args():
    # Parse the input arguments
    parser = argparse.ArgumentParser(description='Test the 3D to 2D bbox conversion')
    parser.add_argument('--nuscene_root', dest='dataroot',
                        help='NuScenes dataroot',
                        default='../data/nuscenes')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    nusc = NuScenes(version='v0.1', dataroot=args.dataroot, verbose=True)
    fig = plt.figure(figsize=(16, 6))

    for i in tqdm(range(88, len(nusc.scene))):
        scene = nusc.scene[i]
        scene_rec = nusc.get('scene', scene['token'])
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])

        f_rad_rec = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])
        f_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT'])

        has_more_frames = True
        while has_more_frames:
            bboxes = []
            ax1 = fig.add_subplot(1,1,1)
            # ax2 = fig.add_subplot(1,2,2)
Example #6
"""
Code written by Jose Garrido Ramas. Loads the trajectories in the nuScenes dataset.
"""

import pickle
from nuscenes_utils.nuscenes import NuScenes
import numpy as np
from helpers import *

nusc = NuScenes(version='v0.1', dataroot='dataset', verbose=True)

# As loading the NuScenes database takes some time, dump it in the
# data folder for later reuse in the other functions. nusc contains
# information on the annotations and the trajectories.
pickle.dump(nusc, open("data/nusc.p", "wb"))

# The function load_traj_data in helpers.py loads the vehicle positions (though
# it can easily be modified to also load other annotations). The 'pos' entry of
# each scene in map_data_all is a numpy array of shape
# instances x samples (timesteps) x 2 (coordinates x and y). Missing values
# are represented as -999.
map_data_all = load_traj_data(nusc)
np.save('data/map_data_all.npy', map_data_all)

# Split the data into chunks of 7 seconds each using the split_data
# function in helpers.py.
fs = 2
split_size = 7 * fs  #samples
step = 4 * fs  #samples

# map_data_split holds the split data as a list of scenes x partitions x instances x time_steps x 2.
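
# A minimal numpy sketch of the windowing itself, assuming the per-scene 'pos'
# arrays described above (instances x samples x 2); the real split_data helper
# in helpers.py may differ in signature and details.
def split_scene(pos, split_size, step):
    # Slide a window of split_size samples along the time axis.
    return [pos[:, start:start + split_size, :]
            for start in range(0, pos.shape[1] - split_size + 1, step)]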
Example #7
from math import sqrt
import matplotlib as mpl
import matplotlib.cm as cm
import cv2
import numpy as np
from pyquaternion import Quaternion
import os
from nuscenes_utils.data_classes import PointCloud
from nuscenes_utils.geometry_utils import view_points
from nuscenes_utils.nuscenes import NuScenes

nusc = NuScenes(version='v0.1',
                dataroot='datasets/nuscenes/nuscenes_teaser_meta_v1',
                verbose=True)
pointsensor_channel = 'LIDAR_TOP'
camera_channel = 'CAM_FRONT'
my_sample = nusc.sample[0]
sample_token = my_sample['token']
sample_record = nusc.get('sample', sample_token)
pointsensor_token = sample_record['data'][pointsensor_channel]
camera_token = sample_record['data'][camera_channel]
cam = nusc.get('sample_data', camera_token)
img = cv2.imread(os.path.join(nusc.dataroot, cam['filename']))
pointsensor = nusc.get('sample_data', pointsensor_token)
point_cloud = PointCloud.from_file(os.path.join(nusc.dataroot, pointsensor['filename']))
# First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
point_cloud.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
point_cloud.translate(np.array(cs_record['translation']))
# Fourth step: transform the point-cloud into the camera frame. (The second and
# third steps, through the global frame, are skipped in this snippet.)
cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
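# The snippet is truncated here; a sketch of the remaining operations,
# mirroring the camera-frame transform and projection used in
# pointcloud_color_from_image in a later example:
point_cloud.translate(-np.array(cs_record['translation']))
point_cloud.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)
# Project onto the image plane (matrix multiplication with the camera
# intrinsics plus renormalization).
points = view_points(point_cloud.points[:3, :],
                     np.array(cs_record['camera_intrinsic']),
                     normalize=True)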
Example #8
import matplotlib.pyplot as plt
import random
import time
from pprint import pprint
from PIL import Image
from tqdm import tqdm
from pycocotools_plus.coco import COCO_PLUS
from nuscenes_utils.nuscenes import NuScenes
from nuscenes_utils.data_classes import PointCloud
from nuscenes_utils.radar_utils import *
from nuscenes_utils.geometry_utils import BoxVisibility

#-------------------------------------------------------------------------------
if __name__ == '__main__':

    nusc = NuScenes(version='v0.1', dataroot='../data/nuscenes', verbose=True)
    # fig, axes = plt.subplots(1, 1, figsize=(16, 9))
    fig = plt.figure(figsize=(16, 6))

    skip_scenes = 30  # skip the first 29 scenes
    for scene in nusc.scene:
        skip_scenes -= 1
        if skip_scenes > 0:
            continue

        scene_rec = nusc.get('scene', scene['token'])
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])

        f_rad_rec = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])
        br_rad_rec = nusc.get('sample_data',
                              sample_rec['data']['RADAR_BACK_RIGHT'])
Example #9
import os

from pyquaternion import Quaternion
from nuscenes_utils.data_classes import PointCloud
from nuscenes_utils.geometry_utils import BoxVisibility, view_points
from nuscenes_utils.nuscenes import NuScenes
import numpy as np

nusc = NuScenes(version='v0.1',
                dataroot='nuscenes/nuscenes_teaser_meta_v1/',
                verbose=True)

# output_path = 'input/NuScenes/Annotations/'
output_path = 'tmp/'

# iterate over all 3977 samples
for idx, sample in enumerate(nusc.sample):
    sample_token = sample['token']
    sample_record = nusc.get('sample', sample_token)
    selected_data = {}
    for channel, sample_data_token in sample_record['data'].items():
        sample_data_record = nusc.get('sample_data', sample_data_token)
        sensor_modality = sample_data_record['sensor_modality']
        if sensor_modality in ['lidar']:
            # create output directory
            os.makedirs(output_path + channel, exist_ok=True)
            selected_data[channel] = sample_data_token
            # boxes returned are transformed from global to camera/LIDAR space within get_sample_data
            data_path, boxes, camera_intrinsic = nusc.get_sample_data(
                sample_data_token, box_vis_level=BoxVisibility.IN_FRONT)
            # The annotation-writing body is truncated in this snippet; the
            # completion of the open(...) call below is a minimal sketch.
            with open(output_path + channel + '/' + str(idx).zfill(6) + '.txt',
                      'w') as f:
                pass
Example #10
"""
Author Jose Garrido Ramas. Load nuscenes data, the format being 100 text files (one for each scene). Each scene 
contains the trajectory data in the format (FRAME_ID, INSTANCE_ID, X, Y, CATEGORY_NAME). 
"""

from nuscenes_utils.nuscenes import NuScenes
import numpy as np
from helpers import *

nusc = NuScenes(version='v0.1', dataroot='dataset', verbose=True)

# Load all trajectory data (not only vehicles) via the helper in helpers.py.
map_data_all = load_traj_data(nusc, only_vehicles=False)

instance_id = 0
for scene_ix in range(len(map_data_all)):
    textFile = open('traj_format/' + str(scene_ix) + '.txt', 'w')
    # First iterate over all frames (t is the FRAME_ID).
    for t in range(map_data_all[scene_ix]['pos'].shape[1]):
        # For each sample, iterate over all its annotations.
        for i in range(map_data_all[scene_ix]['pos'].shape[0]):
            # Because of how we built the map_data_all matrix, the first
            # annotation always corresponds to the ego vehicle.
            if i != 0:
                ins_token = map_data_all[scene_ix]['instance_tokens'][i]
                instance = nusc.get('instance', ins_token)
                ann = nusc.get('sample_annotation',
                               instance['first_annotation_token'])
                attribute = nusc.get('attribute', ann['attribute_tokens'][0])
                name = ann['category_name']
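                # A hedged completion (the snippet is truncated at this point):
                # write one (FRAME_ID, INSTANCE_ID, X, Y, CATEGORY_NAME) row per
                # observed position, skipping the -999 missing-value markers and
                # using the row index i as the instance id for illustration.
                x, y = map_data_all[scene_ix]['pos'][i, t]
                if x != -999:
                    textFile.write('%d %d %.3f %.3f %s\n' % (t, i, x, y, name))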
Example #11
    args = parser.parse_args()

    assert 0 <= args.train_ratio <= 1, \
        "--train_ratio must be in range [0, 1]"
    assert args.include_sweeps in ['True','False'], \
        "--include_sweeps must be 'True' or 'False'"

    return args


#-------------------------------------------------------------------------------
if __name__ == '__main__':
    random.seed(13)
    args = parse_args()

    nusc = NuScenes(version='v0.1', dataroot=args.dataroot, verbose=True)
    coco_train = COCO_PLUS(args.train_ann_file,
                           args.train_imgs_dir,
                           new_dataset=True)
    coco_val = COCO_PLUS(args.val_ann_file,
                         args.val_imgs_dir,
                         new_dataset=True)

    for i in tqdm(range(0, len(nusc.scene))):
        scene = nusc.scene[i]
        scene_rec = nusc.get('scene', scene['token'])
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])

        ## Get front sensors data
        f_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT'])
        f_rad_rec = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])
Example #12

# Imports needed by this snippet (module paths follow the other examples on
# this page):
import os.path as osp

import numpy as np
from pyquaternion import Quaternion
from tqdm import tqdm

from nuscenes_utils.data_classes import PointCloud
from nuscenes_utils.nuscenes import NuScenes

# pointcloud_color_from_image is defined in the next example.
def export_scene_pointcloud(nusc: NuScenes, out_path: str, scene_token: str, channel: str='LIDAR_TOP',
                            min_dist: float=3.0, max_dist: float=30.0, verbose: bool=True) -> None:
    """
    :param nusc: NuScenes instance.
    :param out_path: Output path to write the point-cloud to.
    :param scene_token: Unique identifier of scene to render.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    """

    # Check inputs.
    valid_channels = ['LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT', 'RADAR_BACK_LEFT',
                      'RADAR_BACK_RIGHT']
    camera_channels = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
    assert channel in valid_channels, 'Input channel {} not valid.'.format(channel)

    # Get records from DB.
    scene_rec = nusc.get('scene', scene_token)
    start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = nusc.get('sample_data', start_sample_rec['data'][channel])

    # Make list of frames
    cur_sd_rec = sd_rec
    sd_tokens = []
    while cur_sd_rec['next'] != '':
        cur_sd_rec = nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    # Write point-cloud.
    with open(out_path, 'w') as f:
        f.write("OBJ File:\n")

        for sd_token in tqdm(sd_tokens):
            # Look up the lidar record of the current frame.
            lidar_rec = nusc.get('sample_data', sd_token)
            sample_rec = nusc.get('sample', lidar_rec['sample_token'])
            lidar_token = lidar_rec['token']
            if verbose:
                print('Processing {}'.format(lidar_rec['filename']))
            pc = PointCloud.from_file(osp.join(nusc.dataroot, lidar_rec['filename']))

            # Get point cloud colors.
            coloring = np.ones((3, pc.points.shape[1])) * -1
            for cam_channel in camera_channels:
                camera_token = sample_rec['data'][cam_channel]
                cam_coloring, cam_mask = pointcloud_color_from_image(nusc, lidar_token, camera_token)
                coloring[:, cam_mask] = cam_coloring

            # Points live in the sensor frame, so they need to be transformed into the global frame for export.
            # First step: transform the point cloud to the ego vehicle frame for the timestamp of the sweep.
            cs_record = nusc.get('calibrated_sensor', lidar_rec['calibrated_sensor_token'])
            pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
            pc.translate(np.array(cs_record['translation']))

            # Optionally filter by distance to remove returns from the ego vehicle.
            dists_origin = np.sqrt(np.sum(pc.points[:3, :] ** 2, axis=0))
            keep = np.logical_and(min_dist <= dists_origin, dists_origin <= max_dist)
            pc.points = pc.points[:, keep]
            coloring = coloring[:, keep]
            if verbose:
                print('Distance filter: Keeping %d of %d points...' % (keep.sum(), len(keep)))

            # Second step: transform to the global frame.
            poserecord = nusc.get('ego_pose', lidar_rec['ego_pose_token'])
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

            # Write points to file
            # Write points to file.
            for v, c in zip(pc.points.transpose(), coloring.transpose()):
                if (c == -1).any():
                    # Skip points without a color.
                    continue
                f.write("v {v[0]:.8f} {v[1]:.8f} {v[2]:.8f} {c[0]:.4f} {c[1]:.4f} {c[2]:.4f}\n".format(v=v, c=c / 255.0))

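
# A usage sketch (version, dataroot, and output path are illustrative
# assumptions, following the other snippets on this page):
if __name__ == '__main__':
    nusc = NuScenes(version='v0.1', dataroot='datasets/nuscenes', verbose=True)
    export_scene_pointcloud(nusc, 'scene-0001.obj', nusc.scene[0]['token'],
                            channel='LIDAR_TOP')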
Example #13

# Imports needed by this snippet (module paths follow the other examples on
# this page):
import os.path as osp
from typing import Tuple

import numpy as np
from PIL import Image
from pyquaternion import Quaternion

from nuscenes_utils.data_classes import PointCloud
from nuscenes_utils.geometry_utils import view_points
from nuscenes_utils.nuscenes import NuScenes
def pointcloud_color_from_image(nusc: NuScenes, pointsensor_token: str, camera_token: str) -> Tuple[np.array, np.array]:
    """
    Given a point sensor (lidar/radar) token and camera sample_data token, load point-cloud and map it to the image
    plane, then retrieve the colors of the closest image pixels.
    :param nusc: NuScenes instance.
    :param pointsensor_token: Lidar/radar sample_data token.
    :param camera_token: Camera sample data token.
    :return (coloring <np.float: 3, n>, mask <np.bool: m>). Returns the colors for n points that reproject into the
        image out of m total points. The mask indicates which points are selected.
    """

    cam = nusc.get('sample_data', camera_token)
    pointsensor = nusc.get('sample_data', pointsensor_token)

    pc = PointCloud.from_file(osp.join(nusc.dataroot, pointsensor['filename']))
    im = Image.open(osp.join(nusc.dataroot, cam['filename']))

    # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
    # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
    cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform to the global frame.
    poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # Third step: transform into the ego vehicle frame for the timestamp of the image.
    poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
    pc.translate(-np.array(poserecord['translation']))
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

    # Fourth step: transform into the camera.
    cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
    pc.translate(-np.array(cs_record['translation']))
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

    # Fifth step: actually take a "picture" of the point cloud.
    # Grab the depths (camera frame z axis points away from the camera).
    depths = pc.points[2, :]

    # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
    points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)

    # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
    mask = np.ones(depths.shape[0], dtype=bool)
    mask = np.logical_and(mask, depths > 0)
    mask = np.logical_and(mask, points[0, :] > 1)
    mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
    mask = np.logical_and(mask, points[1, :] > 1)
    mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
    points = points[:, mask]

    # Pick the colors of the points
    im_data = np.array(im)
    coloring = np.zeros(points.shape)
    for i, p in enumerate(points.transpose()):
        point = p[:2].round().astype(np.int32)
        coloring[:, i] = im_data[point[1], point[0], :]

    return coloring, mask
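
# A usage sketch (version and dataroot are illustrative assumptions; tokens are
# taken from the first sample, as in the lidar-to-camera example above):
if __name__ == '__main__':
    nusc = NuScenes(version='v0.1', dataroot='datasets/nuscenes', verbose=True)
    sample = nusc.sample[0]
    coloring, mask = pointcloud_color_from_image(
        nusc, sample['data']['LIDAR_TOP'], sample['data']['CAM_FRONT'])
    print('Colored %d of %d points.' % (mask.sum(), mask.size))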
"""
Exports an image for each map location with all the ego poses drawn on the map.
"""
import os

import matplotlib.pyplot as plt
import numpy as np

from nuscenes_utils.nuscenes import NuScenes

# Load NuScenes class
nusc = NuScenes()
locations = np.unique([l['location'] for l in nusc.log])

# Create output directory
out_dir = os.path.expanduser('~/nuscenes-visualization/map-poses')
if not os.path.isdir(out_dir):
    os.makedirs(out_dir)

for location in locations:
    nusc.render_egoposes_on_map(log_location=location)
    out_path = os.path.join(out_dir, 'egoposes-%s.png' % location)
    plt.tight_layout()
    plt.savefig(out_path)
Example #15
from nuscenes_utils.nuscenes import NuScenes

nusc = NuScenes()
for (sensor, sd_token) in nusc.sample[0]['data'].items():
    cs_token = nusc.get('sample_data', sd_token)['calibrated_sensor_token']
    translation = nusc.get('calibrated_sensor', cs_token)['translation']
    print("%s %s" % (sensor, translation))