Example No. 1
def gen_scene_splits(dataroot: str):
    """
    Returns all nuScenes scene splits by scene token, as specified in https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/utils/splits.py.
    Arguments:
        dataroot: Directory path of the nuScenes datasets, <str>.
    """
    # Imports
    from nuscenes.nuscenes import NuScenes
    from nuscenes.utils.splits import create_splits_scenes

    # Define
    scene_splits = create_splits_scenes()

    # Load trainval
    version = 'v1.0-trainval'
    nusc = NuScenes(version=version, dataroot=dataroot + version, verbose=False)

    for split, scene_names in scene_splits.items():
        if split == 'test':
            continue
        else:
            scene_splits[split] = [nusc.field2token('scene', 'name', scene_name)[0] for scene_name in scene_names]

    # Load test
    version = 'v1.0-test'
    nusc = NuScenes(version=version, dataroot=dataroot + version, verbose=False)
    scene_splits['test'] = [nusc.field2token('scene', 'name', scene_name)[0] for scene_name in scene_splits['test']]

    return scene_splits
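A minimal usage sketch, assuming the trainval and test metadata folders sit under dataroot and that dataroot ends with a slash (the function concatenates dataroot and version directly); the path is illustrative:

splits = gen_scene_splits('/data/sets/nuscenes/')
print(len(splits['train']), len(splits['val']), len(splits['test']))  # number of scene tokens per split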
Example No. 2
    def __init__(self,
                 nusc_root,
                 nusc_version,
                 split,
                 max_cam_sweeps=6,
                 max_lidar_sweeps=10,
                 max_radar_sweeps=6,
                 logging_level="INFO",
                 logger=None,
                 nusc=None):
        """
        Image database object that holds the sample data tokens for the nuscenes
        dataset.
        :param nusc_root: location of the nuscenes dataset
        :param nusc_version: the version of the dataset to use ('v1.0-trainval', 
                             'v1.0-test', 'v1.0-mini')
        :param max_cam_sweeps: number of sweep tokens to return for each camera
        :param max_lidar_sweeps: number of sweep tokens to return for lidar
        :param max_radar_sweeps: number of sweep tokens to return for each radar
        """

        self.nusc_root = nusc_root
        self.nusc_version = nusc_version
        self.split = split
        self.max_cam_sweeps = max_cam_sweeps
        self.max_lidar_sweeps = max_lidar_sweeps
        self.max_radar_sweeps = max_radar_sweeps
        self.id_length = 8
        self.db = {}

        assert nusc_version in constants.NUSCENES_SPLITS.keys(), \
            "Nuscenes version not valid."
        assert split in constants.NUSCENES_SPLITS[nusc_version], \
            "Nuscenes split ({}) is not valid for {}".format(split, nusc_version)

        if logger is None:
            self.logger = logging.initialize_logger('pynuscenes',
                                                    logging_level)
        else:
            self.logger = logger

        if nusc is not None:
            # Reuse the provided NuScenes instance only if its version matches the requested one
            if nusc.version != nusc_version:
                self.logger.info(
                    'Loading nuscenes {} dataset'.format(nusc_version))
                self.nusc = NuScenes(version=nusc_version,
                                     dataroot=self.nusc_root,
                                     verbose=True)
            else:
                self.nusc = nusc
        else:
            self.logger.info(
                'Loading nuscenes {} dataset'.format(nusc_version))
            self.nusc = NuScenes(version=nusc_version,
                                 dataroot=self.nusc_root,
                                 verbose=True)

        self.SENSOR_NAMES = [x['channel'] for x in self.nusc.sensor]
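A hypothetical instantiation; the class name is not shown in the snippet, so NuscenesDB below is only a placeholder, and the paths are illustrative:

db = NuscenesDB(nusc_root='/data/nuscenes',
                nusc_version='v1.0-mini',
                split='mini_train')
print(db.SENSOR_NAMES)  # channels from sensor.json, e.g. 'CAM_FRONT', 'LIDAR_TOP', 'RADAR_FRONT'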
Example No. 3
def main():

    #Instantiate an object of the NuScenes dataset class
    nusc = NuScenes(version='v1.0-mini', dataroot='/home/odysseas/thesis/data/sets/nuscenes_mini/', verbose=True)

    #Load front_rad sample_data tokens
    rad_sd_tokens = load_keyframe_rad_tokens(nusc)

    #Load actual sample_data
    radar_pointclouds = tokens_to_data(nusc, rad_sd_tokens)

    depths_list = []
    #Load x (front/depth) value of all radar data
    depths_sum = 0.0
    total_number_of_detections = 0
    for pcl in range(len(radar_pointclouds)):
        # RadarPointCloud.points is an (18, N) array: row 0 holds x (depth), columns are detections
        number_of_detections = radar_pointclouds[pcl].points.shape[1]
        total_number_of_detections += number_of_detections
        for detection in range(number_of_detections):
            #radar_pointclouds[pcl].points = np.delete(radar_pointclouds[pcl].points, np.argwhere(radar_pointclouds[pcl].points[0] > 100))
            depth = radar_pointclouds[pcl].points[0, detection]
            depths_list.append(depth)
            depths_sum += depth
    depths_avg = depths_sum / total_number_of_detections

    print('Total number of radar points detected across all scenes: {}'.format(total_number_of_detections))
    print('Average depth for all radar detections across all scenes: {}'.format(depths_avg))

    plt.hist(depths_list, bins = 100)
    plt.title("Histogram of radar depths")
    plt.ylabel("Number of detections")
    plt.xlabel("Depth")
    plt.xticks((np.arange(0, max(depths_list)+1, 20)))
    plt.savefig('Radar_Depths_Histogram.png', bbox_inches='tight')
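The helpers load_keyframe_rad_tokens and tokens_to_data are not shown above; a plausible sketch of the first one, assuming it simply collects the RADAR_FRONT sample_data token of every keyframe sample:

def load_keyframe_rad_tokens(nusc):
    # Each keyframe sample stores one sample_data token per channel in its 'data' dict.
    rad_sd_tokens = []
    for sample in nusc.sample:
        rad_sd_tokens.append(sample['data']['RADAR_FRONT'])
    return rad_sd_tokens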
def main():
    root_path = '/extssd/jiaxin/nuscenes/test'
    nusc = NuScenes(version='v1.0-test', dataroot=root_path, verbose=True)

    sensor = 'CAM_FRONT'

    counter = 0
    for i, scene in enumerate(nusc.scene):
        scene_token = scene['token']
        scene = nusc.get('scene', scene_token)
        first_sample = nusc.get('sample', scene['first_sample_token'])
        camera = nusc.get('sample_data', first_sample['data'][sensor])

        img = np.array(
            Image.open(os.path.join(nusc.dataroot,
                                    camera['filename'])).convert('L'))
        H, W = img.shape[0], img.shape[1]

        img_mean = np.mean(img.astype(np.float32))

        white_mask = img > 150
        white_area = np.sum(white_mask.astype(np.float32))

        if img_mean < 110 and white_area < (H * W) * 0.1:
            print('\'%s\',' % (scene_token))
            counter += 1
            plt.figure()
            plt.gray()
            plt.imshow(img)
            plt.show()

    print('%d night scenes' % counter)
Example No. 5
def compile_data(version, dataroot, data_aug_conf, grid_conf, bsz, nworkers,
                 parser_name):
    nusc = NuScenes(version='v1.0-{}'.format(version),
                    dataroot=os.path.join(dataroot, version),
                    verbose=False)
    parser = {
        'vizdata': VizData,
        'segmentationdata': SegmentationData,
    }[parser_name]
    traindata = parser(nusc,
                       is_train=True,
                       data_aug_conf=data_aug_conf,
                       grid_conf=grid_conf)
    valdata = parser(nusc,
                     is_train=False,
                     data_aug_conf=data_aug_conf,
                     grid_conf=grid_conf)

    trainloader = torch.utils.data.DataLoader(traindata,
                                              batch_size=bsz,
                                              shuffle=True,
                                              num_workers=nworkers,
                                              drop_last=True,
                                              worker_init_fn=worker_rnd_init)
    valloader = torch.utils.data.DataLoader(valdata,
                                            batch_size=bsz,
                                            shuffle=False,
                                            num_workers=nworkers)

    return trainloader, valloader
    def test_egoposes_on_map(self):
        """ Test that all ego poses land on """
        nusc = NuScenes(version=self.version,
                        dataroot=os.environ['NUSCENES'],
                        verbose=False)
        whitelist = [
            'scene-0499', 'scene-0501', 'scene-0502', 'scene-0515',
            'scene-0517'
        ]

        invalid_scenes = []
        for scene in tqdm.tqdm(nusc.scene, leave=False):
            if scene['name'] in whitelist:
                continue

            log = nusc.get('log', scene['log_token'])
            map_name = log['location']
            nusc_map = self.nusc_maps[map_name]
            ratio_valid = get_egoposes_on_drivable_ratio(
                nusc, nusc_map, scene['token'])
            if ratio_valid != 1.0:
                print(
                    'Error: Scene %s has a ratio of %f ego poses on the drivable area!'
                    % (scene['name'], ratio_valid))
                invalid_scenes.append(scene['name'])

        self.assertEqual(len(invalid_scenes), 0)
Example No. 7
def merge_depth_sf(depth_meta_path, sf_meta_path, save_path):
    
    with open(depth_meta_path, 'r') as f:
        depth_meta = json.load(f)

    with open(sf_meta_path, 'r') as f:
        sf_meta = json.load(f)
    
    split = 'train'
    data_path = 'data/nuscenes/'
    nusc = NuScenes(
        version=SPLITS[split], dataroot=data_path, verbose=True)
    
    imgpath2paths = {}
    for depth_info in depth_meta:
        sample_token = depth_info['sample_token']
    
        depth_path = depth_info['depth_path']
        img_path = depth_info['img_path']
        cam_name = img_path.split('/')[-2]

        cam_token = nusc.get('sample', sample_token)['data'][cam_name]
        sf_path = sf_meta[cam_token]['points_path']
        img_path = sf_meta[cam_token]['img_path'] # use this version of img path

        tmp = {'token': cam_token, 'depth_path': depth_path, 
                'cam_name': cam_name, 'sf_path': sf_path, 
                'img_path': img_path}
        imgpath2paths[img_path] = tmp

    with open(save_path, 'w') as f:
        json.dump(imgpath2paths, f)
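A hypothetical invocation; the paths are illustrative, and the depth meta file is assumed to have the same structure (sample_token / depth_path / img_path entries) as the meta.json written by the convert() helper shown in the last example below:

merge_depth_sf(depth_meta_path='data/nuscenes/depth_maps/train/meta.json',
               sf_meta_path='data/nuscenes/sf/meta.json',
               save_path='data/nuscenes/depth_sf_merged.json')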
    def __init__(self, set_name="mini_train"):

        #assert statements
        set_paths = [
            'train', 'val', 'test', 'mini_train', 'mini_val', 'train_detect',
            'train_track'
        ]
        assert set_name in set_paths, "Incorrect set_name"

        #Initialize data and Prediction Helper classes
        self.data_path = DATA_PATH
        self.nusc = NuScenes(version=DATA_VERSION,
                             dataroot=self.data_path,
                             verbose=True)
        self.helper = PredictHelper(self.nusc)

        #get all the scenes
        self.scenes = create_splits_scenes()

        #get all the scenes in the trainset
        self.set_name = set_name
        self.trainset = self.scenes[
            self.set_name]  #List of scenes as part of training set
        self.prediction_scenes = json.load(
            open(self.data_path + "maps/prediction_scenes.json", "r")
        )  #Dictionary containing list of instance and sample tokens for each scene

        print("Number of samples in train set: %d" % (len(self.trainset)))
    def __init__(self,
                 nuscenes_root,
                 version="v1.0-trainval",
                 max_scenes=None,
                 *,
                 read_radar: bool = True,
                 read_camera: bool = True,
                 read_semantics: bool = True,
                 read_bounding_boxes: bool = True):
        self.nusc = NuScenes(version=version,
                             dataroot=nuscenes_root,
                             verbose=False)
        self.root = pathlib.Path(nuscenes_root)

        # global counter to sanity-check if we calculate the same number of points
        # within boxes as the dataset authors
        self.ns_lidar_pts_to_calculated_diff = 0

        # flags defining the data entries to return from 'read'
        self.read_radar = read_radar
        self.read_camera = read_camera
        self.read_semantics = read_semantics
        self.read_bounding_boxes = read_bounding_boxes

        if self.read_semantics and not hasattr(self.nusc, "lidarseg"):
            raise RuntimeError("Error: nuScenes-lidarseg not installed!")

        # assert that the training targets range from 0 - (|mapping| - 1)
        assert len(set(
            NUSCENES_SEM_CLASSES.values())) == len(NUSCENES_SEM_CLASSES)
        assert all(a == b
                   for a, b in zip(sorted(NUSCENES_SEM_CLASSES.values()),
                                   range(len(NUSCENES_SEM_CLASSES))))

        split_name = {
            "v1.0-trainval": "nuscenes_default",
            "v1.0-mini": "nuscenes_mini",
        }.get(version, "nuscenes_{}".format(version))

        # create split dict
        self.split = {
            "name": split_name,
            "data": {k: []
                     for k in self.scene_split_lists.keys()},
        }
        for i, scene in enumerate(self.nusc.scene):
            if max_scenes is not None and i >= max_scenes:
                break
            name = scene["name"]
            for k, v in self.scene_split_lists.items():
                if name in v:
                    split_list = self.split["data"][k]
                    split_list.extend([
                        self.sample_id_template.format(name, i)
                        for i in range(0, scene["nbr_samples"])
                    ])
                    break
            else:
                raise RuntimeError(
                    "Found scene that is not in a split: {}".format(name))
Example No. 10
def main():

    nusc = NuScenes(version='v1.0-mini',
                    dataroot='/home/odysseas/thesis/data/sets/nuscenes/',
                    verbose=True)
    #Fix sensor.json file
    sensor = nusc.sensor
    sensor[:] = [
        record for record in sensor if (record['channel'] == "CAM_FRONT") or (
            record['channel'] == "RADAR_FRONT")
    ]

    with open('./sensor.json', 'w') as fout:
        json.dump(sensor, fout, indent=0)

    cam_front_token = sensor[0]["token"]
    radar_front_token = sensor[1]["token"]

    #Fix calibrated_sensor.json file
    calibrated_sensor = nusc.calibrated_sensor
    calibrated_sensor[:] = [
        record for record in calibrated_sensor
        if (record['sensor_token'] == cam_front_token) or (
            record['sensor_token'] == radar_front_token)
    ]

    with open('./calibrated_sensor.json', 'w') as fout:
        json.dump(calibrated_sensor, fout, indent=0)
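A small sanity-check sketch for the filtered file written above (assumes main() has already run in the current directory):

import json
with open('./sensor.json') as f:
    kept = json.load(f)
print([rec['channel'] for rec in kept])  # expected: ['CAM_FRONT', 'RADAR_FRONT']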
    def __init__(self,
                 nusc_kitti_dir: str = '/home/developer/nuscenes/nusc_kitti',
                 cam_name: str = 'CAM_FRONT',
                 lidar_name: str = 'LIDAR_TOP',
                 image_count: int = 10,
                 nusc_version: str = 'v1.0-mini',
                 split: str = 'mini_train'):
        """
        :param nusc_kitti_dir: Where to write the KITTI-style annotations.
        :param cam_name: Name of the camera to export. Note that only one camera is allowed in KITTI.
        :param lidar_name: Name of the lidar sensor.
        :param image_count: Number of images to convert.
        :param nusc_version: nuScenes version to use.
        :param split: Dataset split to use.
        """
        self.nusc_kitti_dir = os.path.expanduser(nusc_kitti_dir)
        self.cam_name = cam_name
        self.lidar_name = lidar_name
        self.image_count = image_count
        self.nusc_version = nusc_version
        self.split = split

        # Create nusc_kitti_dir.
        if not os.path.isdir(self.nusc_kitti_dir):
            os.makedirs(self.nusc_kitti_dir)

        # Select subset of the data to look at.
        self.nusc = NuScenes(version=nusc_version,
                             dataroot='/home/developer/nuscenes')
Example No. 12
def create_tf_record_train_as_val(fn_out, split, vis_results):
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map)
    writer = tf.python_io.TFRecordWriter(fn_out)
    params = read_params(FLAGS.param)
    logging.debug('Params: ' + str(params))
    nusc = NuScenes(version='v1.0-trainval', dataroot=FLAGS.nuscenes, verbose=True)
    sensor = 'LIDAR_TOP'
    nu_to_kitti_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse
    split_logs = create_splits_logs(split, nusc)
    sample_tokens = split_to_samples(nusc, split_logs)
    random.shuffle(sample_tokens)
    print('Number of samples:', len(sample_tokens))

    for sample_token in sample_tokens[1:100]:
        sample = nusc.get('sample', sample_token)
        lidar_top_data = nusc.get('sample_data', sample['data'][sensor])
        if not lidar_top_data['prev']:
            continue
        lidar_top_data_prev = nusc.get('sample_data', lidar_top_data['prev'])
        labels_corners, labels_center, labels_data = compute_labels_image(nusc, sample, sensor,
                                                                          nu_to_kitti_lidar, params)
        filename = os.path.splitext(os.path.splitext(lidar_top_data['filename'])[0])[0]
        filename_prev = os.path.splitext(os.path.splitext(lidar_top_data_prev['filename'])[0])[0]
        tf_example = dict_to_tf_example(labels_corners, labels_center, labels_data, params, label_map_dict,
                                        FLAGS.data, FLAGS.data_beliefs, filename, filename_prev)
        writer.write(tf_example.SerializeToString())
        if (vis_results):
            visualize_results(FLAGS.data, filename, labels_corners, os.path.join(FLAGS.output, 'Debug'))
def export_2d_annotation(root_path, info_path, version, mono3d=True):
    """Export 2d annotation from the info file and raw data.

    Args:
        root_path (str): Root path of the raw data.
        info_path (str): Path of the info file.
        version (str): Dataset version.
        mono3d (bool): Whether to export mono3d annotation. Default: True.
    """
    # get bbox annotations for camera
    camera_types = [
        'CAM_FRONT',
        'CAM_FRONT_RIGHT',
        'CAM_FRONT_LEFT',
        'CAM_BACK',
        'CAM_BACK_LEFT',
        'CAM_BACK_RIGHT',
    ]
    nusc_infos = mmcv.load(info_path)['infos']
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    # info_2d_list = []
    cat2Ids = [
        dict(id=nus_categories.index(cat_name), name=cat_name)
        for cat_name in nus_categories
    ]
    coco_ann_id = 0
    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
    for info in mmcv.track_iter_progress(nusc_infos):
        for cam in camera_types:
            cam_info = info['cams'][cam]
            coco_infos = get_2d_boxes(nusc,
                                      cam_info['sample_data_token'],
                                      visibilities=['', '1', '2', '3', '4'],
                                      mono3d=mono3d)
            (height, width, _) = mmcv.imread(cam_info['data_path']).shape
            coco_2d_dict['images'].append(
                dict(file_name=cam_info['data_path'].split('data/nuscenes/')
                     [-1],
                     id=cam_info['sample_data_token'],
                     token=info['token'],
                     cam2ego_rotation=cam_info['sensor2ego_rotation'],
                     cam2ego_translation=cam_info['sensor2ego_translation'],
                     ego2global_rotation=info['ego2global_rotation'],
                     ego2global_translation=info['ego2global_translation'],
                     cam_intrinsic=cam_info['cam_intrinsic'],
                     width=width,
                     height=height))
            for coco_info in coco_infos:
                if coco_info is None:
                    continue
                # add an empty key for coco format
                coco_info['segmentation'] = []
                coco_info['id'] = coco_ann_id
                coco_2d_dict['annotations'].append(coco_info)
                coco_ann_id += 1
    if mono3d:
        json_prefix = f'{info_path[:-4]}_mono3d'
    else:
        json_prefix = f'{info_path[:-4]}'
    mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json')
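A possible invocation with illustrative paths; it must run inside the module that defines get_2d_boxes and nus_categories, and the info .pkl is assumed to come from the usual nuScenes info converter:

export_2d_annotation(root_path='data/nuscenes',
                     info_path='data/nuscenes/nuscenes_infos_val.pkl',
                     version='v1.0-trainval',
                     mono3d=True)
# writes data/nuscenes/nuscenes_infos_val_mono3d.coco.json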
Example No. 14
def factory(dataset, dir_nuscenes):
    """Define dataset type and split training and validation"""

    assert dataset in ['nuscenes', 'nuscenes_mini', 'nuscenes_teaser']
    if dataset == 'nuscenes_mini':
        version = 'v1.0-mini'
    else:
        version = 'v1.0-trainval'

    nusc = NuScenes(version=version, dataroot=dir_nuscenes, verbose=True)
    scenes = nusc.scene

    if dataset == 'nuscenes_teaser':
        with open("splits/nuscenes_teaser_scenes.txt", "r") as file:
            teaser_scenes = file.read().splitlines()
        scenes = [scene for scene in scenes if scene['token'] in teaser_scenes]
        with open("splits/split_nuscenes_teaser.json", "r") as file:
            dic_split = json.load(file)
        split_train = [
            scene['name'] for scene in scenes
            if scene['token'] in dic_split['train']
        ]
        split_val = [
            scene['name'] for scene in scenes
            if scene['token'] in dic_split['val']
        ]
    else:
        split_scenes = splits.create_splits_scenes()
        split_train, split_val = split_scenes['train'], split_scenes['val']

    return nusc, scenes, split_train, split_val
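A usage sketch with an illustrative data root, splitting the loaded scenes by name:

nusc, scenes, split_train, split_val = factory('nuscenes_mini', dir_nuscenes='/data/sets/nuscenes')
train_scenes = [s for s in scenes if s['name'] in split_train]
val_scenes = [s for s in scenes if s['name'] in split_val]
print(len(train_scenes), 'train scenes,', len(val_scenes), 'val scenes')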
Example No. 15
    def prep_list_of_sessions(self):
        if self.phase in ['train', 'validation']:
            version = 'v1.0-trainval'
        elif self.phase == 'test':
            version = 'v1.0-test'
        else:
            raise ValueError('Unknown phase: {}'.format(self.phase))
        self.NuScenes_data = NuScenes(version=version,
                                      dataroot=DATASET_ROOT,
                                      verbose=True)
        self.num_sessions = len(self.NuScenes_data.scene)

        self.cloud_tokens = []
        self.session_lengths = []
        self.session_locations = []
        self.session_names = []

        for session_ind in range(self.num_sessions):

            record = self.NuScenes_data.scene[session_ind]
            session_token = record['token']
            self.session_names.append(record['name'])
            location = self.NuScenes_data.get('log',
                                              record['log_token'])['location']
            self.session_locations.append(location)
            sample_token = record["first_sample_token"]
            sample = self.NuScenes_data.get("sample", sample_token)
            lidar_token = sample["data"]["LIDAR_TOP"]
            cur_lidar_tokens = []
            while len(lidar_token) > 0:
                cur_lidar_tokens.append(lidar_token)
                lidar_data = self.NuScenes_data.get("sample_data", lidar_token)
                lidar_token = lidar_data["next"]
            self.cloud_tokens.append(cur_lidar_tokens)
            self.session_lengths.append(len(cur_lidar_tokens))
    def __init__(self, root, mode, opt: options.Options):
        super(nuScenesLoader, self).__init__()
        self.root = root
        self.opt = opt
        self.mode = mode

        # farthest point sample
        self.farthest_sampler = FarthestSampler(dim=3)

        # list of (traversal, pc_timestamp, pc_timestamp_idx, traversal_pc_num)
        if mode == 'train':
            self.nuscenes_path = os.path.join(root, 'trainval')
            version = 'v1.0-trainval'
        else:
            self.nuscenes_path = os.path.join(root, 'test')
            version = 'v1.0-test'

        self.dataset = make_nuscenes_dataset(self.nuscenes_path)
        self.nusc = NuScenes(version=version,
                             dataroot=self.nuscenes_path,
                             verbose=True)

        self.camera_name_list = [
            'CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT', 'CAM_BACK',
            'CAM_BACK_LEFT', 'CAM_BACK_RIGHT'
        ]
Example No. 17
    def evaluation(self, det_annos, class_names, **kwargs):
        import json
        from nuscenes.nuscenes import NuScenes
        from . import nuscenes_utils
        nusc = NuScenes(version=self.dataset_cfg.VERSION,
                        dataroot=str(self.root_path),
                        verbose=True)
        nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(
            det_annos, nusc)
        nusc_annos['meta'] = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }

        output_path = Path(kwargs['output_path'])
        output_path.mkdir(exist_ok=True, parents=True)
        res_path = str(output_path / 'results_nusc.json')
        with open(res_path, 'w') as f:
            json.dump(nusc_annos, f)

        self.logger.info(
            f'The predictions of NuScenes have been saved to {res_path}')

        if self.dataset_cfg.VERSION == 'v1.0-test':
            return 'No ground-truth annotations for evaluation', {}

        from nuscenes.eval.detection.config import config_factory
        from nuscenes.eval.detection.evaluate import NuScenesEval

        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
            'v1.0-test': 'test'
        }
        try:
            eval_version = 'detection_cvpr_2019'
            eval_config = config_factory(eval_version)
        except:
            eval_version = 'cvpr_2019'
            eval_config = config_factory(eval_version)

        nusc_eval = NuScenesEval(
            nusc,
            config=eval_config,
            result_path=res_path,
            eval_set=eval_set_map[self.dataset_cfg.VERSION],
            output_dir=str(output_path),
            verbose=True,
        )
        metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)

        with open(output_path / 'metrics_summary.json', 'r') as f:
            metrics = json.load(f)

        result_str, result_dict = nuscenes_utils.format_nuscene_results(
            metrics, self.class_names, version=eval_version)
        return result_str, result_dict
def quick_test(dataroot='/data/nuscenes', gpuid=0, nworkers=10):
    """Evaluate detections with PKL.
    """
    nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=True)
    nusc_maps = get_nusc_maps(dataroot)
    cfg = config_factory('detection_cvpr_2019')
    device = torch.device(f'cuda:{gpuid}') if gpuid >= 0\
        else torch.device('cpu')
    print(f'using device: {device}')

    get_example_submission()

    nusc_eval = DetectionEval(nusc,
                              config=cfg,
                              result_path='./example_submission.json',
                              eval_set='mini_train',
                              output_dir='./res',
                              verbose=True)
    info = calculate_pkl(nusc_eval.gt_boxes,
                         nusc_eval.pred_boxes,
                         nusc_eval.sample_tokens,
                         nusc_eval.nusc,
                         nusc_maps,
                         device,
                         nworkers,
                         bsz=128,
                         plot_kextremes=5,
                         verbose=True)
    print({k: v for k, v in info.items() if k != 'full'})
Example No. 19
def create_nuscenes_info(version, data_path, save_path, max_sweeps=10):
    from nuscenes.nuscenes import NuScenes
    from nuscenes.utils import splits
    from . import nuscenes_utils
    data_path = data_path / version / 'v1.0-trainval_meta'  # change data path
    save_path = save_path / version

    assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
    if version == 'v1.0-trainval':
        train_scenes = splits.train
        val_scenes = splits.val
    elif version == 'v1.0-test':
        train_scenes = splits.test
        val_scenes = []
    elif version == 'v1.0-mini':
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise NotImplementedError

    nusc = NuScenes(version=version, dataroot=data_path, verbose=True)
    available_scenes = nuscenes_utils.get_available_scenes(nusc)
    available_scene_names = [s['name'] for s in available_scenes]
    train_scenes = list(
        filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in train_scenes
    ])
    val_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in val_scenes
    ])

    print('%s: train scene(%d), val scene(%d)' %
          (version, len(train_scenes), len(val_scenes)))

    train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos(
        data_path=data_path,
        nusc=nusc,
        train_scenes=train_scenes,
        val_scenes=val_scenes,
        test='test' in version,
        max_sweeps=max_sweeps)

    if version == 'v1.0-test':
        print('test sample: %d' % len(train_nusc_infos))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_test.pkl',
                  'wb') as f:
            pickle.dump(train_nusc_infos, f)
    else:
        print('train sample: %d, val sample: %d' %
              (len(train_nusc_infos), len(val_nusc_infos)))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_train.pkl',
                  'wb') as f:
            pickle.dump(train_nusc_infos, f)
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_val.pkl',
                  'wb') as f:
            pickle.dump(val_nusc_infos, f)
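A hypothetical invocation; data_path and save_path must be pathlib.Path objects because the function joins them with the '/' operator, and it must run inside the package that provides nuscenes_utils:

from pathlib import Path
create_nuscenes_info(version='v1.0-trainval',
                     data_path=Path('/data/nuscenes'),
                     save_path=Path('/data/nuscenes'),
                     max_sweeps=10)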
Example No. 20
def get_pose_intrinsic(
    save_path='/public/MARS/datasets/nuScenes-SF/meta/cam_pose_intrinsic.json'
):

    split = 'train'
    data_path = 'data/nuscenes/'
    nusc = NuScenes(version=SPLITS[split], dataroot=data_path, verbose=True)
    samples = nusc.sample

    cam_token2cam_intext = {}

    for sample in tqdm(samples):
        for cam_name in CamNames:
            cam_token = sample['data'][cam_name]
            cam_data = nusc.get('sample_data', cam_token)
            ego_pose = nusc.get('ego_pose', cam_data['ego_pose_token'])
            cam_cs = nusc.get('calibrated_sensor',
                              cam_data['calibrated_sensor_token'])

            # used to transform from ego to global
            pose_matrix = quat_trans2matrix(ego_pose['rotation'],
                                            ego_pose['translation'])
            # used to transform from camera to ego
            cam_pose = quat_trans2matrix(cam_cs['rotation'],
                                         cam_cs['translation'])

            cam_pose_world = np.matmul(pose_matrix, cam_pose)

            ret = {'pose': cam_pose_world.tolist()}
            ret['intrinsic'] = cam_cs['camera_intrinsic']

            cam_token2cam_intext[cam_token] = ret

    with open(save_path, 'w') as f:
        json.dump(cam_token2cam_intext, f)
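The quat_trans2matrix helper used above is not shown; a plausible implementation that builds a 4x4 homogeneous transform from a [w, x, y, z] quaternion and a translation vector:

import numpy as np
from pyquaternion import Quaternion

def quat_trans2matrix(rotation, translation):
    # rotation: [w, x, y, z] quaternion as stored in nuScenes records; translation: [x, y, z]
    mat = np.eye(4)
    mat[:3, :3] = Quaternion(rotation).rotation_matrix
    mat[:3, 3] = translation
    return mat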
def get_boxes2d(sample_data_token: str, image_annotations_token2ind = {}, image_annotations = []):
    nusc = NuScenes(version='v1.0-mini', dataroot='dataset/nuScenes/v1.0-mini', verbose=True)
    # Retrieve sensor & pose records
    sd_record = nusc.get('sample_data', sample_data_token)
    curr_sample_record = nusc.get('sample', sd_record['sample_token'])
    #curr_sample_record['image_anns']

    boxes2d = []
    if curr_sample_record['prev'] == "" or sd_record['is_key_frame']:
        # If no previous annotations available, or if sample_data is keyframe just return the current ones.
        for i,x in enumerate(curr_sample_record['anns']):
            record = nusc.get('sample_annotation', x)
            instance_token = record['instance_token']
            record2d = image_annotations[image_annotations_token2ind[instance_token]]
            box2d = Box2d(record2d['bbox_corners'], name=record2d['category_name'], token=record2d['sample_annotation_token'],
                          visibility=record2d['visibility_token'],filename=record2d['filename'])
            boxes2d.append(box2d)

    else:
        prev_sample_record = nusc.get('sample', curr_sample_record['prev'])

        curr_ann_recs = [nusc.get('sample_annotation', token) for token in curr_sample_record['anns']]
        prev_ann_recs = [nusc.get('sample_annotation', token) for token in prev_sample_record['anns']]

        # Maps instance tokens to prev_ann records
        prev_inst_map = {entry['instance_token']: entry for entry in prev_ann_recs}

        t0 = prev_sample_record['timestamp']
        t1 = curr_sample_record['timestamp']
        t = sd_record['timestamp']

        # There are rare situations where the timestamps in the DB are off so ensure that t0 < t < t1.
        t = max(t0, min(t1, t))

        boxes = []
        for curr_ann_rec in curr_ann_recs:

            if curr_ann_rec['instance_token'] in prev_inst_map:
                # If the annotated instance existed in the previous frame, interpolate center & orientation.
                prev_ann_rec = prev_inst_map[curr_ann_rec['instance_token']]

                # Interpolate center.
                center = [np.interp(t, [t0, t1], [c0, c1]) for c0, c1 in zip(prev_ann_rec['translation'],
                                                                             curr_ann_rec['translation'])]

                # Interpolate orientation.
                rotation = Quaternion.slerp(q0=Quaternion(prev_ann_rec['rotation']),
                                            q1=Quaternion(curr_ann_rec['rotation']),
                                            amount=(t - t0) / (t1 - t0))

                box = Box(center, curr_ann_rec['size'], rotation, name=curr_ann_rec['category_name'],
                          token=curr_ann_rec['token'])
            else:
                # If not, simply grab the current annotation (nusc.get_box returns the Box for an annotation token).
                box = nusc.get_box(curr_ann_rec['token'])

            boxes.append(box)
        # NOTE: these interpolated 3D boxes are never converted to 2D, so this branch returns an empty boxes2d list
    return boxes2d
Example No. 22
def init_nu():
    """ -------------------------------------------------------------------------------------------------------------
    Initialize nuScenes

    return:         [tuple] NuScenes instance, NuScenesCanBus instance
    ------------------------------------------------------------------------------------------------------------- """
    nu              = NuScenes( version=nuscenes_ver, dataroot=dir_orig, verbose=verbose )      # NuScenes database
    nc              = NuScenesCanBus( dataroot=dir_orig )                                       # NuScenes CAN bus
    return nu, nc
Example No. 23
    def __init__(self,
                 data_root,
                 filenames,
                 height,
                 width,
                 frame_idxs,
                 num_scales,
                 version='v1.0-mini',
                 sensor='CAM_FRONT',
                 is_train=False,
                 img_ext='.jpg'):
        super(NuscDataset, self).__init__()

        self.data_path = data_root
        self.data_path = '/share/nuscenes'  # NOTE: hard-coded path overrides the data_root argument
        self.filenames = filenames
        self.height = height
        self.width = width
        self.num_scales = num_scales
        self.interp = Image.ANTIALIAS

        self.frame_idxs = frame_idxs

        self.is_train = is_train
        self.img_ext = img_ext

        self.loader = pil_loader
        self.to_tensor = transforms.ToTensor()
        self.nusc = NuScenes(version=version,
                             dataroot=self.data_path,
                             verbose=True)
        self.sensor = sensor
        self.data_root = '/share/nuscenes'
        self.full_res_shape = (1600, 640)

        # We need to specify augmentations differently in newer versions of torchvision.
        # We first try the newer tuple version; if this fails we fall back to scalars
        try:
            self.brightness = (0.8, 1.2)
            self.contrast = (0.8, 1.2)
            self.saturation = (0.8, 1.2)
            self.hue = (-0.1, 0.1)
            transforms.ColorJitter.get_params(self.brightness, self.contrast,
                                              self.saturation, self.hue)
        except TypeError:
            self.brightness = 0.2
            self.contrast = 0.2
            self.saturation = 0.2
            self.hue = 0.1

        self.resize = {}
        for i in range(self.num_scales):
            s = 2**i
            self.resize[i] = transforms.Resize(
                (self.height // s, self.width // s), interpolation=self.interp)

        self.load_depth = self.check_depth()
Example No. 24
def create_nuscenes(root: str, version: str = "v1.0-trainval") -> NuScenes:
    """
    Creates a NuScenes object that can later be passed to NuscenesBEVDataset.
    Warning: it takes up a considerable amount of RAM.
    :param root: path to folder with nuscenes dataset
    :param version: version of the dataset
    :return: created NuScenes object
    """
    return NuScenes(dataroot=root, version=version)
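Minimal usage sketch (the path is illustrative):

nusc = create_nuscenes('/data/sets/nuscenes', version='v1.0-mini')
print('{} samples loaded'.format(len(nusc.sample)))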
Example No. 25
 def __init__(self):
     self.nusc = NuScenes(version='v1.0-mini',
                          dataroot='../data/sets/nuscenes',
                          verbose=False)
     self.sceneID = 0
     self.scene = self.nusc.scene[self.sceneID]
     self.current_sample = self.nusc.get('sample',
                                         self.scene['first_sample_token'])
     print('Data Reader Initialized')
Example No. 26
 def __init__(self, config={}):
     self.config = {
         'version': 'v1.0-mini',
         'dataroot': os.path.join(os.environ['PKG_PATH'],'data')
     }
     self.config.update(config)
     
     
     self.nusc = NuScenes(version=self.config['version'], dataroot=self.config['dataroot'], verbose=True)
     self.nusc_can = NuScenesCanBus(dataroot=self.config['dataroot'])
 
     self.utils = Utils(self.nusc, self.nusc_can)
Example No. 27
def _test_visual(idx=0):
    split = 'train'
    data_path = 'data/nuscenes/'
    nusc = NuScenes(version=SPLITS[split], dataroot=data_path, verbose=True)

    samples = nusc.sample

    sample = samples[idx]
    sample_token = sample['token']
    #tp_parse_sample(sample_token, nusc)
    visualize_sample(sample_token, nusc)
    embed()
Example No. 28
def export_2d_annotation(root_path, info_path, version):
    """Export 2d annotation from the info file and raw data.

    Args:
        root_path (str): Root path of the raw data.
        info_path (str): Path of the info file.
        version (str): Dataset version.
    """
    # get bbox annotations for camera
    camera_types = [
        "CAM_FRONT",
        "CAM_FRONT_RIGHT",
        "CAM_FRONT_LEFT",
        "CAM_BACK",
        "CAM_BACK_LEFT",
        "CAM_BACK_RIGHT",
    ]
    nusc_infos = mmcv.load(info_path)["infos"]
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    # info_2d_list = []
    cat2Ids = [
        dict(id=nus_categories.index(cat_name), name=cat_name)
        for cat_name in nus_categories
    ]
    coco_ann_id = 0
    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
    for info in mmcv.track_iter_progress(nusc_infos):
        for cam in camera_types:
            cam_info = info["cams"][cam]
            coco_infos = get_2d_boxes(
                nusc,
                cam_info["sample_data_token"],
                visibilities=["", "1", "2", "3", "4"],
            )
            (height, width, _) = mmcv.imread(cam_info["data_path"]).shape
            coco_2d_dict["images"].append(
                dict(
                    file_name=cam_info["data_path"],
                    id=cam_info["sample_data_token"],
                    width=width,
                    height=height,
                )
            )
            for coco_info in coco_infos:
                if coco_info is None:
                    continue
                # add an empty key for coco format
                coco_info["segmentation"] = []
                coco_info["id"] = coco_ann_id
                coco_2d_dict["annotations"].append(coco_info)
                coco_ann_id += 1
    mmcv.dump(coco_2d_dict, f"{info_path[:-4]}.coco.json")
def create_nuscenes_infos(root_path, version="v1.0-trainval"):
    from nuscenes.nuscenes import NuScenes
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    from nuscenes.utils import splits
    available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
    assert version in available_vers
    if version == "v1.0-trainval":
        train_scenes = splits.train
        val_scenes = splits.val
    elif version == "v1.0-test":
        train_scenes = splits.test
        val_scenes = []
    elif version == "v1.0-mini":
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise ValueError("unknown")
    test = "test" in version
    root_path = Path(root_path)
    # filter exist scenes. you may only download part of dataset.
    available_scenes = _get_available_scenes(nusc)
    available_scene_names = [s["name"] for s in available_scenes]
    train_scenes = list(
        filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set([
        available_scenes[available_scene_names.index(s)]["token"]
        for s in train_scenes
    ])
    val_scenes = set([
        available_scenes[available_scene_names.index(s)]["token"]
        for s in val_scenes
    ])
    if test:
        print(f"test scene: {len(train_scenes)}")
    else:
        print(
            f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")
    train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
        nusc, train_scenes, val_scenes, test)
    if test:
        print(f"test sample: {len(train_nusc_infos)}")
        with open(root_path / "infos_test.pkl", 'wb') as f:
            pickle.dump(train_nusc_infos, f)
    else:
        print(
            f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
        )
        with open(root_path / "infos_train.pkl", 'wb') as f:
            pickle.dump(train_nusc_infos, f)
        with open(root_path / "infos_val.pkl", 'wb') as f:
            pickle.dump(val_nusc_infos, f)
Example No. 30
def convert(split='val',
            data_path='data/nuscenes/',
            save_path='data/nuscenes/depth_maps'):
    nusc = NuScenes(version=SPLITS[split], dataroot=data_path, verbose=True)
    nusc_exp = NuScenesExplorer(nusc)

    save_dir = os.path.join(save_path, split)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    ret = []

    for sample in nusc.sample:

        sample_token = sample['token']
        print(sample_token, len(ret))

        lidar_token = sample['data'][LidarName]

        for cam_name in CamNames:
            cam_token = sample['data'][cam_name]
            depth_map_path = sample_token + cam_name + '.pts'
            depth_map_path = os.path.join(save_dir, 'depth_data',
                                          depth_map_path)
            img_path = nusc.get_sample_data_path(cam_token)

            data_info = {}
            data_info['sample_token'] = sample_token
            data_info['lidar_token'] = lidar_token
            data_info['depth_path'] = depth_map_path
            data_info['img_path'] = img_path

            ret.append(data_info)
            # NOTE: the 'continue' below skips the depth-map computation; only the metadata entries are collected
            continue
            points, coloring, im = nusc_exp.map_pointcloud_to_image(
                lidar_token, cam_token)

            float_x_cords = points[0]  # < 1600
            float_y_cords = points[1]  # < 900
            float_depth = coloring  #

            point_with_depth = np.stack(
                [float_x_cords, float_y_cords, float_depth], axis=-1)
            np.save(depth_map_path, point_with_depth)

            #nusc.render_pointcloud_in_image(sample_token, camera_channel='CAM_FRONT', out_path='./render.png', verbose=False)

    meta_file_path = os.path.join(save_dir, 'meta.json')
    with open(meta_file_path, 'w') as f:
        json.dump(ret, f)
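A small sketch for reading back the metadata written above (split and save_path values are the defaults used in convert()):

import json, os
with open(os.path.join('data/nuscenes/depth_maps', 'val', 'meta.json')) as f:
    meta = json.load(f)
print('{} camera views indexed'.format(len(meta)))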