Code example #1
def load_keyframe_rad_tokens(nusc: NuScenes) -> List[str]:
    """
    This method takes a NuScenes instance and returns a list with the
    sample_tokens of all RADAR_FRONT sample_data records which
    have (almost) the same timestamp as their corresponding sample
    (is_key_frame = True).
    :param nusc: NuScenes instance
    :return: rad_sd_tokens List of radar sample_data tokens
    """
    rad_sd_tokens = []
    for scene_rec in nusc.scene:
        print('Loading samples of scene %s....' % scene_rec['name'], end='')
        start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])

        rad_front_sd_rec = nusc.get('sample_data', start_sample_rec['data']['RADAR_FRONT'])

        cur_rad_front_sd_rec = rad_front_sd_rec
        rad_sd_tokens.append(cur_rad_front_sd_rec['token'])

        #Append all keyframe radar sample tokens in list
        while cur_rad_front_sd_rec['next'] != '':
            cur_rad_front_sd_rec = nusc.get('sample_data', cur_rad_front_sd_rec['next'])
            if cur_rad_front_sd_rec['is_key_frame']:
                rad_sd_tokens.append(cur_rad_front_sd_rec['token'])
        print("done!")

    return rad_sd_tokens
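A quick usage sketch (hedged: the dataroot below is a placeholder for a local nuScenes installation):

from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)
rad_sd_tokens = load_keyframe_rad_tokens(nusc)
print('Collected %d keyframe radar sample_data tokens' % len(rad_sd_tokens))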
Code example #2
def create_tf_record_train_as_val(fn_out, split, vis_results):
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map)
    writer = tf.python_io.TFRecordWriter(fn_out)
    params = read_params(FLAGS.param)
    logging.debug('Params: ' + str(params))
    nusc = NuScenes(version='v1.0-trainval', dataroot=FLAGS.nuscenes, verbose=True)
    sensor = 'LIDAR_TOP'
    nu_to_kitti_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse
    split_logs = create_splits_logs(split, nusc)
    sample_tokens = split_to_samples(nusc, split_logs)
    random.shuffle(sample_tokens)
    print('Number of samples:', len(sample_tokens))

    for sample_token in sample_tokens[:100]:
        sample = nusc.get('sample', sample_token)
        lidar_top_data = nusc.get('sample_data', sample['data'][sensor])
        if not lidar_top_data['prev']:
            continue
        lidar_top_data_prev = nusc.get('sample_data', lidar_top_data['prev'])
        labels_corners, labels_center, labels_data = compute_labels_image(nusc, sample, sensor,
                                                                          nu_to_kitti_lidar, params)
        filename = os.path.splitext(os.path.splitext(lidar_top_data['filename'])[0])[0]
        filename_prev = os.path.splitext(os.path.splitext(lidar_top_data_prev['filename'])[0])[0]
        tf_example = dict_to_tf_example(labels_corners, labels_center, labels_data, params, label_map_dict,
                                        FLAGS.data, FLAGS.data_beliefs, filename, filename_prev)
        writer.write(tf_example.SerializeToString())
        if vis_results:
            visualize_results(FLAGS.data, filename, labels_corners, os.path.join(FLAGS.output, 'Debug'))
Code example #3
File: NuScenes.py Project: AmnonDrory/PointDSC
    def prep_list_of_sessions(self):
        if self.phase in ['train', 'validation']:
            version = 'v1.0-trainval'
        elif self.phase == 'test':
            version = 'v1.0-test'
        else:
            raise ValueError('Unknown phase: {}'.format(self.phase))
        self.NuScenes_data = NuScenes(version=version,
                                      dataroot=DATASET_ROOT,
                                      verbose=True)
        self.num_sessions = len(self.NuScenes_data.scene)

        self.cloud_tokens = []
        self.session_lengths = []
        self.session_locations = []
        self.session_names = []

        for session_ind in range(self.num_sessions):

            record = self.NuScenes_data.scene[session_ind]
            session_token = record['token']
            self.session_names.append(record['name'])
            location = self.NuScenes_data.get('log',
                                              record['log_token'])['location']
            self.session_locations.append(location)
            sample_token = record["first_sample_token"]
            sample = self.NuScenes_data.get("sample", sample_token)
            lidar_token = sample["data"]["LIDAR_TOP"]
            cur_lidar_tokens = []
            while len(lidar_token) > 0:
                cur_lidar_tokens.append(lidar_token)
                lidar_data = self.NuScenes_data.get("sample_data", lidar_token)
                lidar_token = lidar_data["next"]
            self.cloud_tokens.append(cur_lidar_tokens)
            self.session_lengths.append(len(cur_lidar_tokens))
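As a follow-up sketch, assuming ds is an instance of the enclosing dataset class on which prep_list_of_sessions() has already run, the collected tokens can be resolved to lidar files:

# Resolve the first lidar token of the first session to its file on disk.
first_token = ds.cloud_tokens[0][0]
lidar_path = ds.NuScenes_data.get_sample_data_path(first_token)
print(ds.session_names[0], ds.session_lengths[0], lidar_path)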
Code example #4
def get_pose_intrinsic(
    save_path='/public/MARS/datasets/nuScenes-SF/meta/cam_pose_intrinsic.json'
):

    split = 'train'
    data_path = 'data/nuscenes/'
    nusc = NuScenes(version=SPLITS[split], dataroot=data_path, verbose=True)
    samples = nusc.sample

    cam_token2cam_intext = {}

    for sample in tqdm(samples):
        for cam_name in CamNames:
            cam_token = sample['data'][cam_name]
            cam_data = nusc.get('sample_data', cam_token)
            ego_pose = nusc.get('ego_pose', cam_data['ego_pose_token'])
            cam_cs = nusc.get('calibrated_sensor',
                              cam_data['calibrated_sensor_token'])

            # used to transform from ego to global
            pose_matrix = quat_trans2matrix(ego_pose['rotation'],
                                            ego_pose['translation'])
            # used to transform from camera to ego
            cam_pose = quat_trans2matrix(cam_cs['rotation'],
                                         cam_cs['translation'])

            cam_pose_world = np.matmul(pose_matrix, cam_pose)

            ret = {'pose': cam_pose_world.tolist()}
            ret['intrinsic'] = cam_cs['camera_intrinsic']

            cam_token2cam_intext[cam_token] = ret

    with open(save_path, 'w') as f:
        json.dump(cam_token2cam_intext, f)
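The helper quat_trans2matrix is not shown in this snippet. A minimal sketch consistent with its use above (nuScenes stores rotations as [w, x, y, z] quaternions and translations as 3-vectors) could be:

import numpy as np
from pyquaternion import Quaternion

def quat_trans2matrix(rotation, translation):
    # Build a 4x4 homogeneous transform from a [w, x, y, z] quaternion
    # and a 3-element translation, as stored in nuScenes records.
    mat = np.eye(4)
    mat[:3, :3] = Quaternion(rotation).rotation_matrix
    mat[:3, 3] = translation
    return mat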
Code example #5
    def __init__(self,
                 nusc_kitti_dir: str = '/home/developer/nuscenes/nusc_kitti',
                 cam_name: str = 'CAM_FRONT',
                 lidar_name: str = 'LIDAR_TOP',
                 image_count: int = 10,
                 nusc_version: str = 'v1.0-mini',
                 split: str = 'mini_train'):
        """
        :param nusc_kitti_dir: Where to write the KITTI-style annotations.
        :param cam_name: Name of the camera to export. Note that only one camera is allowed in KITTI.
        :param lidar_name: Name of the lidar sensor.
        :param image_count: Number of images to convert.
        :param nusc_version: nuScenes version to use.
        :param split: Dataset split to use.
        """
        self.nusc_kitti_dir = os.path.expanduser(nusc_kitti_dir)
        self.cam_name = cam_name
        self.lidar_name = lidar_name
        self.image_count = image_count
        self.nusc_version = nusc_version
        self.split = split

        # Create nusc_kitti_dir.
        if not os.path.isdir(self.nusc_kitti_dir):
            os.makedirs(self.nusc_kitti_dir)

        # Select subset of the data to look at.
        self.nusc = NuScenes(version=nusc_version,
                             dataroot='/home/developer/nuscenes')
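A hypothetical instantiation, assuming this __init__ belongs to a KittiConverter-style class as in the devkit's export_kitti.py:

converter = KittiConverter(nusc_kitti_dir='~/nusc_kitti',
                           image_count=5,
                           nusc_version='v1.0-mini',
                           split='mini_train')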
Code example #6
def main():
    root_path = '/extssd/jiaxin/nuscenes/test'
    nusc = NuScenes(version='v1.0-test', dataroot=root_path, verbose=True)

    sensor = 'CAM_FRONT'

    counter = 0
    for i, scene in enumerate(nusc.scene):
        scene_token = scene['token']
        scene = nusc.get('scene', scene_token)
        first_sample = nusc.get('sample', scene['first_sample_token'])
        camera = nusc.get('sample_data', first_sample['data'][sensor])

        img = np.array(
            Image.open(os.path.join(nusc.dataroot,
                                    camera['filename'])).convert('L'))
        H, W = img.shape[0], img.shape[1]

        img_mean = np.mean(img.astype(np.float32))

        white_mask = img > 150
        white_area = np.sum(white_mask.astype(np.float32))

        if img_mean < 110 and white_area < (H * W) * 0.1:
            print('\'%s\',' % (scene_token))
            counter += 1
            plt.figure()
            plt.gray()
            plt.imshow(img)
            plt.show()

    print('%d night scenes' % counter)
Code example #7
def get_rad_to_cam(nusc: NuScenes, cam_sd_token: str, rad_sd_token: str):
    """
    Method to get the extrinsic calibration matrix from radar_front to camera_front
    for a specific sample.
    Every sample_data has a record of which calibrated_sensor the
    data is collected from ("calibrated_sensor_token" key).
    The calibrated_sensor record consists of the definition of a
    particular sensor (lidar/radar/camera) as calibrated on a particular vehicle.
    :param nusc: NuScenes instance
    :param cam_sd_token: A token of a specific camera_front sample_data
    :param rad_sd_token: A token of a specific radar_front sample_data
    :return: rad_to_cam <np.float: 4, 4> Homogeneous transform matrix from radar to camera
    """
    cam_cs_token = nusc.get('sample_data',
                            cam_sd_token)["calibrated_sensor_token"]
    cam_cs_rec = nusc.get('calibrated_sensor', cam_cs_token)

    rad_cs_token = nusc.get('sample_data',
                            rad_sd_token)["calibrated_sensor_token"]
    rad_cs_rec = nusc.get('calibrated_sensor', rad_cs_token)

    #Based on how transforms are handled in nuScenes scripts like scripts/export_kitti.py
    rad_to_ego = transform_matrix(rad_cs_rec['translation'],
                                  Quaternion(rad_cs_rec['rotation']),
                                  inverse=False)
    ego_to_cam = transform_matrix(cam_cs_rec['translation'],
                                  Quaternion(cam_cs_rec['rotation']),
                                  inverse=True)
    rad_to_cam = np.dot(ego_to_cam, rad_to_ego)
    return rad_to_cam
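A small sanity check, assuming cam_sd_token and rad_sd_token come from the same sample: mapping the homogeneous radar origin through the matrix yields the radar mounting position expressed in camera coordinates.

import numpy as np

rad_to_cam = get_rad_to_cam(nusc, cam_sd_token, rad_sd_token)
radar_origin = np.array([0.0, 0.0, 0.0, 1.0])
print(rad_to_cam.dot(radar_origin)[:3])  # radar position in the camera frame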
Code example #8
def load_keyframe_rad_cam_data(
        nusc: NuScenes) -> Tuple[List[str], List[str], List[str]]:
    """
    This method takes a NuScenes instance and returns two lists with the
    sample_tokens of all CAM_FRONT and RADAR_FRONT sample_data which
    have (almost) the same timestamp as their corresponding sample
    (is_key_frame = True). In addition, it returns the sample_names, which are set
    equal to the filename of each CAM_FRONT sample_data.
    :param nusc: NuScenes instance
    :return: (cam_sd_tokens, rad_sd_tokens, sample_names).
    Tuple with lists of camera and radar tokens as well as sample_names
    """

    #Lists to hold tokens of all cam and rad sample_data that have is_key_frame = True
    #These have (almost) the same timestamp as their corresponding sample and
    #correspond to the files in the ..sets/nuscenes/samples/ folder
    cam_sd_tokens = []
    rad_sd_tokens = []
    sample_names = []
    for scene_rec in nusc.scene:
        #scene_name = scene_rec["name"] + "_sample_"
        print('Loading samples of scene %s....' % scene_rec['name'], end='')
        start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
        #sample_name = scene_name + str(start_sample_rec["timestamp"])

        cam_front_sd_rec = nusc.get('sample_data',
                                    start_sample_rec['data']['CAM_FRONT'])
        rad_front_sd_rec = nusc.get('sample_data',
                                    start_sample_rec['data']['RADAR_FRONT'])

        cur_cam_front_sd_rec = cam_front_sd_rec
        cur_rad_front_sd_rec = rad_front_sd_rec
        sample_name = cur_cam_front_sd_rec["filename"].replace(
            'samples/CAM_FRONT/', '').replace('.jpg', '')
        #Append the first sample_name, cam and rad sample_data tokens in lists
        sample_names.append(sample_name)
        cam_sd_tokens.append(cur_cam_front_sd_rec['token'])
        rad_sd_tokens.append(cur_rad_front_sd_rec['token'])

        #Append all keyframe sample_names and camera sample tokens in list
        while cur_cam_front_sd_rec['next'] != '':
            cur_cam_front_sd_rec = nusc.get('sample_data',
                                            cur_cam_front_sd_rec['next'])
            sample_name = cur_cam_front_sd_rec["filename"].replace(
                'samples/CAM_FRONT/', '').replace('.jpg', '')
            if cur_cam_front_sd_rec['is_key_frame']:
                sample_names.append(sample_name)
                cam_sd_tokens.append(cur_cam_front_sd_rec['token'])

        #Append all keyframe radar sample tokens in list
        while cur_rad_front_sd_rec['next'] != '':
            cur_rad_front_sd_rec = nusc.get('sample_data',
                                            cur_rad_front_sd_rec['next'])
            if cur_rad_front_sd_rec['is_key_frame']:
                rad_sd_tokens.append(cur_rad_front_sd_rec['token'])
        print("done!")

    assert (len(cam_sd_tokens) == len(rad_sd_tokens) == len(sample_names))

    return (cam_sd_tokens, rad_sd_tokens, sample_names)
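To illustrate the "(almost) the same timestamp" property, a hedged check over the paired tokens (nuScenes timestamps are in microseconds; the 0.1 s tolerance is an assumption):

cam_sd_tokens, rad_sd_tokens, sample_names = load_keyframe_rad_cam_data(nusc)
for cam_t, rad_t in zip(cam_sd_tokens, rad_sd_tokens):
    cam_ts = nusc.get('sample_data', cam_t)['timestamp']
    rad_ts = nusc.get('sample_data', rad_t)['timestamp']
    assert abs(cam_ts - rad_ts) < 100000  # within 0.1 s of each other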
Code example #9
    def __init__(self,
                 nuscenes_root,
                 version="v1.0-trainval",
                 max_scenes=None,
                 *,
                 read_radar: bool = True,
                 read_camera: bool = True,
                 read_semantics: bool = True,
                 read_bounding_boxes: bool = True):
        self.nusc = NuScenes(version=version,
                             dataroot=nuscenes_root,
                             verbose=False)
        self.root = pathlib.Path(nuscenes_root)

        # global counter to sanity-check if we calculate the same number of points
        # within boxes as the dataset authors
        self.ns_lidar_pts_to_calculated_diff = 0

        # flags defining the data entries to return from 'read'
        self.read_radar = read_radar
        self.read_camera = read_camera
        self.read_semantics = read_semantics
        self.read_bounding_boxes = read_bounding_boxes

        if self.read_semantics and not hasattr(self.nusc, "lidarseg"):
            raise RuntimeError("Error: nuScenes-lidarseg not installed!")

        # assert that the training targets range from 0 - (|mapping| - 1)
        assert len(set(
            NUSCENES_SEM_CLASSES.values())) == len(NUSCENES_SEM_CLASSES)
        assert all(a == b
                   for a, b in zip(sorted(NUSCENES_SEM_CLASSES.values()),
                                   range(len(NUSCENES_SEM_CLASSES))))

        split_name = {
            "v1.0-trainval": "nuscenes_default",
            "v1.0-mini": "nuscenes_mini",
        }.get(version, "nuscenes_{}".format(version))

        # create split dict
        self.split = {
            "name": split_name,
            "data": {k: []
                     for k in self.scene_split_lists.keys()},
        }
        for i, scene in enumerate(self.nusc.scene):
            if max_scenes is not None and i >= max_scenes:
                break
            name = scene["name"]
            for k, v in self.scene_split_lists.items():
                if name in v:
                    split_list = self.split["data"][k]
                    split_list.extend([
                        self.sample_id_template.format(name, i)
                        for i in range(0, scene["nbr_samples"])
                    ])
                    break
            else:
                raise RuntimeError(
                    "Found scene that is not in a split: {}".format(name))
Code example #10
    def __init__(self, root, mode, opt: options.Options):
        super(nuScenesLoader, self).__init__()
        self.root = root
        self.opt = opt
        self.mode = mode

        # farthest point sample
        self.farthest_sampler = FarthestSampler(dim=3)

        # list of (traversal, pc_timestamp, pc_timestamp_idx, traversal_pc_num)
        if mode == 'train':
            self.nuscenes_path = os.path.join(root, 'trainval')
            version = 'v1.0-trainval'
        else:
            self.nuscenes_path = os.path.join(root, 'test')
            version = 'v1.0-test'

        self.dataset = make_nuscenes_dataset(self.nuscenes_path)
        self.nusc = NuScenes(version=version,
                             dataroot=self.nuscenes_path,
                             verbose=True)

        self.camera_name_list = [
            'CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT', 'CAM_BACK',
            'CAM_BACK_LEFT', 'CAM_BACK_RIGHT'
        ]
Code example #11
def get_egoposes_on_drivable_ratio(nusc: NuScenes, nusc_map: NuScenesMap,
                                   scene_token: str) -> float:
    """
    Get the ratio of ego poses on the drivable area.
    :param nusc: A NuScenes instance.
    :param nusc_map: The NuScenesMap instance of a particular map location.
    :param scene_token: The token of the current scene.
    :return: The ratio of poses that fall on the drivable area.
    """

    # Go through each sample in the scene.
    sample_tokens = nusc.field2token('sample', 'scene_token', scene_token)
    poses_all = 0
    poses_valid = 0
    for sample_token in sample_tokens:

        # Poses are associated with the sample_data. Here we use the lidar sample_data.
        sample_record = nusc.get('sample', sample_token)
        sample_data_record = nusc.get('sample_data',
                                      sample_record['data']['LIDAR_TOP'])
        pose_record = nusc.get('ego_pose',
                               sample_data_record['ego_pose_token'])

        # Check if the ego pose is on the drivable area.
        ego_pose = pose_record['translation'][:2]
        record = nusc_map.record_on_point(ego_pose[0], ego_pose[1],
                                          'drivable_area')
        if len(record) > 0:
            poses_valid += 1
        poses_all += 1
    ratio_valid = poses_valid / poses_all

    return ratio_valid
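Example usage as a sketch; the map_name passed to NuScenesMap must match the location stored in the scene's log record, and the dataroot is a placeholder:

from nuscenes.map_expansion.map_api import NuScenesMap

scene = nusc.scene[0]
location = nusc.get('log', scene['log_token'])['location']
nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes', map_name=location)
print(get_egoposes_on_drivable_ratio(nusc, nusc_map, scene['token']))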
Code example #12
    def test_egoposes_on_map(self):
        """ Test that all ego poses land on """
        nusc = NuScenes(version=self.version,
                        dataroot=os.environ['NUSCENES'],
                        verbose=False)
        whitelist = [
            'scene-0499', 'scene-0501', 'scene-0502', 'scene-0515',
            'scene-0517'
        ]

        invalid_scenes = []
        for scene in tqdm.tqdm(nusc.scene, leave=False):
            if scene['name'] in whitelist:
                continue

            log = nusc.get('log', scene['log_token'])
            map_name = log['location']
            nusc_map = self.nusc_maps[map_name]
            ratio_valid = get_egoposes_on_drivable_ratio(
                nusc, nusc_map, scene['token'])
            if ratio_valid != 1.0:
                print(
                    'Error: Scene %s has a ratio of %f ego poses on the drivable area!'
                    % (scene['name'], ratio_valid))
                invalid_scenes.append(scene['name'])

        self.assertEqual(len(invalid_scenes), 0)
Code example #13
def merge_depth_sf(depth_meta_path, sf_meta_path, save_path):
    
    with open(depth_meta_path, 'r') as f:
        depth_meta = json.load(f)

    with open(sf_meta_path, 'r') as f:
        sf_meta = json.load(f)
    
    split = 'train'
    data_path = 'data/nuscenes/'
    nusc = NuScenes(
        version=SPLITS[split], dataroot=data_path, verbose=True)
    
    imgpath2paths = {}
    for depth_info in depth_meta:
        sample_token = depth_info['sample_token']
    
        depth_path = depth_info['depth_path']
        img_path = depth_info['img_path']
        cam_name = img_path.split('/')[-2]

        cam_token = nusc.get('sample', sample_token)['data'][cam_name]
        sf_path = sf_meta[cam_token]['points_path']
        img_path = sf_meta[cam_token]['img_path'] # use this version of img path

        tmp = {'token': cam_token, 'depth_path': depth_path, 
                'cam_name': cam_name, 'sf_path': sf_path, 
                'img_path': img_path}
        imgpath2paths[img_path] = tmp

    with open(save_path, 'w') as f:
        json.dump(imgpath2paths, f)
Code example #14
File: nuscenes_db.py Project: zyg11/RRPN
    def __init__(self,
                 nusc_root,
                 nusc_version,
                 split,
                 max_cam_sweeps=6,
                 max_lidar_sweeps=10,
                 max_radar_sweeps=6,
                 logging_level="INFO",
                 logger=None,
                 nusc=None):
        """
        Image database object that holds the sample data tokens for the nuscenes
        dataset.
        :param nusc_root: location of the nuscenes dataset
        :param nusc_version: the version of the dataset to use ('v1.0-trainval',
                             'v1.0-test', 'v1.0-mini')
        :param split: the dataset split to load
        :param max_cam_sweeps: number of sweep tokens to return for each camera
        :param max_lidar_sweeps: number of sweep tokens to return for lidar
        :param max_radar_sweeps: number of sweep tokens to return for each radar
        """

        self.nusc_root = nusc_root
        self.nusc_version = nusc_version
        self.split = split
        self.max_cam_sweeps = max_cam_sweeps
        self.max_lidar_sweeps = max_lidar_sweeps
        self.max_radar_sweeps = max_radar_sweeps
        self.id_length = 8
        self.db = {}

        assert nusc_version in constants.NUSCENES_SPLITS.keys(), \
            "Nuscenes version not valid."
        assert split in constants.NUSCENES_SPLITS[nusc_version], \
            "Nuscenes split ({}) is not valid for {}".format(split, nusc_version)

        if logger is None:
            self.logger = logging.initialize_logger('pynuscenes',
                                                    logging_level)
        else:
            self.logger = logger

        if nusc is not None:
            if nusc.version != nusc_version:
                self.logger.info(
                    'Loading nuscenes {} dataset'.format(nusc_version))
                self.nusc = NuScenes(version=nusc_version,
                                     dataroot=self.nusc_root,
                                     verbose=True)
            else:
                self.nusc = nusc
        else:
            self.logger.info(
                'Loading nuscenes {} dataset'.format(nusc_version))
            self.nusc = NuScenes(version=nusc_version,
                                 dataroot=self.nusc_root,
                                 verbose=True)

        self.SENSOR_NAMES = [x['channel'] for x in self.nusc.sensor]
Code example #15
File: nusc_dataset.py Project: XXXVincent/MonoDepth2
    def __init__(self,
                 data_root,
                 filenames,
                 height,
                 width,
                 frame_idxs,
                 num_scales,
                 version='v1.0-mini',
                 sensor='CAM_FRONT',
                 is_train=False,
                 img_ext='.jpg'):
        super(NuscDataset, self).__init__()

        self.data_path = data_root
        # NOTE: the hardcoded path below overrides the data_root argument
        self.data_path = '/share/nuscenes'
        self.filenames = filenames
        self.height = height
        self.width = width
        self.num_scales = num_scales
        self.interp = Image.ANTIALIAS

        self.frame_idxs = frame_idxs

        self.is_train = is_train
        self.img_ext = img_ext

        self.loader = pil_loader
        self.to_tensor = transforms.ToTensor()
        self.nusc = NuScenes(version=version,
                             dataroot=self.data_path,
                             verbose=True)
        self.sensor = sensor
        self.data_root = '/share/nuscenes'
        self.full_res_shape = (1600, 640)

        # We need to specify augmentations differently in newer versions of torchvision.
        # We first try the newer tuple version; if this fails we fall back to scalars
        try:
            self.brightness = (0.8, 1.2)
            self.contrast = (0.8, 1.2)
            self.saturation = (0.8, 1.2)
            self.hue = (-0.1, 0.1)
            transforms.ColorJitter.get_params(self.brightness, self.contrast,
                                              self.saturation, self.hue)
        except TypeError:
            self.brightness = 0.2
            self.contrast = 0.2
            self.saturation = 0.2
            self.hue = 0.1

        self.resize = {}
        for i in range(self.num_scales):
            s = 2**i
            self.resize[i] = transforms.Resize(
                (self.height // s, self.width // s), interpolation=self.interp)

        self.load_depth = self.check_depth()
Code example #16
 def __init__(self):
     self.nusc = NuScenes(version='v1.0-mini',
                          dataroot='../data/sets/nuscenes',
                          verbose=False)
     self.sceneID = 0
     self.scene = self.nusc.scene[self.sceneID]
     self.current_sample = self.nusc.get('sample',
                                         self.scene['first_sample_token'])
     print('Data Reader Initialized')
Code example #17
def tokens_to_data_pairs(nusc: NuScenes, cam_sd_tokens: List[str],
                         rad_sd_tokens: List[str], depth: float,
                         threshold: float) -> List[Tuple[np.ndarray, np.ndarray]]:
    """
    This method takes a pair of lists with the Camera and Radar sample_data tokens,
    filters the RadarPointCloud for detections closer than the parameter depth,
    loads the actual data in two corresponding lists and returns the zipped lists
    :param nusc: NuScenes instance
    :param cam_sd_tokens: List with all the camera sample_data tokens
    :param rad_sd_tokens: List with all the radar sample_data tokens
    :param depth: Distance from the radar sensor (x value) above which all detections are omitted
    :param threshold: No pair of points in the resulting dataset will have a distance less than this threshold
    :return: List of zipped (image, radar point cloud) array pairs
    """
    rgb_images_list = []
    for i in range(len(cam_sd_tokens)):
        cam_sd_path = nusc.get_sample_data_path(cam_sd_tokens[i])
        if not os.path.isfile(cam_sd_path):
            continue
        #im = Image.open(cam_sd_path)
        #im = im.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.BILINEAR)
        img = cv2.imread(cam_sd_path)
        #Resize with Bilinear interpolation
        img = cv2.resize(img, (IMAGE_WIDTH, IMAGE_HEIGHT))
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        rgb_images_list.append(img_rgb)

    radar_pcl_list = []
    for i in range(len(rad_sd_tokens)):
        rad_sd_path = nusc.get_sample_data_path(rad_sd_tokens[i])
        if not os.path.isfile(rad_sd_path):
            continue
        #radar_pcl = RadarPointCloud.from_file(rad_sd_path, invalid_states = range(18), dynprop_states = range(18), ambig_states = range(18))
        #radar_pcl = RadarPointCloud.from_file(rad_sd_path, invalid_states = None, dynprop_states = [0,2,6], ambig_states = None)
        radar_pcl = RadarPointCloud.from_file(rad_sd_path)
        # radar_pcl.points.shape is (18, num_points)
        radar_pcl.points, dist_list = filter_pointcloud(
            radar_pcl.points, threshold)
        # radar_pcl.points.shape became (3, num_points)
        #RADNET expects shape (num_points, 4)
        radar_pcl.points = radar_pcl.points.transpose()
        #radar_pcl.points = radar_pcl.points[:, :3]
        #radar_pcl.points = radar_pcl.points[radar_pcl.points[:, 0] < depth]
        radar_pcl.points = np.hstack((radar_pcl.points,
                                      np.ones((radar_pcl.points.shape[0], 1),
                                              dtype=radar_pcl.points.dtype)))

        radar_pcl_list.append(radar_pcl)

    assert (len(rgb_images_list) == len(radar_pcl_list))
    image_radar_pairs = list(zip(rgb_images_list, radar_pcl_list))

    del rgb_images_list
    del radar_pcl_list

    return image_radar_pairs
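filter_pointcloud is not defined in this snippet. A minimal sketch that matches its use above, under the assumption that it keeps only the x/y/z rows and greedily drops points closer than threshold metres to an already accepted point:

import numpy as np

def filter_pointcloud(points, threshold):
    # points has shape (18, num_points) as loaded by RadarPointCloud;
    # rows 0-2 hold the x, y, z coordinates.
    xyz = points[:3, :]
    kept, dist_list = [], []
    for i in range(xyz.shape[1]):
        dists = [np.linalg.norm(xyz[:, i] - xyz[:, j]) for j in kept]
        if all(d >= threshold for d in dists):
            kept.append(i)
            dist_list.append(min(dists) if dists else np.inf)
    return xyz[:, kept], dist_list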
Code example #18
File: convert_scene.py Project: xli4217/nuscenes_env
 def __init__(self, config=None):
     self.config = {
         'version': 'v1.0-mini',
         'dataroot': os.path.join(os.environ['PKG_PATH'], 'data')
     }
     self.config.update(config or {})
     
     
     self.nusc = NuScenes(version=self.config['version'], dataroot=self.config['dataroot'], verbose=True)
     self.nusc_can = NuScenesCanBus(dataroot=self.config['dataroot'])
 
     self.utils = Utils(self.nusc, self.nusc_can)
Code example #19
    def __init__(self,
                 data_path,
                 version='v1.0-trainval',
                 split='train',
                 return_ref=False):
        assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
        if version == 'v1.0-trainval':
            train_scenes = splits.train
            val_scenes = splits.val
        elif version == 'v1.0-test':
            train_scenes = splits.test
            val_scenes = []
        elif version == 'v1.0-mini':
            train_scenes = splits.mini_train
            val_scenes = splits.mini_val
        else:
            raise NotImplementedError
        self.split = split
        self.data_path = data_path
        self.return_ref = return_ref

        self.nusc = NuScenes(version=version, dataroot=data_path, verbose=True)

        self.map_name_from_general_index_to_segmentation_index = {}
        for index in self.nusc.lidarseg_idx2name_mapping:
            self.map_name_from_general_index_to_segmentation_index[
                index] = map_name_from_segmentation_class_to_segmentation_index[
                    map_name_from_general_to_segmentation_class[
                        self.nusc.lidarseg_idx2name_mapping[index]]]

        available_scenes = get_available_scenes(self.nusc)
        available_scene_names = [s['name'] for s in available_scenes]
        train_scenes = list(
            filter(lambda x: x in available_scene_names, train_scenes))
        val_scenes = list(
            filter(lambda x: x in available_scene_names, val_scenes))
        train_scenes = set([
            available_scenes[available_scene_names.index(s)]['token']
            for s in train_scenes
        ])
        val_scenes = set([
            available_scenes[available_scene_names.index(s)]['token']
            for s in val_scenes
        ])

        self.train_token_list, self.val_token_list = get_path_infos(
            self.nusc, train_scenes, val_scenes)

        print('%s: train scene(%d), val scene(%d)' %
              (version, len(train_scenes), len(val_scenes)))
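get_available_scenes comes from the project's utility module and is not shown. A minimal sketch of the common implementation, which keeps only scenes whose lidar files actually exist on disk:

import os

def get_available_scenes(nusc):
    available_scenes = []
    for scene in nusc.scene:
        sample = nusc.get('sample', scene['first_sample_token'])
        lidar_path = nusc.get_sample_data_path(sample['data']['LIDAR_TOP'])
        if os.path.isfile(lidar_path):
            available_scenes.append(scene)
    return available_scenes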
Code example #20
def convert(split='val',
            data_path='data/nuscenes/',
            save_path='data/nuscenes/depth_maps'):
    nusc = NuScenes(version=SPLITS[split], dataroot=data_path, verbose=True)
    nusc_exp = NuScenesExplorer(nusc)

    save_dir = os.path.join(save_path, split)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    ret = []

    for sample in nusc.sample:

        sample_token = sample['token']
        print(sample_token, len(ret))

        lidar_token = sample['data'][LidarName]

        for cam_name in CamNames:
            cam_token = sample['data'][cam_name]
            depth_map_path = sample_token + cam_name + '.pts'
            depth_map_path = os.path.join(save_dir, 'depth_data',
                                          depth_map_path)
            img_path = nusc.get_sample_data_path(cam_token)

            data_info = {}
            data_info['sample_token'] = sample_token
            data_info['lidar_token'] = lidar_token
            data_info['depth_path'] = depth_map_path
            data_info['img_path'] = img_path

            ret.append(data_info)
            # NOTE: the projection code below is intentionally skipped; only metadata is collected
            continue
            points, coloring, im = nusc_exp.map_pointcloud_to_image(
                lidar_token, cam_token)

            float_x_cords = points[0]  # < 1600
            float_y_cords = points[1]  # < 900
            float_depth = coloring  #

            point_with_depth = np.stack(
                [float_x_cords, float_y_cords, float_depth], axis=-1)
            np.save(depth_map_path, point_with_depth)

            #nusc.render_pointcloud_in_image(sample_token, camera_channel='CAM_FRONT', out_path='./render.png', verbose=False)

    meta_file_path = os.path.join(save_dir, 'meta.json')
    with open(meta_file_path, 'w') as f:
        json.dump(ret, f)
Code example #21
    def __init__(self, args):
        self.args_dp = args['DataPrepare']
        self.args_vg = args['VoxelGenerator']
        self.cache_path = self.args_dp.data_root + '/' + self.args_dp.cache_name

        if os.path.exists(self.cache_path):
            self._Data_frags = pickle.load(open(self.cache_path, 'rb'))
        else:
            self.nusc = NuScenes(version=self.args_dp.version, dataroot=self.args_dp.data_root, verbose=self.args_dp.verbose)
            self._Data_frags = self.getFragAnnotations()
            pickle.dump(self._Data_frags, open(self.cache_path, 'wb'))

        # flatten the per-scene fragment lists into a single list
        self._Data_frags = [item for scene_data in self._Data_frags for item in scene_data]
Code example #22
def main(dataroot: str,
         version: str,
         output_prefix: str,
         output_format: str = 'kml') -> None:
    """
    Extract the latlon coordinates for each available pose and write the results to a file.
    The file is organized by location and scene_name.
    :param dataroot: Path of the nuScenes dataset.
    :param version: NuScenes version.
    :param output_prefix: Where to save the output file (without the file extension).
    :param output_format: The output file format, kml or json.
    """
    # Init nuScenes.
    nusc = NuScenes(dataroot=dataroot, version=version, verbose=False)

    coordinates_per_location = {}
    print(f'Extracting coordinates...')
    for scene in tqdm(nusc.scene):
        # Retrieve nuScenes poses.
        scene_name = scene['name']
        scene_token = scene['token']
        # The log's location is needed to extract the reference coordinate.
        location = nusc.get('log', scene['log_token'])['location']
        # For each pose, we will extract the corresponding coordinate.
        poses = get_poses(nusc, scene_token)

        # Compute and store coordinates.
        coordinates = derive_latlon(location, poses)
        if location not in coordinates_per_location:
            coordinates_per_location[location] = {}
        coordinates_per_location[location][scene_name] = coordinates

    # Create output directory if necessary.
    dest_dir = os.path.dirname(output_prefix)
    if dest_dir != '' and not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    # Write to json.
    output_path = f'{output_prefix}_{version}.{output_format}'
    if output_format == 'json':
        with open(output_path, 'w') as fh:
            json.dump(coordinates_per_location, fh, sort_keys=True, indent=4)
    elif output_format == 'kml':
        # Write to kml.
        export_kml(coordinates_per_location, output_path)
    else:
        raise Exception('Error: Invalid output format: %s' % output_format)

    print(f"Saved the coordinates in {output_path}")
Code example #23
    def __init__(self, data_set_name, data_set_path, output_path=''):
        self.__scan_data = {}
        self.__writers = {}

        if os.path.exists(data_set_path):
            self.__nusc = NuScenes(version=data_set_name,
                                   dataroot=data_set_path,
                                   verbose=True)
        else:
            raise FileNotFoundError('Given path: {}, does not exist'.format(data_set_path))

        self.__output_path = output_path
        if not (self.__output_path == ''):
            if not os.path.exists(self.__output_path):
                os.makedirs(self.__output_path)
Code example #24
    def __init__(self, version, root, transform=None, target_transform=None, *, verbose=True,
                 specific_tokens=None, sensor_modality='camera', sensor='CAM_FRONT', lidar='LIDAR_TOP',
                 pretransform_data=True, preload_data=True, only_annotated=False):
        super(NuScenesDataset, self).__init__(root, transform=transform, target_transform=target_transform)
        self.nusc = NuScenes(version=version, dataroot=root, verbose=verbose)
        self.lidar = lidar
        self.only_annotated = only_annotated
        self.sensor = ""
        self.sensor_modality = ""
        if specific_tokens:
            self.tokens = specific_tokens
        elif sensor:
            self.tokens = self.nusc.field2token(table_name="sample_data", field="channel",
                                                query=sensor)
            self.sensor = sensor
        elif sensor_modality:
            self.tokens = self.nusc.field2token(table_name="sample_data", field="sensor_modality",
                                                query=sensor_modality)
            self.sensor_modality = sensor_modality
        else:
            raise ValueError("Both sensor_modality or sensor parameters cannot be None.")

        if only_annotated:
            tokens = []
            for t in self.tokens:
                sample_data = self.nusc.get("sample_data", t)
                if sample_data["is_key_frame"]:
                    tokens.append(t)
            self.tokens = tokens
        if verbose:
            print("Number of valid sample data tokens: {}".format(len(self.tokens)))

        self.objects = []
        self.images = []
        self.scene_tokens = []
        self.transforms = transforms
        self.pretransform_data = pretransform_data
        self.preload_data = preload_data
        if self.preload_data:
            for t in self.tokens:
                img = Image.open(self.get_filepath(t))
                if self.transform and self.pretransform_data:
                    img = self.transform(img)
                self.images.append(img)
                self.objects = []
        for t in self.tokens:
            # find scene token
            self.scene_tokens.append(self.get_scene_token(t))
Code example #25
def create_nuscenes_info(version, data_path, save_path, max_sweeps=10):
    from nuscenes.nuscenes import NuScenes
    from nuscenes.utils import splits
    from . import nuscenes_utils
    data_path = data_path / version / 'v1.0-trainval_meta'  # change data path
    save_path = save_path / version

    assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
    if version == 'v1.0-trainval':
        train_scenes = splits.train
        val_scenes = splits.val
    elif version == 'v1.0-test':
        train_scenes = splits.test
        val_scenes = []
    elif version == 'v1.0-mini':
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise NotImplementedError

    nusc = NuScenes(version=version, dataroot=data_path, verbose=True)
    available_scenes = nuscenes_utils.get_available_scenes(nusc)
    available_scene_names = [s['name'] for s in available_scenes]
    train_scenes = list(
        filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in train_scenes
    ])
    val_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in val_scenes
    ])

    print('%s: train scene(%d), val scene(%d)' %
          (version, len(train_scenes), len(val_scenes)))

    train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos(
        data_path=data_path,
        nusc=nusc,
        train_scenes=train_scenes,
        val_scenes=val_scenes,
        test='test' in version,
        max_sweeps=max_sweeps)

    if version == 'v1.0-test':
        print('test sample: %d' % len(train_nusc_infos))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_test.pkl',
                  'wb') as f:
            pickle.dump(train_nusc_infos, f)
    else:
        print('train sample: %d, val sample: %d' %
              (len(train_nusc_infos), len(val_nusc_infos)))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_train.pkl',
                  'wb') as f:
            pickle.dump(train_nusc_infos, f)
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_val.pkl',
                  'wb') as f:
            pickle.dump(val_nusc_infos, f)
Code example #26
    def evaluation(self, det_annos, class_names, **kwargs):
        import json
        from nuscenes.nuscenes import NuScenes
        from . import nuscenes_utils
        nusc = NuScenes(version=self.dataset_cfg.VERSION,
                        dataroot=str(self.root_path),
                        verbose=True)
        nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(
            det_annos, nusc)
        nusc_annos['meta'] = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }

        output_path = Path(kwargs['output_path'])
        output_path.mkdir(exist_ok=True, parents=True)
        res_path = str(output_path / 'results_nusc.json')
        with open(res_path, 'w') as f:
            json.dump(nusc_annos, f)

        self.logger.info(
            f'The predictions of NuScenes have been saved to {res_path}')

        if self.dataset_cfg.VERSION == 'v1.0-test':
            return 'No ground-truth annotations for evaluation', {}

        from nuscenes.eval.detection.config import config_factory
        from nuscenes.eval.detection.evaluate import NuScenesEval

        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
            'v1.0-test': 'test'
        }
        try:
            eval_version = 'detection_cvpr_2019'
            eval_config = config_factory(eval_version)
        except Exception:
            eval_version = 'cvpr_2019'
            eval_config = config_factory(eval_version)

        nusc_eval = NuScenesEval(
            nusc,
            config=eval_config,
            result_path=res_path,
            eval_set=eval_set_map[self.dataset_cfg.VERSION],
            output_dir=str(output_path),
            verbose=True,
        )
        metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)

        with open(output_path / 'metrics_summary.json', 'r') as f:
            metrics = json.load(f)

        result_str, result_dict = nuscenes_utils.format_nuscene_results(
            metrics, self.class_names, version=eval_version)
        return result_str, result_dict
Code example #27
def quick_test(dataroot='/data/nuscenes', gpuid=0, nworkers=10):
    """Evaluate detections with PKL.
    """
    nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=True)
    nusc_maps = get_nusc_maps(dataroot)
    cfg = config_factory('detection_cvpr_2019')
    device = torch.device(f'cuda:{gpuid}') if gpuid >= 0\
        else torch.device('cpu')
    print(f'using device: {device}')

    get_example_submission()

    nusc_eval = DetectionEval(nusc,
                              config=cfg,
                              result_path='./example_submission.json',
                              eval_set='mini_train',
                              output_dir='./res',
                              verbose=True)
    info = calculate_pkl(nusc_eval.gt_boxes,
                         nusc_eval.pred_boxes,
                         nusc_eval.sample_tokens,
                         nusc_eval.nusc,
                         nusc_maps,
                         device,
                         nworkers,
                         bsz=128,
                         plot_kextremes=5,
                         verbose=True)
    print({k: v for k, v in info.items() if k != 'full'})
Code example #28
def export_2d_annotation(root_path, info_path, version, mono3d=True):
    """Export 2d annotation from the info file and raw data.

    Args:
        root_path (str): Root path of the raw data.
        info_path (str): Path of the info file.
        version (str): Dataset version.
        mono3d (bool): Whether to export mono3d annotation. Default: True.
    """
    # get bbox annotations for camera
    camera_types = [
        'CAM_FRONT',
        'CAM_FRONT_RIGHT',
        'CAM_FRONT_LEFT',
        'CAM_BACK',
        'CAM_BACK_LEFT',
        'CAM_BACK_RIGHT',
    ]
    nusc_infos = mmcv.load(info_path)['infos']
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    # info_2d_list = []
    cat2Ids = [
        dict(id=nus_categories.index(cat_name), name=cat_name)
        for cat_name in nus_categories
    ]
    coco_ann_id = 0
    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
    for info in mmcv.track_iter_progress(nusc_infos):
        for cam in camera_types:
            cam_info = info['cams'][cam]
            coco_infos = get_2d_boxes(nusc,
                                      cam_info['sample_data_token'],
                                      visibilities=['', '1', '2', '3', '4'],
                                      mono3d=mono3d)
            (height, width, _) = mmcv.imread(cam_info['data_path']).shape
            coco_2d_dict['images'].append(
                dict(file_name=cam_info['data_path'].split('data/nuscenes/')
                     [-1],
                     id=cam_info['sample_data_token'],
                     token=info['token'],
                     cam2ego_rotation=cam_info['sensor2ego_rotation'],
                     cam2ego_translation=cam_info['sensor2ego_translation'],
                     ego2global_rotation=info['ego2global_rotation'],
                     ego2global_translation=info['ego2global_translation'],
                     cam_intrinsic=cam_info['cam_intrinsic'],
                     width=width,
                     height=height))
            for coco_info in coco_infos:
                if coco_info is None:
                    continue
                # add an empty key for coco format
                coco_info['segmentation'] = []
                coco_info['id'] = coco_ann_id
                coco_2d_dict['annotations'].append(coco_info)
                coco_ann_id += 1
    if mono3d:
        json_prefix = f'{info_path[:-4]}_mono3d'
    else:
        json_prefix = f'{info_path[:-4]}'
    mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json')
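A hypothetical call, assuming the info .pkl file was produced by the usual nuScenes info converter:

export_2d_annotation('data/nuscenes',
                     'data/nuscenes/nuscenes_infos_val.pkl',
                     version='v1.0-trainval',
                     mono3d=True)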
Code example #29
def factory(dataset, dir_nuscenes):
    """Define dataset type and split training and validation"""

    assert dataset in ['nuscenes', 'nuscenes_mini', 'nuscenes_teaser']
    if dataset == 'nuscenes_mini':
        version = 'v1.0-mini'
    else:
        version = 'v1.0-trainval'

    nusc = NuScenes(version=version, dataroot=dir_nuscenes, verbose=True)
    scenes = nusc.scene

    if dataset == 'nuscenes_teaser':
        with open("splits/nuscenes_teaser_scenes.txt", "r") as file:
            teaser_scenes = file.read().splitlines()
        scenes = [scene for scene in scenes if scene['token'] in teaser_scenes]
        with open("splits/split_nuscenes_teaser.json", "r") as file:
            dic_split = json.load(file)
        split_train = [
            scene['name'] for scene in scenes
            if scene['token'] in dic_split['train']
        ]
        split_val = [
            scene['name'] for scene in scenes
            if scene['token'] in dic_split['val']
        ]
    else:
        split_scenes = splits.create_splits_scenes()
        split_train, split_val = split_scenes['train'], split_scenes['val']

    return nusc, scenes, split_train, split_val
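Example usage (the dataroot is a placeholder):

nusc, scenes, split_train, split_val = factory('nuscenes_mini', '/data/sets/nuscenes')
print(len(scenes), len(split_train), len(split_val))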
Code example #30
def main():

    nusc = NuScenes(version='v1.0-mini',
                    dataroot='/home/odysseas/thesis/data/sets/nuscenes/',
                    verbose=True)
    #Fix sensor.json file
    sensor = nusc.sensor
    sensor[:] = [
        record for record in sensor if (record['channel'] == "CAM_FRONT") or (
            record['channel'] == "RADAR_FRONT")
    ]

    with open('./sensor.json', 'w') as fout:
        json.dump(sensor, fout, indent=0)

    cam_front_token = sensor[0]["token"]
    radar_front_token = sensor[1]["token"]

    #Fix calibrated_sensor.json file
    calibrated_sensor = nusc.calibrated_sensor
    calibrated_sensor[:] = [
        record for record in calibrated_sensor
        if (record['sensor_token'] == cam_front_token) or (
            record['sensor_token'] == radar_front_token)
    ]

    with open('./calibrated_sensor.json', 'w') as fout:
        json.dump(calibrated_sensor, fout, indent=0)