def filter_eval_boxes(nusc: NuScenes,
                      eval_boxes: EvalBoxes,
                      max_dist: Dict[str, float],
                      verbose: bool = False) -> EvalBoxes:
    """
    Applies filtering to boxes. Distance, bike-racks and points per box.
    :param nusc: An instance of the NuScenes class.
    :param eval_boxes: An instance of the EvalBoxes class.
    :param max_dist: Maps the detection name to the eval distance threshold for that class.
    :param verbose: Whether to print to stdout.
    :return: The filtered EvalBoxes (filtering is applied in place).
    """
    # Accumulators for number of boxes remaining after each filtering stage.
    total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0

    for sample_token in eval_boxes.sample_tokens:
        # Filter on distance first.
        total += len(eval_boxes[sample_token])
        eval_boxes.boxes[sample_token] = [
            box for box in eval_boxes[sample_token]
            if box.ego_dist < max_dist[box.detection_name]
        ]
        dist_filter += len(eval_boxes[sample_token])

        # Then remove boxes with zero points in them. Eval boxes have -1 points by default.
        eval_boxes.boxes[sample_token] = [
            box for box in eval_boxes[sample_token] if box.num_pts != 0
        ]
        point_filter += len(eval_boxes[sample_token])

        # Perform bike-rack filtering: drop bicycles/motorcycles whose center lies
        # inside a bike-rack annotation.
        sample_anns = nusc.get('sample', sample_token)['anns']
        # Fetch each annotation record once (previously looked up twice per annotation).
        ann_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns]
        bikerack_recs = [rec for rec in ann_recs
                         if rec['category_name'] == 'static_object.bicycle_rack']
        bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation']))
                          for rec in bikerack_recs]

        filtered_boxes = []
        for box in eval_boxes[sample_token]:
            if box.detection_name in ('bicycle', 'motorcycle'):
                # any() short-circuits on the first containing bike rack.
                center = np.expand_dims(np.array(box.translation), axis=1)
                in_a_bikerack = any(
                    np.sum(points_in_box(bikerack_box, center)) > 0
                    for bikerack_box in bikerack_boxes)
                if not in_a_bikerack:
                    filtered_boxes.append(box)
            else:
                filtered_boxes.append(box)
        eval_boxes.boxes[sample_token] = filtered_boxes
        bike_rack_filter += len(eval_boxes.boxes[sample_token])

    if verbose:
        print("=> Original number of boxes: %d" % total)
        print("=> After distance based filtering: %d" % dist_filter)
        print("=> After LIDAR points based filtering: %d" % point_filter)
        print("=> After bike rack filtering: %d" % bike_rack_filter)

    return eval_boxes
def create_nuscenes_infos(root_path, version="v1.0-trainval", max_sweeps=10):
    """
    Creates the train/val (or test) info pickles for a nuScenes split.
    :param root_path: Root directory of the nuScenes dataset.
    :param version: Dataset version; one of v1.0-trainval, v1.0-test, v1.0-mini.
    :param max_sweeps: Max number of lidar sweeps accumulated per sample.
    """
    from second.data.nuscenes import NuScenes
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    from nuscenes.utils import splits

    available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
    assert version in available_vers
    if version == "v1.0-trainval":
        train_scenes = splits.train
        val_scenes = splits.val
    elif version == "v1.0-test":
        train_scenes = splits.test
        val_scenes = []
    elif version == "v1.0-mini":
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise ValueError("unknown")
    test = "test" in version
    root_path = Path(root_path)

    # Filter to scenes that actually exist on disk; a partial download is allowed.
    available_scenes = _get_available_scenes(nusc)
    available_scene_names = [s["name"] for s in available_scenes]
    train_scenes = [s for s in train_scenes if s in available_scene_names]
    val_scenes = [s for s in val_scenes if s in available_scene_names]
    # Map the surviving scene names to their tokens.
    train_scenes = {
        available_scenes[available_scene_names.index(s)]["token"]
        for s in train_scenes
    }
    val_scenes = {
        available_scenes[available_scene_names.index(s)]["token"]
        for s in val_scenes
    }

    if test:
        print(f"test scene: {len(train_scenes)}")
    else:
        print(
            f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")

    train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
        nusc, train_scenes, val_scenes, test, max_sweeps=max_sweeps)
    metadata = {
        "version": version,
    }

    def _dump(infos, filename):
        # Writes one infos pickle under the dataset root.
        with open(root_path / filename, 'wb') as f:
            pickle.dump({"infos": infos, "metadata": metadata}, f)

    if test:
        print(f"test sample: {len(train_nusc_infos)}")
        _dump(train_nusc_infos, "infos_test.pkl")
    else:
        print(
            f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
        )
        _dump(train_nusc_infos, "infos_train.pkl")
        _dump(val_nusc_infos, "infos_val.pkl")
help='How many example visualizations to write to disk.')
parser.add_argument('--render_curves', type=int, default=1,
                    help='Whether to render PR and TP curves to disk.')
parser.add_argument('--verbose', type=int, default=1,
                    help='Whether to print to stdout.')
args = parser.parse_args()

# Expand '~' in user-supplied paths.
result_path_ = os.path.expanduser(args.result_path)
output_dir_ = os.path.expanduser(args.output_dir)
eval_set_ = args.eval_set
dataroot_ = args.dataroot
version_ = args.version
config_name_ = args.config_name
plot_examples_ = args.plot_examples
# argparse delivers these flags as ints; coerce to bool.
render_curves_ = bool(args.render_curves)
verbose_ = bool(args.verbose)

# Build the eval config and dataset handle, then run the full evaluation.
cfg_ = config_factory(config_name_)
nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
                         output_dir=output_dir_, verbose=verbose_)
nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
def visualize_sample(nusc: NuScenes,
                     sample_token: str,
                     gt_boxes: EvalBoxes,
                     pred_boxes: EvalBoxes,
                     nsweeps: int = 1,
                     conf_th: float = 0.15,
                     eval_range: float = 50,
                     verbose: bool = True,
                     savepath: str = None) -> None:
    """
    Visualizes a sample from BEV with annotations and detection results.
    :param nusc: NuScenes object.
    :param sample_token: The nuScenes sample token.
    :param gt_boxes: Ground truth boxes grouped by sample.
    :param pred_boxes: Prediction grouped by sample.
    :param nsweeps: Number of sweeps used for lidar visualization.
    :param conf_th: The confidence threshold used to filter negatives.
    :param eval_range: Range in meters beyond which boxes are ignored.
    :param verbose: Whether to print to stdout.
    :param savepath: If given, saves the rendering here instead of displaying.
    """
    # Look up the lidar keyframe plus its calibration and ego-pose records.
    sample_record = nusc.get('sample', sample_token)
    lidar_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP'])
    calib_record = nusc.get('calibrated_sensor', lidar_record['calibrated_sensor_token'])
    ego_record = nusc.get('ego_pose', lidar_record['ego_pose_token'])

    # Transform GT and predicted boxes from the global frame into the lidar frame.
    gt_global = gt_boxes[sample_token]
    est_global = pred_boxes[sample_token]
    boxes_gt = boxes_to_sensor(gt_global, ego_record, calib_record)
    boxes_est = boxes_to_sensor(est_global, ego_record, calib_record)

    # Carry detection scores over to the transformed prediction boxes.
    for local_box, global_box in zip(boxes_est, est_global):
        local_box.score = global_box.detection_score

    # Load the (multi-sweep) point cloud in the lidar frame.
    pc, _ = LidarPointCloud.from_file_multisweep(nusc, sample_record, 'LIDAR_TOP', 'LIDAR_TOP',
                                                 nsweeps=nsweeps)

    # Set up the figure.
    _, ax = plt.subplots(1, 1, figsize=(9, 9))

    # Scatter the points, colored by distance from the ego vehicle.
    points = view_points(pc.points[:3, :], np.eye(4), normalize=False)
    dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
    colors = np.minimum(1, dists / eval_range)
    ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)

    # Mark the ego vehicle at the origin.
    ax.plot(0, 0, 'x', color='black')

    # Draw ground-truth boxes in green.
    for box in boxes_gt:
        box.render(ax, view=np.eye(4), colors=('g', 'g', 'g'), linewidth=2)

    # Draw predicted boxes in blue, keeping only sufficiently confident ones.
    for box in boxes_est:
        assert not np.isnan(box.score), 'Error: Box score cannot be NaN!'
        if box.score >= conf_th:
            box.render(ax, view=np.eye(4), colors=('b', 'b', 'b'), linewidth=1)

    # Clip the view slightly beyond the eval range so boxes on the edge stay visible.
    axes_limit = eval_range + 3
    ax.set_xlim(-axes_limit, axes_limit)
    ax.set_ylim(-axes_limit, axes_limit)

    # Render to disk or to the screen.
    if verbose:
        print('Rendering sample token %s' % sample_token)
    plt.title(sample_token)
    if savepath is not None:
        plt.savefig(savepath)
        plt.close()
    else:
        plt.show()
if __name__ == '__main__':
    # Settings.
    parser = argparse.ArgumentParser(
        description='Prints out the scenes for each split.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
                        help='Default nuScenes data directory.')
    parser.add_argument(
        '--version', type=str, default='v1.0-trainval',
        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0.'
    )
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    args = parser.parse_args()
    dataroot = args.dataroot
    version = args.version
    # argparse delivers an int; coerce to bool.
    verbose_ = bool(args.verbose)

    # Init.
    nusc = NuScenes(version=version, verbose=verbose_, dataroot=dataroot)

    # Print the scene-level stats.
    # NOTE(review): scene_splits is built but not used in the visible chunk —
    # presumably consumed by code that follows; confirm the file is not truncated.
    scene_splits = create_splits_scenes(verbose=True)