def __init__(self,
             nusc: NuScenes,
             config: DetectionConfig,
             result_path: str,
             eval_set: str,
             output_dir: str = None,
             verbose: bool = True):
    """
    Initialize a NuScenesEval object.
    :param nusc: A NuScenes object.
    :param config: A DetectionConfig object.
    :param result_path: Path of the nuScenes JSON result file.
    :param eval_set: The dataset split to evaluate on, e.g. train or val.
    :param output_dir: Folder to save plots and results to.
    :param verbose: Whether to print to stdout.
    """
    self.nusc = nusc
    self.result_path = result_path
    self.eval_set = eval_set
    self.output_dir = output_dir
    self.verbose = verbose
    self.cfg = config

    # Make dirs
    self.plot_dir = os.path.join(self.output_dir, 'plots')
    if not os.path.isdir(self.output_dir):
        os.makedirs(self.output_dir)
    if not os.path.isdir(self.plot_dir):
        os.makedirs(self.plot_dir)

    # Load data
    self.pred_boxes = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, verbose=verbose)
    self.gt_boxes = load_gt(self.nusc, self.eval_set, verbose=verbose)

    assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
        "Samples in split don't match samples in predictions."

    # Add center distances
    self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
    self.gt_boxes = add_center_dist(nusc, self.gt_boxes)

    # Filter boxes (distance, points per box, etc.)
    if verbose:
        print('=> Filtering predictions')
    self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
    if verbose:
        print('=> Filtering ground truth annotations')
    self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)

    self.sample_tokens = self.gt_boxes.sample_tokens
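For reference, a minimal usage sketch of the evaluator above. The class name NuScenesEval is taken from the docstring; the dataroot, result path, and the devkit's config_factory name ('detection_cvpr_2019', which may differ in older devkit versions) are assumptions, not part of the snippet.

from nuscenes.nuscenes import NuScenes
from nuscenes.eval.detection.config import config_factory

# Hypothetical paths; adjust to your setup.
nusc = NuScenes(version='v1.0-trainval', dataroot='/data/sets/nuscenes', verbose=True)
cfg = config_factory('detection_cvpr_2019')  # standard detection config shipped with the devkit

nusc_eval = NuScenesEval(nusc,
                         config=cfg,
                         result_path='/data/results/detections_val.json',
                         eval_set='val',
                         output_dir='/data/results/eval_out',
                         verbose=True)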
def __init__(
    self,
    *,
    data_root=const.DEFAULT_DATA_ROOT,
    split="train",
    version="v1.0",
    full_dataset: bool = False,
    download=False,
    sensor_data: List[str] = SENSOR_DATA_KEYS,
    coordinates="global",
):
    self.root = os.path.join(data_root, NUSCENES_LOCAL_PATH)

    if version not in VERSIONS:
        raise ValueError(
            f"version provided was {version} but only valid versions are: "
            f"{VERSIONS}")

    # The mini release uses its own split names and version suffix.
    if full_dataset is False:
        split = f"mini_{split}"
        version = f"{version}-mini"
    else:
        version = f"{version}-trainval"

    if download:
        self.download(version=version)

    nu = NuScenes(dataroot=self.root, version=version)
    self.nu = nu
    self.scenes = nu.scene
    self.split = split
    self.sample_tokens = loaders.load_gt(nusc=nu, eval_split=split).sample_tokens

    # Validate the requested sensor channels against the supported set.
    for s in sensor_data:
        if s not in SENSOR_DATA_KEYS:
            raise ValueError(f"sensor key: {s} is not a valid sensor. "
                             f"Valid sensors are: {SENSOR_DATA_KEYS}")
    self.data_keys = sensor_data

    if coordinates not in COORDINATES:
        # Reference the argument here: self.coordinates is not set yet at this point.
        raise ValueError(f"coordinates can only be one of {COORDINATES} "
                         f"but {coordinates} was given.")
    self.coordinates = coordinates
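A minimal usage sketch of the wrapper above, assuming the class is named NuScenesDataset (a hypothetical name; the source does not show it) and that the chosen channels are members of the wrapper's SENSOR_DATA_KEYS constant.

# Hypothetical instantiation; class name, data_root, and sensor keys are assumptions.
dataset = NuScenesDataset(
    data_root="/data/sets",                   # joined with NUSCENES_LOCAL_PATH internally
    split="val",                              # becomes "mini_val" since full_dataset=False
    version="v1.0",                           # becomes "v1.0-mini" since full_dataset=False
    full_dataset=False,
    download=False,
    sensor_data=["CAM_FRONT", "LIDAR_TOP"],   # must be a subset of SENSOR_DATA_KEYS
    coordinates="global",
)
print(f"{len(dataset.sample_tokens)} samples in split {dataset.split}")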