Example #1
from nuscenes import NuScenes
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval


def eval_main_old(root_path, version, eval_version, res_path, eval_set, output_dir):
    # Load the nuScenes database and run the official detection evaluation.
    nusc = NuScenes(version=version, dataroot=str(root_path), verbose=False)

    cfg = config_factory(eval_version)
    nusc_eval = NuScenesEval(nusc, config=cfg, result_path=res_path, eval_set=eval_set,
                            output_dir=output_dir,
                            verbose=False)
    nusc_eval.main(render_curves=False)
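A minimal call sketch for the helper above; every path and the split name below are placeholders, not values from the original project:

# Hypothetical invocation; adjust the placeholder paths to your setup.
eval_main_old(
    root_path="/data/nuscenes",          # dataset root holding v1.0-trainval
    version="v1.0-trainval",
    eval_version="detection_cvpr_2019",
    res_path="/tmp/results_nusc.json",   # submission-format detections
    eval_set="val",
    output_dir="/tmp/nusc_eval",         # metrics_summary.json lands here
)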
Example #2
    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric='bbox',
                         result_name='img_bbox'):
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            metric (str): Metric name used for evaluation. Default: 'bbox'.
            result_name (str): Result name in the metric prefix.
                Default: 'img_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(version=self.version,
                        dataroot=self.data_root,
                        verbose=False)
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(nusc,
                                 config=self.eval_detection_configs,
                                 result_path=result_path,
                                 eval_set=eval_set_map[self.version],
                                 output_dir=output_dir,
                                 verbose=False)
        nusc_eval.main(render_curves=True)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in self.CLASSES:
            for k, v in metrics['label_aps'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
        # Dataset-level TP errors are recorded once, outside the per-class loop.
        for k, v in metrics['tp_errors'].items():
            val = float('{:.4f}'.format(v))
            detail['{}/{}'.format(metric_prefix,
                                  self.ErrNameMapping[k])] = val

        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
        return detail
Example #3
    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric="bbox",
                         result_name="pts_bbox"):
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            metric (str): Metric name used for evaluation. Default: 'bbox'.
            result_name (str): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(version=self.version,
                        dataroot=self.data_root,
                        verbose=False)
        eval_set_map = {
            "v1.0-mini": "mini_val",
            "v1.0-trainval": "val",
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False,
        )
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, "metrics_summary.json"))
        detail = dict()
        metric_prefix = f"{result_name}_NuScenes"
        for name in self.CLASSES:
            for k, v in metrics["label_aps"][name].items():
                val = float("{:.4f}".format(v))
                detail["{}/{}_AP_dist_{}".format(metric_prefix, name, k)] = val
            for k, v in metrics["label_tp_errors"][name].items():
                val = float("{:.4f}".format(v))
                detail["{}/{}_{}".format(metric_prefix, name, k)] = val

        detail["{}/NDS".format(metric_prefix)] = metrics["nd_score"]
        detail["{}/mAP".format(metric_prefix)] = metrics["mean_ap"]
        return detail
Example #4
from nuscenes import NuScenes
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval


def eval_main(root_path, version, eval_version, res_path, eval_set,
              output_dir):
    # Load the nuScenes database and run the official detection evaluation,
    # rendering curves and example plots into output_dir.
    nusc = NuScenes(version=version, dataroot=str(root_path), verbose=False)

    cfg = config_factory(eval_version)
    nusc_eval = NuScenesEval(nusc,
                             config=cfg,
                             result_path=res_path,
                             eval_set=eval_set,
                             output_dir=output_dir,
                             verbose=False)
    nusc_eval.main(render_curves=True, plot_examples=10)
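A hedged usage sketch for eval_main; the paths are assumptions. After the call, the evaluator leaves a metrics_summary.json in output_dir whose mean_ap and nd_score fields are the same values the other examples on this page read back:

import json
import os

out_dir = "/tmp/nusc_eval"                      # placeholder output directory
os.makedirs(out_dir, exist_ok=True)
eval_main("/data/nuscenes", "v1.0-trainval", "detection_cvpr_2019",
          "/tmp/results_nusc.json", "val", out_dir)

# Read back the summary written by NuScenesEval.main().
with open(os.path.join(out_dir, "metrics_summary.json")) as f:
    summary = json.load(f)
print("mAP:", summary["mean_ap"], "NDS:", summary["nd_score"])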
Example #5
import json
import shutil
import tempfile
import zipfile
from pathlib import Path


def execute_official_evaluator(nusc_path,
                               result_path,
                               output_path,
                               task="detection",
                               nusc_version="v1.0-trainval",
                               eval_version="detection_cvpr_2019",
                               verbose=True):
    '''
    Call the official evaluator from the nuScenes devkit on the evaluation set.

    :param result_path: For detection, the path to the submission JSON file;
        for lidarseg, the path to the submission zip.
    '''
    from nuscenes import NuScenes
    nusc = NuScenes(version=nusc_version, dataroot=nusc_path, verbose=verbose)

    if task == "detection":
        from nuscenes.eval.detection.config import config_factory
        from nuscenes.eval.detection.evaluate import NuScenesEval

        nusc_eval = NuScenesEval(nusc,
                                 config=config_factory(eval_version),
                                 result_path=result_path,
                                 eval_set='val',
                                 output_dir=output_path,
                                 verbose=verbose)
        nusc_eval.main(render_curves=False)
    elif task == "lidarseg":
        from nuscenes.eval.lidarseg.evaluate import LidarSegEval

        tempfolder = tempfile.mkdtemp()
        if verbose:
            print("Extracting submission to", tempfolder)
        with zipfile.ZipFile(result_path, "r") as archive:
            archive.extractall(tempfolder)

        try:
            nusc_eval = LidarSegEval(nusc,
                                     results_folder=tempfolder,
                                     eval_set='val',
                                     verbose=verbose)
            results = nusc_eval.evaluate()
            if verbose:
                print("Results:", results)

            output_path = Path(output_path)
            output_path.mkdir(exist_ok=True, parents=True)
            with open(output_path / "lidarseg_results.json", "w") as fout:
                json.dump(results, fout, indent="  ")
        finally:
            shutil.rmtree(tempfolder)
    else:
        raise ValueError("Unsupported evaluation task!")
Example #6
    def evaluation(self, det_annos, class_names, **kwargs):
        import json
        from nuscenes.nuscenes import NuScenes
        from . import nuscenes_utils
        nusc = NuScenes(version=self.dataset_cfg.VERSION,
                        dataroot=str(self.root_path),
                        verbose=True)
        nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(
            det_annos, nusc)
        nusc_annos['meta'] = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }

        output_path = Path(kwargs['output_path'])
        output_path.mkdir(exist_ok=True, parents=True)
        res_path = str(output_path / 'results_nusc.json')
        with open(res_path, 'w') as f:
            json.dump(nusc_annos, f)

        self.logger.info(
            f'The predictions of NuScenes have been saved to {res_path}')

        if self.dataset_cfg.VERSION == 'v1.0-test':
            return 'No ground-truth annotations for evaluation', {}

        from nuscenes.eval.detection.config import config_factory
        from nuscenes.eval.detection.evaluate import NuScenesEval

        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
            'v1.0-test': 'test'
        }
        try:
            eval_version = 'detection_cvpr_2019'
            eval_config = config_factory(eval_version)
        except Exception:
            # Fall back to the legacy config name used by older devkit releases.
            eval_version = 'cvpr_2019'
            eval_config = config_factory(eval_version)

        nusc_eval = NuScenesEval(
            nusc,
            config=eval_config,
            result_path=res_path,
            eval_set=eval_set_map[self.dataset_cfg.VERSION],
            output_dir=str(output_path),
            verbose=True,
        )
        metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)

        with open(output_path / 'metrics_summary.json', 'r') as f:
            metrics = json.load(f)

        result_str, result_dict = nuscenes_utils.format_nuscene_results(
            metrics, self.class_names, version=eval_version)
        return result_str, result_dict
Example #7
    def _evaluate_single(self,
                         result_path,
                         pkl_path=None,
                         logger=None,
                         metric='bbox',
                         result_name='pts_bbox'):
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(version=self.version,
                        dataroot=self.data_root,
                        verbose=False)
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        # Note: merge and pkl_path are extra arguments accepted by this
        # project's customized NuScenesEval, not by the stock devkit class.
        nusc_eval = NuScenesEval(nusc,
                                 config=self.eval_detection_configs,
                                 result_path=result_path,
                                 eval_set=eval_set_map[self.version],
                                 merge=True,
                                 pkl_path=pkl_path,
                                 output_dir=output_dir,
                                 verbose=True)
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in self.CLASSES:
            for k, v in metrics['label_aps'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val

        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
        return detail
Example #8
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval


def eval_main(nusc, eval_version, res_path, eval_set, output_dir):
    # nusc is an already constructed NuScenes object supplied by the caller.
    cfg = config_factory(eval_version)

    nusc_eval = NuScenesEval(
        nusc,
        config=cfg,
        result_path=res_path,
        eval_set=eval_set,
        output_dir=output_dir,
        verbose=True,
    )
    metrics_summary = nusc_eval.main(plot_examples=10)
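This variant takes a ready NuScenes object, so the slow database load can be shared across several evaluations. A sketch under that assumption, with placeholder paths:

import os

from nuscenes import NuScenes

# Build the database once and reuse it for every result file.
nusc = NuScenes(version="v1.0-trainval", dataroot="/data/nuscenes", verbose=True)
for res_path in ["/tmp/epoch_10_results.json", "/tmp/epoch_20_results.json"]:
    tag = os.path.splitext(os.path.basename(res_path))[0]
    out_dir = os.path.join("/tmp/eval", tag)
    os.makedirs(out_dir, exist_ok=True)
    eval_main(nusc, "detection_cvpr_2019", res_path, "val", out_dir)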
Example #9
def eval(res_path, eval_set, output_dir=None, root_path=None, nusc=None):
    from nuscenes.eval.detection.evaluate import NuScenesEval
    from nuscenes.eval.detection.config import config_factory

    cfg = config_factory("detection_cvpr_2019")

    nusc_eval = NuScenesEval(
        nusc,
        config=cfg,
        result_path=res_path,
        eval_set=eval_set,
        output_dir=output_dir,
        verbose=True,
    )
    metrics_summary = nusc_eval.main(render_curves=False)
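The wrapper expects the caller to supply a ready NuScenes instance (the root_path argument is not used in the body above), and its name shadows Python's built-in eval, which is worth renaming in real code. A hedged call sketch with placeholder paths:

from nuscenes import NuScenes

nusc = NuScenes(version="v1.0-trainval", dataroot="/data/nuscenes", verbose=True)
eval("/tmp/results_nusc.json", "val", output_dir="/tmp/nusc_eval", nusc=nusc)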
Example #10
def eval_main(nusc, eval_version, res_path, eval_set, output_dir):
    cfg = config_factory(eval_version)

    # Pick the scene split that matches the loaded database version.
    if nusc.version == "v1.0-trainval":
        train_scenes = splits.train
        val_scenes = splits.val
        val_scenes = splits.val
    elif nusc.version == "v1.0-test":
        train_scenes = splits.test
        val_scenes = []
    elif nusc.version == "v1.0-mini":
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise ValueError(f"unknown nuScenes version: {nusc.version}")

    available_scenes = _get_available_scenes(nusc)
    available_scene_names = [s["name"] for s in available_scenes]
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    val_scenes = set([
        available_scenes[available_scene_names.index(s)]["token"]
        for s in val_scenes
    ])

    # val_scenes is an extra argument supported by this project's customized
    # NuScenesEval; the stock devkit class derives the split from eval_set alone.
    nusc_eval = NuScenesEval(
        nusc,
        config=cfg,
        result_path=res_path,
        eval_set=eval_set,
        output_dir=output_dir,
        verbose=True,
        val_scenes=val_scenes,
    )
    metrics_summary = nusc_eval.main(plot_examples=10)
Example #11
    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric='bbox',
                         result_name='pts_bbox'):
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            metric (str): Metric name used for evaluation. Default: 'bbox'.
            result_name (str): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        # Attributes are converted to detection names; once boxes are added for
        # attr_names, NuScenesEval scores attributes the same way as class detections.
        nusc = NuScenes(
            version=self.version, dataroot=self.data_root,
            verbose=False)  # ground-truth database
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False)
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in self.CLASSES:
            for k, v in metrics['label_aps'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val

        # Also record AP and TP-error metrics for the attribute classes.
        for attr in self.ATTR_CLASSES:
            for k, v in metrics['label_aps'][attr].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, attr, k)] = val
            for k, v in metrics['label_tp_errors'][attr].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, attr, k)] = val


        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
        return detail
Example #12
def main():
    args = parse_args()

    pred_paths = glob.glob(os.path.join(args.ensemble_dir, '*.pkl'))
    print(pred_paths)

    preds = []
    for path in pred_paths:
        preds.append(get_pred(path))

    merged_predictions = {}
    for token in preds[0].keys():
        annos = [pred[token] for pred in preds]

        merged_predictions[token] = concatenate_list(annos)

    predictions = merged_predictions

    print("Finish Merging")

    nusc_annos = {
        "results": {},
        "meta": None,
    }

    for sample_token, prediction in tqdm(predictions.items()):
        annos = []

        # reorganize pred by class
        pred_dicts = reorganize_pred_by_class(prediction)

        for name, pred in pred_dicts.items():
            # in global coordinate
            top_boxes, top_scores = get_sample_data(pred)

            with torch.no_grad():
                top_boxes_tensor = torch.from_numpy(top_boxes)
                boxes_for_nms = top_boxes_tensor[:, [0, 1, 2, 4, 3, 5, -1]]
                boxes_for_nms[:, -1] = boxes_for_nms[:, -1] + np.pi / 2
                top_scores_tensor = torch.from_numpy(top_scores)

                selected = box_torch_ops.rotate_nms(
                    boxes_for_nms,
                    top_scores_tensor,
                    pre_max_size=None,
                    post_max_size=50,
                    iou_threshold=0.2,
                ).numpy()

            pred = [pred[s] for s in selected]

            annos.extend(pred)

        nusc_annos["results"].update({sample_token: annos})

    nusc_annos["meta"] = {
        "use_camera": False,
        "use_lidar": True,
        "use_radar": True,
        "use_map": False,
        "use_external": False,
    }

    res_dir = args.work_dir
    os.makedirs(res_dir, exist_ok=True)

    with open(os.path.join(args.work_dir, 'result.json'), "w") as f:
        json.dump(nusc_annos, f)

    from nuscenes.eval.detection.config import config_factory
    from nuscenes.eval.detection.evaluate import NuScenesEval
    nusc = NuScenes(version="v1.0-trainval",
                    dataroot=args.data_root,
                    verbose=True)
    cfg = config_factory("cvpr_2019")
    nusc_eval = NuScenesEval(
        nusc,
        config=cfg,
        result_path=os.path.join(args.work_dir, 'result.json'),
        eval_set='val',
        output_dir=args.work_dir,
        verbose=True,
    )
    metrics_summary = nusc_eval.main(plot_examples=0)
Example #13
        max_recall_ind = non_zero[-1]

    return max_recall_ind


if __name__ == '__main__':
    result_path_ = ''
    output_dir_ = ''
    eval_set_ = 'val'
    dataroot_ = ''
    version_ = 'v1.0-trainval'
    config_path = ''
    plot_examples_ = 0
    render_curves_ = False
    verbose_ = True

    if config_path == '':
        cfg_ = config_factory('cvpr_2019')
    else:
        with open(config_path, 'r') as f:
            cfg_ = DetectionConfig.deserialize(json.load(f))

    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
    nusc_eval = NuScenesEval(nusc_,
                             config=cfg_,
                             result_path=result_path_,
                             eval_set=eval_set_,
                             output_dir=output_dir_,
                             verbose=verbose_)
    nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)