Example #1
# Imports assumed from the nuScenes devkit; exact module paths can differ
# between devkit versions.
import os
import random

from nuscenes import NuScenes
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval
from nuscenes.eval.detection.render import visualize_sample


def main(result_path, output_dir, eval_set, dataroot, version, verbose,
         config_name, plot_examples):
    """Initialize the evaluator, optionally visualize samples, then run the evaluation and render the results."""

    # Init.
    cfg = config_factory(config_name)
    nusc_ = NuScenes(version=version, verbose=verbose, dataroot=dataroot)
    nusc_eval = NuScenesEval(nusc_,
                             config=cfg,
                             result_path=result_path,
                             eval_set=eval_set,
                             output_dir=output_dir,
                             verbose=verbose)

    # Visualize samples.
    random.seed(43)
    if plot_examples:
        sample_tokens_ = list(nusc_eval.sample_tokens)
        random.shuffle(sample_tokens_)
        for sample_token_ in sample_tokens_:
            visualize_sample(
                nusc_,
                sample_token_,
                nusc_eval.gt_boxes,
                nusc_eval.pred_boxes,
                eval_range=max(nusc_eval.cfg.class_range.values()),
                savepath=os.path.join(output_dir,
                                      '{}.png'.format(sample_token_)))

    # Run evaluation.
    metrics, md_list = nusc_eval.run()
    nusc_eval.render(md_list, metrics)
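
A minimal sketch of how the main() function above might be invoked. The dataset root, result file and output directory are hypothetical placeholder paths, and 'detection_cvpr_2019' is assumed to be the standard devkit detection config name (config names differ between devkit versions).

if __name__ == '__main__':
    # Placeholder paths; point these at a real nuScenes dataroot and a
    # results JSON in the nuScenes detection submission format.
    main(result_path='/data/results/results_nusc.json',
         output_dir='/data/results/eval_out',
         eval_set='val',
         dataroot='/data/sets/nuscenes',
         version='v1.0-trainval',
         verbose=True,
         config_name='detection_cvpr_2019',
         plot_examples=True)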
Example #2
    def main(self,
             plot_examples: int = 0,
             render_curves: bool = True) -> Dict[str, Any]:
        """
        Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
        :param plot_examples: How many example visualizations to write to disk.
        :param render_curves: Whether to render PR and TP curves to disk.
        :return: A dict that stores the high-level metrics and metadata.
        """

        if plot_examples > 0:
            # Select a random but fixed subset to plot.
            random.seed(43)
            sample_tokens = list(self.sample_tokens)
            random.shuffle(sample_tokens)
            sample_tokens = sample_tokens[:plot_examples]

            # Visualize samples.
            example_dir = os.path.join(self.output_dir, 'examples')
            if not os.path.isdir(example_dir):
                os.mkdir(example_dir)
            for sample_token in sample_tokens:
                visualize_sample(
                    self.nusc,
                    sample_token,
                    # Don't render ground-truth boxes for the test set.
                    self.gt_boxes if self.eval_set != 'test' else EvalBoxes(),
                    self.pred_boxes,
                    eval_range=max(self.cfg.class_range.values()),
                    savepath=os.path.join(example_dir,
                                          '{}.png'.format(sample_token)))

        # Run evaluation.
        metrics, metric_data_list = self.evaluate()

        # Render PR and TP curves.
        if render_curves:
            self.render(metrics, metric_data_list)

        # Dump the metric data, meta and metrics to disk.
        if self.verbose:
            print('Saving metrics to: %s' % self.output_dir)
        metrics_summary = metrics.serialize()
        metrics_summary['meta'] = self.meta.copy()
        with open(os.path.join(self.output_dir, 'metrics_summary.json'),
                  'w') as f:
            json.dump(metrics_summary, f, indent=2)
        with open(os.path.join(self.output_dir, 'metrics_details.json'),
                  'w') as f:
            json.dump(metric_data_list.serialize(), f, indent=2)

        # Print high-level metrics.
        print('mAP: %.4f' % (metrics_summary['mean_ap']))
        err_name_mapping = {
            'trans_err': 'mATE',
            'scale_err': 'mASE',
            'orient_err': 'mAOE',
            'vel_err': 'mAVE',
            'attr_err': 'mAAE'
        }
        for tp_name, tp_val in metrics_summary['tp_errors'].items():
            print('%s: %.4f' % (err_name_mapping[tp_name], tp_val))
        print('NDS: %.4f' % (metrics_summary['nd_score']))
        print('Eval time: %.1fs' % metrics_summary['eval_time'])

        return metrics_summary
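
A minimal usage sketch for the method above, assuming the nuScenes devkit detection evaluator (DetectionEval, also exposed as NuScenesEval in some releases). The file paths are placeholders, and class names, import paths and config names can differ between devkit versions.

from nuscenes import NuScenes
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import DetectionEval

# Load the mini split and build the evaluator; paths below are placeholders.
nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=True)
nusc_eval = DetectionEval(nusc,
                          config=config_factory('detection_cvpr_2019'),
                          result_path='/data/results/results_nusc.json',
                          eval_set='mini_val',
                          output_dir='/data/results/eval_out',
                          verbose=True)

# Write ten example visualizations, render PR/TP curves and print the summary metrics.
metrics_summary = nusc_eval.main(plot_examples=10, render_curves=True)
print('NDS: %.4f' % metrics_summary['nd_score'])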