Example #1
            elif args[setting] is None:
                x = None
            else:
                x = args[setting]
            config[setting] = x
    # Split the merged config into the eval / dataset / metrics sub-configs
    eval_config = {
        k: v
        for k, v in config.items() if k in default_eval_config.keys()
    }
    dataset_config = {
        k: v
        for k, v in config.items() if k in default_dataset_config.keys()
    }
    metrics_config = {
        k: v
        for k, v in config.items() if k in default_metrics_config.keys()
    }

    # Run code
    evaluator = trackeval.Evaluator(eval_config)
    dataset_list = [trackeval.datasets.BDD100K2DBox(dataset_config)]
    metrics_list = []
    for metric in [
            trackeval.metrics.HOTA, trackeval.metrics.CLEAR,
            trackeval.metrics.Identity
    ]:
        if metric.get_name() in metrics_config['METRICS']:
            metrics_list.append(metric())
    if len(metrics_list) == 0:
        raise Exception('No metrics selected for evaluation')
    evaluator.evaluate(dataset_list, metrics_list)
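
The loop and the evaluation call above assume that `default_eval_config`, `default_dataset_config`, `default_metrics_config`, the merged `config`, and a flat `args` mapping were built earlier. Below is a minimal sketch of how such a setup is typically assembled, assuming TrackEval is installed; the METRICS default is hand-written here, and `BDD100K2DBox.get_default_dataset_config()` is assumed to exist alongside the documented `Evaluator.get_default_eval_config()`.

import argparse
import trackeval

# Library defaults; get_default_dataset_config() on BDD100K2DBox is an assumption
# mirroring the pattern used by other TrackEval dataset classes.
default_eval_config = trackeval.Evaluator.get_default_eval_config()
default_dataset_config = trackeval.datasets.BDD100K2DBox.get_default_dataset_config()
default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity']}  # hand-written default
config = {**default_eval_config, **default_dataset_config, **default_metrics_config}

# One optional CLI flag per setting; flags left unset stay None and keep the default.
parser = argparse.ArgumentParser()
for setting in config.keys():
    parser.add_argument('--' + setting, nargs='+')
args = vars(parser.parse_args())
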
Example #2
    def evaluate(self,
                 results,
                 metric='track',
                 logger=None,
                 resfile_path=None,
                 bbox_iou_thr=0.5,
                 track_iou_thr=0.5):
        """Evaluation in MOT Challenge.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'track'. Defaults to 'track'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            resfile_path (str, optional): Path to save the formatted results.
                Defaults to None.
            bbox_iou_thr (float, optional): IoU threshold for detection
                evaluation. Defaults to 0.5.
            track_iou_thr (float, optional): IoU threshold for tracking
                evaluation. Defaults to 0.5.

        Returns:
            dict[str, float]: MOTChallenge style evaluation metric.
        """
        eval_results = dict()
        if isinstance(metric, list):
            metrics = metric
        elif isinstance(metric, str):
            metrics = [metric]
        else:
            raise TypeError('metric must be a list or a str.')
        allowed_metrics = ['bbox', 'track']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported.')

        if 'track' in metrics:
            resfile_path, resfiles, names, tmp_dir = self.format_results(
                results, resfile_path, metrics)
            print_log('Evaluate CLEAR MOT results.', logger=logger)
            distth = 1 - track_iou_thr

            accs = []
            for name in names:
                if 'half-train' in self.ann_file:
                    gt_file = osp.join(self.img_prefix,
                                       f'{name}/gt/gt_half-train.txt')
                elif 'half-val' in self.ann_file:
                    gt_file = osp.join(self.img_prefix,
                                       f'{name}/gt/gt_half-val.txt')
                else:
                    gt_file = osp.join(self.img_prefix, f'{name}/gt/gt.txt')
                res_file = osp.join(resfiles['track'], f'{name}.txt')
                gt = mm.io.loadtxt(gt_file)
                res = mm.io.loadtxt(res_file)
                ini_file = osp.join(self.img_prefix, f'{name}/seqinfo.ini')
                if osp.exists(ini_file) and 'MOT15' not in self.img_prefix:
                    acc, ana = mm.utils.CLEAR_MOT_M(gt,
                                                    res,
                                                    ini_file,
                                                    distth=distth)
                else:
                    acc = mm.utils.compare_to_groundtruth(gt,
                                                          res,
                                                          distth=distth)
                accs.append(acc)

            mh = mm.metrics.create()
            summary = mh.compute_many(accs,
                                      names=names,
                                      metrics=mm.metrics.motchallenge_metrics,
                                      generate_overall=True)

            if trackeval is None:
                raise ImportError(
                    'Please run '
                    'pip install git+https://github.com/JonathonLuiten/TrackEval.git '  # noqa
                    'to manually install trackeval')

            seqmap = osp.join(resfile_path, 'videoseq.txt')
            with open(seqmap, 'w') as f:
                f.write('name\n')
                for name in names:
                    f.write(name + '\n')

            eval_config = trackeval.Evaluator.get_default_eval_config()

            # the tracker's name is set to 'track', so strip this last
            # path component off to get the output folder
            output_folder = resfiles['track'].rsplit(os.sep, 1)[0]

            dataset_config = self.get_dataset_cfg_for_hota(
                output_folder, seqmap)

            evaluator = trackeval.Evaluator(eval_config)
            dataset = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
            hota_metrics = [
                trackeval.metrics.HOTA(dict(METRICS=['HOTA'], THRESHOLD=0.5))
            ]
            output_res, _ = evaluator.evaluate(dataset, hota_metrics)

            # align the HOTA results with the rows of `summary`:
            # `summary` is indexed by sequence names plus 'OVERALL',
            # while the HOTA output is indexed by sequence names plus 'COMBINED_SEQ'
            seq_list = list(summary.index)
            seq_list.append('COMBINED_SEQ')

            hota = [
                np.average(output_res['MotChallenge2DBox']['track'][seq]
                           ['pedestrian']['HOTA']['HOTA']) for seq in seq_list
                if 'OVERALL' not in seq
            ]

            eval_results.update({
                mm.io.motchallenge_metric_names[k]: v['OVERALL']
                for k, v in summary.to_dict().items()
            })
            eval_results['HOTA'] = hota[-1]

            summary['HOTA'] = hota
            str_summary = mm.io.render_summary(
                summary,
                formatters=mh.formatters,
                namemap=mm.io.motchallenge_metric_names)
            print(str_summary)
            if tmp_dir is not None:
                tmp_dir.cleanup()

        if 'bbox' in metrics:
            if isinstance(results, dict):
                bbox_results = results['det_bboxes']
            elif isinstance(results, list):
                bbox_results = results
            else:
                raise TypeError('results must be a dict or a list.')
            annotations = [self.get_ann_info(info) for info in self.data_infos]
            mean_ap, _ = eval_map(bbox_results,
                                  annotations,
                                  iou_thr=bbox_iou_thr,
                                  dataset=self.CLASSES,
                                  logger=logger)
            eval_results['mAP'] = mean_ap

        for k, v in eval_results.items():
            if isinstance(v, float):
                eval_results[k] = float(f'{(v):.3f}')

        return eval_results
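
Most of the 'track' branch above is the standard py-motmetrics flow: accumulate per-sequence matches, compute the MOTChallenge metrics with compute_many, and pretty-print them with render_summary. The following is a self-contained, hedged sketch of that flow on tiny synthetic data; the sequence name and the distance values are illustrative only.

import numpy as np
import motmetrics as mm

# One accumulator per sequence; auto_id assigns frame ids automatically.
acc = mm.MOTAccumulator(auto_id=True)
# Frame 1: two GT objects (1, 2), two hypotheses (101, 102);
# entries are distances (e.g. 1 - IoU), so both pairs match here.
acc.update([1, 2], [101, 102], [[0.1, 0.9], [0.9, 0.1]])
# Frame 2: GT 1 still matches 101; GT 2 has no feasible hypothesis (a miss)
# and hypothesis 103 matches nothing (a false positive). NaN marks infeasible pairs.
acc.update([1, 2], [101, 103], [[0.2, np.nan], [np.nan, np.nan]])

mh = mm.metrics.create()
summary = mh.compute_many([acc],
                          names=['toy_seq'],
                          metrics=mm.metrics.motchallenge_metrics,
                          generate_overall=True)
print(mm.io.render_summary(summary,
                           formatters=mh.formatters,
                           namemap=mm.io.motchallenge_metric_names))
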