Example 1
def tao_evaluation(tao_ann_file, anns, results_coco_format):
    """Evaluate tracking results with the TAO toolkit.

    Args:
        tao_ann_file: path to the TAO ground-truth annotation file.
        anns: COCO-style annotation dict whose 'images' entries carry
            'frame_id' and 'video_id'.
        results_coco_format: per-image results, one dict per image mapping
            an instance id to a result with a 0-based 'label' and an
            [x1, y1, x2, y2, score] 'bbox'.
    """
    from tao.toolkit.tao import TaoEval

    ############################## debugging code to make sure we use TaoEval correctly:
    ############################## pass the ground-truth annotations as predictions
    # annos = anns['annotations']
    # for ann in annos:
    #     ann['score'] = 1
    # import logging
    # logger = logging.getLogger()
    # logger.setLevel(logging.INFO)
    # tao_eval = TaoEval(tao_ann_file, annos)
    # # tao_eval = TaoEval(tao_ann_file, annos[:len(annos)//2])
    # import pdb;pdb.set_trace()
    # tao_eval.run()
    # tao_eval.print_results()
    ############################## end debugging code

    # convert results from COCO format to TAO format
    global_instance_id = 0
    results_tao_format = []
    for img, results_in_img in zip(anns['images'], results_coco_format):
        img_id = img['id']

        if img['frame_id'] == 0:
            # new video: shift by 10000 so instance ids do not collide across videos
            global_instance_id += 10000

        for instance_id, result in results_in_img.items():
            instance_id = int(instance_id) + global_instance_id
            result_tao_format = {
                "image_id": img_id,
                "category_id": result['label'] + 1,  # coco labels are 1-based
                "bbox": xyxy2xywh(result['bbox'][:-1]),
                "score": result['bbox'][-1],
                "track_id": instance_id,
                "video_id": img['video_id'],
            }
            results_tao_format.append(result_tao_format)

    import logging
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    tao_eval = TaoEval(tao_ann_file, results_tao_format)
    tao_eval.run()
    tao_eval.print_results()
    results = tao_eval.get_results()
    return results
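
A minimal sketch of how this helper might be driven, assuming anns is a COCO-style dict whose image entries carry 'frame_id' and 'video_id' and that results_coco_format is aligned with anns['images']; the file path and the dummy result are placeholders, not taken from the original code.

import json

# Hypothetical annotation path; the loaded dict must expose 'images' entries
# with 'frame_id' and 'video_id', as the loop in tao_evaluation expects.
ann_file = 'data/tao/annotations/validation_cocofmt.json'
with open(ann_file) as f:
    anns = json.load(f)

# One dict per image: instance id -> result with a 0-based 'label' and an
# [x1, y1, x2, y2, score] 'bbox'. Dummy values, for illustration only.
results_coco_format = [
    {'0': {'label': 0, 'bbox': [10.0, 20.0, 50.0, 80.0, 0.9]}}
    for _ in anns['images']
]

metrics = tao_evaluation(ann_file, anns, results_coco_format)
print(metrics)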
Example 2
    def evaluate(self,
                 results,
                 metric=['track'],
                 logger=None,
                 resfile_path=None):
        """Evaluate TAO tracking ('track') and/or LVIS detection ('bbox') results."""
        if isinstance(metric, list):
            metrics = metric
        elif isinstance(metric, str):
            metrics = [metric]
        else:
            raise TypeError('metric must be a list or a str.')
        allowed_metrics = ['bbox', 'track']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported.')

        result_files, tmp_dir = self.format_results(results, resfile_path)

        eval_results = dict()

        if 'track' in metrics:
            from tao.toolkit.tao import TaoEval
            print_log('Evaluating TAO results...', logger)
            tao_eval = TaoEval(self.ann_file, result_files['track'])
            tao_eval.params.img_ids = self.img_ids
            tao_eval.params.cat_ids = self.cat_ids
            tao_eval.params.iou_thrs = np.array([0.5, 0.75])
            tao_eval.run()

            tao_eval.print_results()
            tao_results = tao_eval.get_results()
            for k, v in tao_results.items():
                if isinstance(k, str) and k.startswith('AP'):
                    key = 'track_{}'.format(k)
                    val = float('{:.3f}'.format(float(v)))
                    eval_results[key] = val

        if 'bbox' in metrics:
            print_log('Evaluating detection results...', logger)
            lvis_gt = LVIS(self.ann_file)
            lvis_dt = LVISResults(lvis_gt, result_files['bbox'])
            lvis_eval = LVISEval(lvis_gt, lvis_dt, 'bbox')
            # the lvis toolkit's Params uses snake_case attribute names
            lvis_eval.params.img_ids = self.img_ids
            lvis_eval.params.cat_ids = self.cat_ids
            lvis_eval.evaluate()
            lvis_eval.accumulate()
            lvis_eval.summarize()
            lvis_eval.print_results()
            lvis_results = lvis_eval.get_results()
            for k, v in lvis_results.items():
                if k.startswith('AP'):
                    key = '{}_{}'.format('bbox', k)
                    val = float('{:.3f}'.format(float(v)))
                    eval_results[key] = val
            ap_summary = ' '.join([
                '{}:{:.3f}'.format(k, float(v))
                for k, v in lvis_results.items() if k.startswith('AP')
            ])
            eval_results['bbox_mAP_copypaste'] = ap_summary

        if tmp_dir is not None:
            tmp_dir.cleanup()

        return eval_results
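
A hedged usage sketch: dataset and outputs are placeholders for an instance of the dataset class this method belongs to and the raw results that format_results expects.

# Hypothetical call; `dataset` and `outputs` are placeholders.
eval_results = dataset.evaluate(outputs, metric=['track', 'bbox'])
for name, value in eval_results.items():
    print(f'{name}: {value}')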
Example 3
import copy
from tao.toolkit.tao import TaoEval

def copy_tao_eval(tao_eval):
    # reuse the loaded ground truth/detections, but deep-copy the params
    eval_copy = TaoEval(tao_eval.tao_gt, tao_eval.tao_dt)
    eval_copy.params = copy.deepcopy(tao_eval.params)
    return eval_copy
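
One use for such a copy is re-running the same ground truth and detections under different settings without reloading anything. A small sketch, reusing the iou_thrs parameter seen in Example 2; tao_eval is assumed to be an already-constructed TaoEval instance.

import numpy as np

# Re-evaluate at a stricter IoU threshold; the copy shares the loaded ground
# truth and detections with `tao_eval`, only its params are independent.
strict_eval = copy_tao_eval(tao_eval)
strict_eval.params.iou_thrs = np.array([0.75])
strict_eval.run()
strict_eval.print_results()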
Example 4
def evaluate(annotations, predictions, cfg, logger=logging.root):
    """
    Args:
        annotations (str, Path, or dict): path to the TAO annotation JSON,
            or an already-loaded annotation dict.
        predictions (str, Path, or dict): path to the predictions (a JSON
            file, or a directory of detections depending on
            cfg.PREDICTIONS_FORMAT), or already-loaded results.
        cfg (ConfigNode): evaluation configuration.
        logger (logging.Logger): logger used to report progress.
    """
    logger.info(f'Evaluating predictions at path: {predictions}')
    logger.info(f'Using annotations at path: {annotations}')
    verify_config_or_error(cfg)
    if cfg.SUPERCATEGORY_MAP:
        assert not cfg.CATEGORY_AGNOSTIC, (
            '--category-agnostic is not valid if --supercategory-map is '
            'specified.')
        assert not cfg.CATEGORIES, (
            '--categories cannot be specified if --supercategory-map is '
            'specified.')

    if isinstance(annotations, dict):
        tao = annotations
    else:
        with open(annotations, 'r') as f:
            tao = json.load(f)

    # name_to_id = {x['name']: x['id'] for x in tao['categories']}
    merge_categories = Tao._construct_merge_map(tao)
    assert merge_categories

    for ann in tao['annotations'] + tao['tracks']:
        ann['category_id'] = merge_categories.get(ann['category_id'],
                                                  ann['category_id'])
    tao = Tao(tao)
    if cfg.PREDICTIONS_FORMAT == 'json':
        if isinstance(predictions, dict):
            results = predictions
        else:
            with open(predictions, 'r') as f:
                results = json.load(f)
        for x in results:
            x['score'] = float(x['score'])
        if cfg.THRESHOLD >= 0:
            results = [
                x for x in results if x['score'] >= cfg.THRESHOLD
            ]
    elif cfg.PREDICTIONS_FORMAT in ('mat_dir', 'pickle_dir', 'pkl_dir'):
        detection_format = cfg.PREDICTIONS_FORMAT.split('_')[0]
        results = misc.load_detection_dir_as_results(
            predictions,
            tao.dataset,
            score_threshold=cfg.THRESHOLD,
            detections_format=detection_format,
            show_progress=True)

    invalid_images = {
        x['image_id']
        for x in results if x['image_id'] not in tao.imgs
    }
    if invalid_images:
        logger.warning(f'Found invalid image ids: {invalid_images}')
        results = [x for x in results if x['image_id'] not in invalid_images]

    if cfg.CATEGORY_AGNOSTIC:
        for x in results:
            x['category_id'] = 1

    if cfg.SPLIT_CLASS_TRACKS:
        track_id_gen = itertools.count(1)
        unique_track_ids = defaultdict(lambda: next(track_id_gen))
        for x in results:
            x['track_id'] = unique_track_ids[(x['track_id'], x['category_id'])]

    if cfg.SPLIT_TRACKS:
        last_track_id = itertools.count(
            max([x['track_id'] for x in tao.anns.values()]) + 1)
        for x in results:
            x['track_id'] = next(last_track_id)

    for x in results:
        x['category_id'] = merge_categories.get(x['category_id'],
                                                x['category_id'])

    fill_video_ids_inplace(results, tao)

    if cfg.SINGLE_OBJECT.ENABLED:
        update_init_scores_inplace(results, cfg.SINGLE_OBJECT)

    num_updated_tracks = make_track_ids_unique(results)
    if num_updated_tracks:
        logger.info(
            f'Updating {num_updated_tracks} track ids to make them unique.')
    set_track_scores_inplace(results, cfg.TRACK_SCORE_TOP_PERC)

    results = TaoResults(tao, results)
    if cfg.ORACLE.TYPE != 'none':
        results = apply_oracle(tao,
                               results,
                               cfg.ORACLE,
                               cfg.CATEGORY_AGNOSTIC,
                               logger=logger)
    tao_eval = TaoEval(tao, results, iou_3d_type=cfg.IOU_3D_TYPE)
    if cfg.CATEGORY_AGNOSTIC:
        tao_eval.params.use_cats = 0
    if cfg.CATEGORIES:
        if cfg.CATEGORY_AGNOSTIC:
            raise ValueError(
                '--categories and --category-agnostic are mutually exclusive')
        cat_synset_to_id = {x['synset']: x['id'] for x in tao.cats.values()}
        cat_ids = []
        for x in cfg.CATEGORIES:
            if x not in cat_synset_to_id:
                raise ValueError(
                    f'Could not find category synset {x} (specified from '
                    f'--categories)')
            cat_ids.append(cat_synset_to_id[x])
        tao_eval.params.cat_ids = cat_ids

    tao_eval.params.area_rng = [
        x
        for x, l in zip(tao_eval.params.area_rng, tao_eval.params.area_rng_lbl)
        if l in cfg.AREA_RNG
    ]
    tao_eval.params.area_rng_lbl = cfg.AREA_RNG
    tao_eval.params.iou_thrs = cfg.EVAL_IOUS
    tao_eval.run()

    eval_info = {'tao_eval': tao_eval}
    if cfg.MOTA.ENABLED:
        from .evaluation_mota import evaluate_mota
        mota_info = evaluate_mota(tao_eval, cfg, logger)
        eval_info['mota_eval'] = mota_info
    return eval_info
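
A sketch of a typical invocation, assuming cfg is the evaluation ConfigNode this function expects (built and populated elsewhere) and that the file paths are placeholders.

# Hypothetical call; `cfg` must already carry the fields read above
# (PREDICTIONS_FORMAT, THRESHOLD, AREA_RNG, EVAL_IOUS, ORACLE, MOTA, ...).
eval_info = evaluate('annotations/validation.json',
                     'results/track_predictions.json',
                     cfg)
tao_eval = eval_info['tao_eval']
tao_eval.print_results()
print(tao_eval.get_results())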