Example no. 1
    def __init__(self, run_path, model_ckpt):
        self.lvis_gt = LVIS(ANNOTATION_PATH)
        self.lvis_dt = LVISResults(self.lvis_gt, PREDICTION_PATH)
        self.run_path = run_path
        self.model_ckpt = model_ckpt

        self._build_coco_to_lvis_map()
        cocoEval = LVISEval(self.lvis_gt, self.lvis_dt, 'segm')
        self.freq_groups = cocoEval._prepare_freq_group()
        config_path = os.path.join(self.run_path, 'config_lvis.yaml')
        self.config = yaml.load(open(config_path, "r"), Loader=yaml.FullLoader)
Example no. 2
    def evaluate_class_agnostic(self):
        """ Treat all masks as one category.
        """
        lvis_dt = self.lvis_dt
        lvis_gt = self.lvis_gt
        feats_ann = self.feats_ann
        cluster_to_coco = self.cluster_to_coco

        # by default, none of the predictions gets evaluated.
        for _, dt in lvis_dt.anns.items(): dt['category_id'] = -2
        for _, dt in lvis_gt.anns.items(): dt['category_id'] = -2

        print('Updating category ids')
        for i in tqdm(range(len(feats_ann))):
            cluster_id = self.clusters[i]
            ann_id = int(feats_ann[i])
            if cluster_id in cluster_to_coco:
                lvis_dt.anns[ann_id]['category_id'] = -1  # the assigned ones are included in the eval

        cocoEval = LVISEval(lvis_gt, lvis_dt, 'segm')
        cocoEval.params.cat_ids = [-1]  # only evaluate on category -1 (note: LVIS params are snake_case)
        cocoEval.params.use_cats = 0
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
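For comparison, here is a minimal sketch of class-agnostic LVIS evaluation using only the public API: the official lvis Params object exposes use_cats, which disables category matching entirely. The annotation and result paths below are placeholders.

from lvis import LVIS, LVISResults, LVISEval

gt = LVIS("data/lvis/lvis_v0.5_val.json")     # placeholder annotation path
dt = LVISResults(gt, "output/lvis_dt.json")   # placeholder detection results
lvis_eval = LVISEval(gt, dt, "segm")
lvis_eval.params.use_cats = 0  # ignore category labels when matching detections to GT
lvis_eval.run()
lvis_eval.print_results()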
Example no. 3
def do_lvis_evaluation(
    dataset,
    gt_path,
    predictions,
    box_only,
    output_folder,
    iou_types,
    iteration,
):
    logger = logging.getLogger("maskrcnn_benchmark.inference")

    if box_only:
        logger.info("Evaluating bbox proposals")
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        res = COCOResults("box_proposal")
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(
                    predictions, dataset, area=area, limit=limit
                )
                key = "AR{}@{:d}".format(suffix, limit)
                res.results["box_proposal"][key] = stats["ar"].item()
        logger.info(res)
        if output_folder:
            torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
        return

    logger.info("Preparing results for LVIS format")
    lvis_results = prepare_for_lvis_evaluation(predictions, dataset, iou_types)
    if len(lvis_results) == 0:
        return {}

    dt_path = os.path.join(output_folder, "lvis_dt.json")
    import json
    with open(dt_path, "w") as f:
        json.dump(lvis_results, f)

    logger.info("Evaluating predictions")
    lvis_eval_info = {}
    for iou_type in iou_types:
        lvis_eval = LVISEval(
            gt_path, dt_path, iou_type
        )
        lvis_eval.run()
        print(iou_type)
        lvis_eval.print_results()
        keys = lvis_eval.get_results().keys()
        for k in keys:
            lvis_eval_info[iou_type + k] = lvis_eval.get_results()[k]

        save_path = os.path.join(output_folder, str(iteration))
        mkdir(save_path)
        lvis_eval_percat = LVISEvalPerCat(
            gt_path, dt_path, iou_type, save_path)
        lvis_eval_percat.run()
        lvis_eval_percat.print_results()
    return lvis_eval_info
Example no. 4
def _evaluate_predictions_on_lvis(
    lvis_gt, lvis_results, iou_type, class_names=None):
    """
    Args:
        iou_type (str):
        class_names (None or list[str]): if provided, will use it to predict
            per-category AP.

    Returns:
        a dict of {metric name: score}
    """
    metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:  # TODO: check if needed
        logger.warn("No predictions from the model! Set scores to -1")
        return {metric: -1 for metric in metrics}

    from lvis import LVISEval, LVISResults

    lvis_results = LVISResults(lvis_gt, lvis_results)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()

    # Pull the standard metrics from the LVIS results
    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info(
        "Evaluation results for {}: \n".format(iou_type) + \
            create_small_table(results)
    )
    return results
Example no. 5
    def evaluate(self, max_dets_per_image=None):
        all_preds, main_process = self.synchronize_between_processes()
        if main_process:
            if max_dets_per_image is None:
                max_dets_per_image = 300

            eval_imgs = [lvis_res['image_id'] for lvis_res in all_preds]

            gt_subset = LvisEvaluator._make_lvis_subset(
                self.lvis_gt, eval_imgs)

            for iou_type in self.iou_types:
                print('Evaluating for iou', iou_type)
                if iou_type == "segm":
                    # See:
                    # https://detectron2.readthedocs.io/en/latest/_modules/detectron2/evaluation/lvis_evaluation.html
                    lvis_results = copy.deepcopy(all_preds)
                    for c in lvis_results:
                        c.pop("bbox", None)
                else:
                    lvis_results = all_preds

                lvis_results = LVISResults(gt_subset,
                                           lvis_results,
                                           max_dets=max_dets_per_image)
                lvis_eval = LVISEval(gt_subset, lvis_results, iou_type)
                lvis_eval.params.img_ids = list(set(eval_imgs))
                lvis_eval.run()
                self.lvis_eval_per_iou[iou_type] = lvis_eval
        else:
            self.lvis_eval_per_iou = None

        if dist.is_initialized():
            dist.barrier()

        result_dict = None
        if self.lvis_eval_per_iou is not None:
            result_dict = dict()
            for iou, eval_data in self.lvis_eval_per_iou.items():
                result_dict[iou] = dict()
                for key in eval_data.results:
                    value = eval_data.results[key]
                    result_dict[iou][key] = value

        return result_dict
Example no. 6
def _evaluate_predictions_on_lvis(lvis_gt,
                                  lvis_results,
                                  iou_type,
                                  class_names=None):
    metrics = {
        "bbox":
        ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
        "segm":
        ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
    }[iou_type]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:
        logger.warn("No predictions from the model!")
        return {metric: float("nan") for metric in metrics}

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        for c in lvis_results:
            c.pop("bbox", None)

    from lvis import LVISEval, LVISResults

    lvis_results = LVISResults(lvis_gt, lvis_results)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()

    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info("Evaluation results for {}: \n".format(iou_type) +
                create_small_table(results))
    return results
Example no. 7
def _evaluate_predictions_on_lvis(
    lvis_gt, lvis_results, iou_type, max_dets=None, class_names=None
):
    """
    Copied from detectron2.evaluation.lvis_evaluation, with support for max_dets.

    Args:
        iou_type (str):
        max_dets (None or int)
        class_names (None or list[str]): if provided, will use it to predict
            per-category AP.

    Returns:
        a dict of {metric name: score}
    """
    metrics = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
    }[iou_type]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:  # TODO: check if needed
        logger.warn("No predictions from the model!")
        return {metric: float("nan") for metric in metrics}

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        # When evaluating mask AP, if the results contain bbox, LVIS API will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        for c in lvis_results:
            c.pop("bbox", None)

    from lvis import LVISEval, LVISResults

    #####
    # <modified>
    if max_dets is None:
        max_dets = 300

    lvis_results_obj = LVISResults(lvis_gt, lvis_results, max_dets=max_dets)
    lvis_eval = LVISEval(lvis_gt, lvis_results_obj, iou_type)
    lvis_eval.params.max_dets = max_dets
    # </modified>
    #####
    lvis_eval.run()
    lvis_eval.print_results()

    # Pull the standard metrics from the LVIS results
    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info(
        f"Evaluation results for {iou_type}, max_dets {max_dets} \n"
        + create_small_table(results)
    )
    return results
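A hypothetical call site for the helper above (paths are placeholders; the function is assumed to live in a module where create_small_table is importable), contrasting the LVIS default of 300 detections per image with a tighter budget:

import json
from lvis import LVIS

lvis_gt = LVIS("datasets/lvis/lvis_v1_val.json")                   # placeholder GT path
with open("output/inference/lvis_instances_results.json") as f:    # placeholder results file
    lvis_results = json.load(f)

res_default = _evaluate_predictions_on_lvis(lvis_gt, lvis_results, "bbox", max_dets=300)
res_strict = _evaluate_predictions_on_lvis(lvis_gt, lvis_results, "bbox", max_dets=100)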
Example no. 8
def _evaluate_predictions_on_lvis(lvis_gt,
                                  lvis_results,
                                  iou_type,
                                  max_dets_per_image=None,
                                  class_names=None):
    """
    Args:
        iou_type (str):
        max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
            This limit, by default of the LVIS dataset, is 300.
        class_names (None or list[str]): if provided, will use it to predict
            per-category AP.

    Returns:
        a dict of {metric name: score}
    """
    metrics = {
        "bbox":
        ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
        "segm":
        ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
    }[iou_type]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:  # TODO: check if needed
        logger.warn("No predictions from the model!")
        return {metric: float("nan") for metric in metrics}

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        # When evaluating mask AP, if the results contain bbox, LVIS API will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        for c in lvis_results:
            c.pop("bbox", None)

    if max_dets_per_image is None:
        max_dets_per_image = 300  # Default for LVIS dataset

    from lvis import LVISEval, LVISResults

    logger.info(
        f"Evaluating with max detections per image = {max_dets_per_image}")
    lvis_results = LVISResults(lvis_gt,
                               lvis_results,
                               max_dets=max_dets_per_image)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()

    # Pull the standard metrics from the LVIS results
    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info("Evaluation results for {}: \n".format(iou_type) +
                create_small_table(results))
    return results
Example no. 9
    def __init__(self):
        self.lvis = LVIS('/scratch/users/zzweng/datasets/lvis/lvis_v0.5_val.json')
        self.dt_path = 'output/inference/lvis_instances_results.json'
        self.lvis_dt = LVISResults(self.lvis, self.dt_path)
        
        coco_map = json.load(open('lvis-api/data/coco_to_synset.json'))
        synset_to_lvis = {cat['synset']: cat['id'] for cat in self.lvis.cats.values()}
        synset_to_lvis['oven.n.01'] = synset_to_lvis['toaster_oven.n.01']
        synset_to_lvis['frank.n.02'] = synset_to_lvis['sausage.n.01']

        coco_to_lvis = {}
        lvis_to_coco = {}
        for item in coco_map.values():
            coco_id, lvis_id = item['coco_cat_id'], synset_to_lvis[item['synset']]
            coco_to_lvis[coco_id] = lvis_id
            lvis_to_coco[lvis_id] = coco_id
        self.coco_to_lvis = coco_to_lvis
        self.lvis_to_coco = lvis_to_coco
        cocoEval = LVISEval(self.lvis, self.lvis_dt,'segm')
        self.freq_groups = cocoEval._prepare_freq_group()
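A small sketch of how a mapping like coco_to_lvis above might be used: detections produced with COCO category ids are remapped to LVIS category ids before being wrapped in LVISResults. coco_dets is a hypothetical list of result dicts in COCO format.

import copy

def remap_coco_results_to_lvis(coco_dets, coco_to_lvis):
    remapped = []
    for det in coco_dets:
        det = copy.deepcopy(det)
        lvis_id = coco_to_lvis.get(det['category_id'])
        if lvis_id is None:
            continue  # drop detections whose COCO class has no LVIS synset match
        det['category_id'] = lvis_id
        remapped.append(det)
    return remapped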
Example no. 10
def test_gt_boxes_as_anns(ann, ann_path, ann_type='bbox'):

    annotations = ann['annotations']
    template_pre = {
        'image_id': 0,
        'category_id': 0,
        'bbox': [0., 0., 0., 0.],
        'score': 1.
    }

    gt_to_pre = []
    for idx, annotation in enumerate(annotations):

        if idx % 10 == 0:
            print("{}/{}".format(idx, len(annotations)), end="\r")
        pre = copy.deepcopy(template_pre)

        pre['image_id'] = annotation['image_id']
        pre['category_id'] = annotation['category_id']
        pre['bbox'] = annotation['bbox']
        pre['score'] = 1.0

        gt_to_pre.append(pre)

    PRE_OUT_PATH = "./data/lvis_gt_pred.json"
    with open(PRE_OUT_PATH, "w") as f:
        json.dump(gt_to_pre, f)
    print("Stored GT to pred JSON.\n")

    lvis_eval = LVISEval(ann_path, PRE_OUT_PATH, ann_type)
    print("Constructed lvis_eval object.")
    lvis_eval.run()
    print("Finished lvis_eval.run()")
    lvis_eval.print_results()
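A hypothetical driver for the sanity check above: feeding ground-truth boxes back in as score-1.0 predictions should produce (near-)perfect AP if the evaluation pipeline is wired correctly. The annotation path is a placeholder.

import json

ANN_PATH = "./data/lvis/lvis_v0.5_val.json"   # placeholder annotation path
with open(ANN_PATH) as f:
    ann = json.load(f)
test_gt_boxes_as_anns(ann, ANN_PATH, ann_type='bbox')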
Example no. 11
    def evaluate(self):
#         self.reload_annotation()
        cluster_to_coco = self.cluster_to_coco
        coco_clusters = self.coco_clusters
        cocoDt = self.lvis_dt
        clusters = self.clusters
        feats_ann = self.feats_ann
        
        # by default everything is -1.
        for _, dt in cocoDt.anns.items(): dt['category_id'] = -1
        print('Updating category ids')
        for i in tqdm(range(len(feats_ann))):
            ann_id = int(feats_ann[i])
            cluster_id = clusters[i]
            if cluster_id in cluster_to_coco:
                cocoDt.anns[ann_id]['category_id'] = cluster_to_coco[cluster_id][0]
#                 print('assigned ', cluster_to_coco[cluster_id][0])
                
        print('Finally, evaluate!!')
        
        self.cocoEval = LVISEval(self.lvis, cocoDt,'segm')
        img_ids = cocoDt.get_img_ids()[:100]
#         cocoEval.params.catIds = [1, 2, 3, 4]# 5, 6, 7, 8, 9, 10, 11, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 33, 34, 35, 37, 40, 41, 42, 43, 44, 46, 47, 49, 50, 51, 52, 54, 56, 57, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 77, 78, 79, 81, 82, 84, 85, 86, 87, 88, 90]
#         cocoEval.params.imgIds = img_ids
#         cocoEval.params.iouThrs = np.linspace(.25, 0.95, int(np.round((0.95 - .25) / .05)) + 1, endpoint=True)

        self.cocoEval.lvis_gt.cats[-1] = {'frequency': 'f',
          'id': -1,
          'synset': 'all',
          'image_count': 0,
          'instance_count': 0,
          'synonyms': ['all'],
          'def': 'dummy category',
          'name': 'all'}
        self.cocoEval.evaluate()
        self.cocoEval.accumulate()
        self.cocoEval.summarize()
Example no. 12
def evaluate_predictions_on_lvis(coco_results, result_path, annotation_path, iou_type):
    import json

    with open(result_path, "w") as f:
        json.dump(coco_results, f)

    from lvis import LVIS, LVISEval
    lvis_eval = LVISEval(annotation_path, result_path, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()
    return lvis_eval
Example no. 13
def eval_partial_results(epoch, dset_name, validation_path):
    results = []
    mAP = -1
    directory = 'bbox_results/temp_res'
    for filename in os.listdir(directory):
        if filename.endswith(".json"):
            temp_name = os.path.join(directory, filename)
            with open(temp_name, 'rb') as f:
                results = list(itertools.chain(results, pickle.load(f)))

    cwd = os.getenv('owd')
    validation_path = os.path.join(cwd, validation_path)

    if not os.path.exists(f'bbox_results/{dset_name}/'):
        os.makedirs(f'bbox_results/{dset_name}/')

    json.dump(results,
              open(f'./bbox_results/{dset_name}/results_{epoch}.json', 'w'),
              indent=4)
    resFile = f'./bbox_results/{dset_name}/results_{epoch}.json'

    if dset_name in ('coco', 'drones'):
        cocoGt = COCO(validation_path)
        try:
            cocoDt = cocoGt.loadRes(resFile)
        except IndexError:
            print('empty list return zero map')
            return 0
        cocoDt.loadAnns()

        #  running evaluation
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        mAP = cocoEval.stats[0]

    elif (dset_name == 'lvis'):

        lvis_eval = LVISEval(validation_path, resFile, 'bbox')
        lvis_eval.run()
        metrics = lvis_eval.get_results()
        lvis_eval.print_results()
        mAP = metrics['AP']

    return (mAP)
Example no. 14
def lvis_eval(result_files,
              result_types,
              lvis,
              ann_file,
              max_dets=(100, 300, 1000),
              existing_json=None):
    ANNOTATION_PATH = ann_file
    print('gt: ', ANNOTATION_PATH)
    for res_type in result_types:
        assert res_type in [
            'proposal', 'proposal_fast', 'proposal_fast_percat', 'bbox',
            'segm', 'keypoints'
        ]

    if mmcv.is_str(lvis):
        lvis = LVIS(lvis)
    assert isinstance(lvis, LVIS)

    if result_types == ['proposal_fast']:
        ar = lvis_fast_eval_recall(result_files, lvis, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return

    elif result_types == ['proposal_fast_percat']:
        assert existing_json is not None
        per_cat_recall = {}
        for cat_id in range(1, 1231):
            ar = lvis_fast_eval_recall(result_files,
                                       lvis,
                                       np.array(max_dets),
                                       category_id=cat_id)
            for i, num in enumerate(max_dets):
                per_cat_recall.update({cat_id: ar})
                print('cat{} AR@{}\t= {:.4f}'.format(cat_id, num, ar[i]))
        pickle.dump(per_cat_recall,
                    open('./{}_per_cat_recall.pt'.format(existing_json), 'wb'))
        return
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')

        iou_type = 'bbox' if res_type == 'proposal' else res_type
        lvisEval = LVISEval(ANNOTATION_PATH, result_file, iou_type)
        # lvisEval.params.imgIds = img_ids
        if res_type == 'proposal':
            lvisEval.params.use_cats = 0
            lvisEval.params.max_dets = list(max_dets)

        lvisEval.run()
        lvisEval.print_results()
Example no. 15
def eval_results(results, dset_name, validation_path):

    cwd = os.getenv('owd')
    validation_path = os.path.join(cwd, validation_path)

    if not os.path.exists(f'bbox_results/{dset_name}/'):
        os.makedirs(f'bbox_results/{dset_name}/')

    rid = (random.randint(0, 1000000))
    json.dump(results,
              open(f'./bbox_results/{dset_name}/results_{rid}.json', 'w'),
              indent=4)
    resFile = f'./bbox_results/{dset_name}/results_{rid}.json'

    if dset_name in ('coco', 'drones'):
        cocoGt = COCO(validation_path)
        try:
            cocoDt = cocoGt.loadRes(resFile)
        except IndexError:
            print('empty list return zero map')
            return 0
        cocoDt.loadAnns()

        #  running evaluation
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        mAP = cocoEval.stats[0]

    elif (dset_name == 'lvis'):
        try:
            lvis_eval = LVISEval(validation_path, resFile, 'bbox')
        except IndexError:
            print('empty list return zero map')
            return 0
        lvis_eval.run()
        metrics = lvis_eval.get_results()
        lvis_eval.print_results()
        mAP = metrics['AP']

    os.remove(resFile)

    return (mAP)
Example no. 16
def do_lvis_evaluation(
    dataset,
    gt_path,
    predictions,
    output_folder,
    iou_types,
    iteration,
):
    logger = logging.getLogger("maskrcnn_benchmark.inference")

    logger.info("Preparing results for LVIS format")
    lvis_results = prepare_for_lvis_evaluation(predictions, dataset, iou_types)
    if len(lvis_results) == 0:
        return {}

    dt_path = os.path.join(output_folder, "lvis_dt.json")
    import json
    with open(dt_path, "w") as f:
        json.dump(lvis_results, f)

    logger.info("Evaluating predictions")
    lvis_eval_info = {}
    for iou_type in iou_types:
        lvis_eval = LVISEval(gt_path, dt_path, iou_type)
        lvis_eval.run()
        print(iou_type)
        lvis_eval.print_results()
        keys = lvis_eval.get_results().keys()
        for k in keys:
            lvis_eval_info[iou_type + k] = lvis_eval.get_results()[k]

        save_path = os.path.join(output_folder, str(iteration))
        mkdir(save_path)
        lvis_eval_percat = LVISEvalPerCat(gt_path, dt_path, iou_type,
                                          save_path)
        lvis_eval_percat.run()
        lvis_eval_percat.print_results()
    return lvis_eval_info
Example no. 17
def lvis_eval(resfile, res_type):

    print('*********evaluating *{}*'.format(res_type))
    # tmp_file = osp.join(runner.work_dir, 'temp_0')
    # result_files = results2json(self.dataset, results, tmp_file)
    ANNOTATION_PATH = 'data/lvis/annotations/lvis_v0.5_val.json'
    # cocoGt = self.dataset.coco
    # imgIds = cocoGt.getImgIds()
    # for res_type in res_types:
    #     # try:
    #     #     cocoDt = cocoGt.loadRes(result_files[res_type])
    #     # except IndexError:
    #     #     print('No prediction found.')
    #     #     break
    iou_type = res_type
    lvis_eval = LVISEval(ANNOTATION_PATH, resfile, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()
Example no. 18
def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type):
    """
    Evaluate the lvis results using LVISEval API.
    """
    assert len(lvis_results) > 0

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        # When evaluating mask AP, if the results contain bbox, LVIS API will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        for c in lvis_results:
            c.pop("bbox", None)

    lvis_results = LVISResults(lvis_gt, lvis_results)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()
    return lvis_eval
Example no. 19
    def evaluate(self, runner, results):
        tmp_file = osp.join(runner.work_dir, 'temp_0')
        result_files = results2json(self.dataset, results, tmp_file)

        res_types = ['bbox', 'segm'] if runner.model.module.with_mask else ['bbox']
        ANNOTATION_PATH = 'data/lvis/annotations/lvis_v0.5_val.json'
        # cocoGt = self.dataset.coco
        # imgIds = cocoGt.getImgIds()
        for res_type in res_types:
            # try:
            #     cocoDt = cocoGt.loadRes(result_files[res_type])
            # except IndexError:
            #     print('No prediction found.')
            #     break
            iou_type = res_type

            lvis_eval = LVISEval(ANNOTATION_PATH, result_files[res_type],
                                 iou_type)
            lvis_eval.run()
            lvis_eval.print_results()
Example no. 20
def main():

    config = fetch_config()
    print_args_stdout(config)
    ipdb.set_trace()

    print("Running eval.")
    lvis_eval = LVISEval(config.ann_path, config.results_path, config.ann_type)
    lvis_eval.run()
    lvis_eval.print_results()
    print("Finished eval.")

    ipdb.set_trace()
    # All precision values: 10 x 101 x 1230 x 4
    # precision has dims (iou, recall, cls, area range)
    precisions = lvis_eval.eval['precision']

    with open(config.ann_path, 'r') as outfile:
        gt = json.load(outfile)
    cat_metas = gt['categories']
    cats = []
    for cat_meta in cat_metas:
        cats.append((cat_meta['id'], cat_meta['name']))
    cats.sort(key=itemgetter(0))
    class_names = [cat[1] for cat in cats]

    area_type = 0
    results_per_category, per_cat_results = fetch_aps(precisions, class_names, area_type)
    print("mAP for area type {}: {}".format(area_type, evaluate_map(results_per_category)))

    # Print for eye-balling.
    # print_aps(results_per_category, class_names, n_cols=6)

    # Store results_per_category into a JSON.
    with open(config.aps_json_path, 'w') as json_file:
        json.dump(per_cat_results, json_file, indent=4)

    # Store the 4D precisions tensor as a PKL.
    with open(config.prec_pkl_path, 'wb') as pkl_file:
        pickle.dump(precisions, pkl_file)
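For reference, a sketch of what a fetch_aps-style helper might compute, assuming the precision tensor layout (iou, recall, cls, area range) noted in the comment above; entries equal to -1 mark missing data and are excluded before averaging.

import numpy as np

def per_category_ap(precisions, class_names, area_idx=0):
    # precisions: (iou, recall, cls, area range); -1 marks missing entries.
    results = []
    for cls_idx, name in enumerate(class_names):
        p = precisions[:, :, cls_idx, area_idx]
        p = p[p > -1]
        ap = float(np.mean(p)) if p.size else float('nan')
        results.append((name, ap))
    return results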
Example no. 21
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    while not osp.isfile(args.checkpoint):
        print('Waiting for {} to exist...'.format(args.checkpoint))
        time.sleep(60)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # assert not distributed
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    if dataset.ann_file == 'data/coco/annotations/image_info_test-dev2017.json':
                        result_files = results2json_segm(dataset,
                                                         outputs,
                                                         args.out,
                                                         dump=True)
                    else:
                        result_files = results2json_segm(dataset,
                                                         outputs,
                                                         args.out,
                                                         dump=False)
                    if 'lvis' in dataset.ann_file:  ## an ugly fix to make it compatible with coco eval
                        from lvis import LVISEval
                        lvisEval = LVISEval(cfg.data.test.ann_file,
                                            result_files, 'segm')
                        lvisEval.run()
                        lvisEval.print_results()
                        #fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                        lvisEval.params.iou_thrs[8] = 0.9
                        for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                            print('AP at iou {}: {}'.format(
                                iou, lvisEval._summarize('ap', iou_thr=iou)))
                    else:
                        coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset,
                                                    outputs_,
                                                    result_file,
                                                    dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

        ##eval on lvis-77######
        cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
        cfg.data.test.img_prefix = 'data/lvis/val2017/'
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig=model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset,
                                                     outputs,
                                                     'xxx',
                                                     dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval(
                        'data/lvis/lvis_v0.5_val_cocofied.json', result_files,
                        'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(
                            iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset,
                                                    outputs_,
                                                    result_file,
                                                    dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example no. 22
    def evaluate(self,
                 results,
                 metric=['track'],
                 logger=None,
                 resfile_path=None):
        if isinstance(metric, list):
            metrics = metric
        elif isinstance(metric, str):
            metrics = [metric]
        else:
            raise TypeError('metric must be a list or a str.')
        allowed_metrics = ['bbox', 'track']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported.')

        result_files, tmp_dir = self.format_results(results, resfile_path)

        eval_results = dict()

        if 'track' in metrics:
            from tao.toolkit.tao import TaoEval
            print_log('Evaluating TAO results...', logger)
            tao_eval = TaoEval(self.ann_file, result_files['track'])
            tao_eval.params.img_ids = self.img_ids
            tao_eval.params.cat_ids = self.cat_ids
            tao_eval.params.iou_thrs = np.array([0.5, 0.75])
            tao_eval.run()

            tao_eval.print_results()
            tao_results = tao_eval.get_results()
            for k, v in tao_results.items():
                if isinstance(k, str) and k.startswith('AP'):
                    key = 'track_{}'.format(k)
                    val = float('{:.3f}'.format(float(v)))
                    eval_results[key] = val

        if 'bbox' in metrics:
            print_log('Evaluating detection results...', logger)
            lvis_gt = LVIS(self.ann_file)
            lvis_dt = LVISResults(lvis_gt, result_files['bbox'])
            lvis_eval = LVISEval(lvis_gt, lvis_dt, 'bbox')
            lvis_eval.params.imgIds = self.img_ids
            lvis_eval.params.catIds = self.cat_ids
            lvis_eval.evaluate()
            lvis_eval.accumulate()
            lvis_eval.summarize()
            lvis_eval.print_results()
            lvis_results = lvis_eval.get_results()
            for k, v in lvis_results.items():
                if k.startswith('AP'):
                    key = '{}_{}'.format('bbox', k)
                    val = float('{:.3f}'.format(float(v)))
                    eval_results[key] = val
            ap_summary = ' '.join([
                '{}:{:.3f}'.format(k, float(v))
                for k, v in lvis_results.items() if k.startswith('AP')
            ])
            eval_results['bbox_mAP_copypaste'] = ap_summary

        if tmp_dir is not None:
            tmp_dir.cleanup()

        return eval_results
Example no. 23
        get_count = num_get[v].sum().astype(np.float64)
        acc = get_count / ins_count
        print(template.format('(ACC)', '0.50:0.95', 'all', 300, k, acc * 100))


# with open('tempcls.pkl', 'rb') as fin:
#     savelist = pickle.load(fin)

# num_get = savelist[0]
# num_ins = savelist[1]
# splitbin = get_split_bin()
# accumulate_acc(num_ins, num_get, splitbin)

# result and val files for 100 randomly sampled images.
ANNOTATION_PATH = "data/lvis/lvis_v0.5_val.json"

RESULT_PATH_BBOX = args.boxjson
print('Eval Bbox:')
ANN_TYPE = 'bbox'
lvis_eval = LVISEval(ANNOTATION_PATH, RESULT_PATH_BBOX, ANN_TYPE)
lvis_eval.run()
lvis_eval.print_results()

if not args.segjson == 'None':
    RESULT_PATH_SEGM = args.segjson
    print('Eval Segm:')
    ANN_TYPE = 'segm'
    lvis_eval = LVISEval(ANNOTATION_PATH, RESULT_PATH_SEGM, ANN_TYPE)
    lvis_eval.run()
    lvis_eval.print_results()
Example no. 24
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in LVIS protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls. If set to a list, the average recall of all IoUs will
                also be computed. Default: 0.5.

        Returns:
            dict[str, float]: LVIS style metrics.
        """

        try:
            import lvis
            assert lvis.__version__ >= '10.5.3'
            from lvis import LVISResults, LVISEval
        except AssertionError:
            raise AssertionError('Incompatible version of lvis is installed. '
                                 'Run pip uninstall lvis first. Then run pip '
                                 'install mmlvis to install open-mmlab forked '
                                 'lvis. ')
        except ImportError:
            raise ImportError('Package lvis is not installed. Please run pip '
                              'install mmlvis to install open-mmlab forked '
                              'lvis.')
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError('metric {} is not supported'.format(metric))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)

        eval_results = {}
        # get original api
        lvis_gt = self.coco
        for metric in metrics:
            msg = 'Evaluating {}...'.format(metric)
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(results,
                                           proposal_nums,
                                           iou_thrs,
                                           logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError('{} is not in results'.format(metric))
            try:
                lvis_dt = LVISResults(lvis_gt, result_files[metric])
            except IndexError:
                print_log('The testing results of the whole dataset is empty.',
                          logger=logger,
                          level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
            lvis_eval.params.imgIds = self.img_ids
            if metric == 'proposal':
                lvis_eval.params.useCats = 0
                lvis_eval.params.maxDets = list(proposal_nums)
                lvis_eval.evaluate()
                lvis_eval.accumulate()
                lvis_eval.summarize()
                for k, v in lvis_eval.get_results().items():
                    if k.startswith('AR'):
                        val = float('{:.3f}'.format(float(v)))
                        eval_results[k] = val
            else:
                lvis_eval.evaluate()
                lvis_eval.accumulate()
                lvis_eval.summarize()
                lvis_results = lvis_eval.get_results()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = lvis_eval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]

                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.load_cats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))

                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)

                for k, v in lvis_results.items():
                    if k.startswith('AP'):
                        key = '{}_{}'.format(metric, k)
                        val = float('{:.3f}'.format(float(v)))
                        eval_results[key] = val
                ap_summary = ' '.join([
                    '{}:{:.3f}'.format(k, float(v))
                    for k, v in lvis_results.items() if k.startswith('AP')
                ])
                eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary
            lvis_eval.print_results()
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
Example no. 25
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=300,
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in COCO protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls. If set to a list, the average recall of all IoUs will
                also be computed. Default: 0.5.

        Returns:
            dict[str: float]
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError('metric {} is not supported'.format(metric))

        result_files = self.format_results(results, jsonfile_prefix)

        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = 'Evaluating {}...'.format(metric)
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(results,
                                           proposal_nums,
                                           iou_thrs,
                                           logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError('{} is not in results'.format(metric))
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log('The testing results of the whole dataset is empty.',
                          logger=logger,
                          level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            # run lvis evaluation
            eval_results['lvis'] = {}
            lvis_eval = LVISEval(self.ann_file_path, result_files[metric],
                                 iou_type)
            lvis_eval.params.max_dets = proposal_nums
            lvis_eval.run()
            lvis_eval.print_results()
            print('=====> The above metric is {}.'.format(iou_type))
            keys = lvis_eval.get_results().keys()
            for k in keys:
                eval_results['lvis'][iou_type + k] = lvis_eval.get_results()[k]

        return eval_results
Example no. 26
import logging
from lvis import LVIS, LVISResults, LVISEval

# result and val files for 100 randomly sampled images.
# ANNOTATION_PATH = "./data/lvis_val_100.json"
# RESULT_PATH = "./data/lvis_results_100.json"
ANNOTATION_PATH = "./data/lvis/lvis_v0.5_val.json"
RESULT_PATH = './debug_file.pkl.segm.json'
# RESULT_PATH = './mask_rcnn_r101_fpn_1x_lvis.pkl.segm.json'
ANN_TYPE = 'segm'

lvis_eval = LVISEval(ANNOTATION_PATH, RESULT_PATH, ANN_TYPE)
lvis_eval.run()
lvis_eval.print_results(True)
Example no. 27
from lvis import LVIS, LVISResults, LVISEval

# result and val files for 100 randomly sampled images.
ANNOTATION_PATH = "./data/lvis_val_100.json"
RESULT_PATH = "./data/lvis_results_100.json"

# Annotation type is segm or bbox
ANN_TYPE = 'segm'

# Number of detections to be collected from each image
# LVIS uses 300 by default. 
# If you want to use all detections in the detection 
# file, then you can set it to -1.
MAX_DETS = 300

gt = LVIS(ANNOTATION_PATH)
results = LVISResults(gt, RESULT_PATH, max_dets=MAX_DETS)
lvis_eval = LVISEval(gt, results, iou_type=ANN_TYPE)
params = lvis_eval.params
params.max_dets = MAX_DETS  # cap detections per image at MAX_DETS (-1 would mean no limit).

lvis_eval.run()
lvis_eval.print_results()


# Uncomment to print class-specific LRP-Optimal Thresholds
# lvis_eval.print_lrp_opt_thresholds()
Example no. 28
    results = evaluate(model, test_loader, args.device)
    res_path = os.path.join(
        out_dir, (args.resume).split("/")[-1].split(".")[0] + ".json")
    json.dump(results, open(res_path, 'w'), indent=4)

    if args.dataset == 'coco':
        cocoGt = COCO(annotations)
        try:
            cocoDt = cocoGt.loadRes(res_path)
        except IndexError:
            print('empty list return zero map')
            return 0
        cocoDt.loadAnns()

        #  running evaluation
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        mAP = cocoEval.stats[0]

    elif (args.dataset == 'lvis'):
        try:
            lvis_eval = LVISEval(annotations, res_path, 'bbox')
        except IndexError:
            print('empty list return zero map')
            return 0
        lvis_eval.run()
        metrics = lvis_eval.get_results()
        lvis_eval.print_results()
        mAP = metrics['AP']
Example no. 29
class LVISEvaluator(object):
    def __init__(self, run_path, model_ckpt):
        self.lvis_gt = LVIS(ANNOTATION_PATH)
        self.lvis_dt = LVISResults(self.lvis_gt, PREDICTION_PATH)
        self.run_path = run_path
        self.model_ckpt = model_ckpt

        self._build_coco_to_lvis_map()
        cocoEval = LVISEval(self.lvis_gt, self.lvis_dt, 'segm')
        self.freq_groups = cocoEval._prepare_freq_group()
        config_path = os.path.join(self.run_path, 'config_lvis.yaml')
        self.config = yaml.load(open(config_path, "r"), Loader=yaml.FullLoader)


    def _build_coco_to_lvis_map(self):
        coco_map = json.load(open(os.path.join(LVIS_API_PATH, 'data/coco_to_synset.json')))
        synset_to_lvis = {cat['synset']: cat['id'] for cat in self.lvis_gt.cats.values()}
        synset_to_lvis['oven.n.01'] = synset_to_lvis['toaster_oven.n.01']
        synset_to_lvis['frank.n.02'] = synset_to_lvis['sausage.n.01']

        coco_to_lvis = {}
        lvis_to_coco = {}
        for item in coco_map.values():
            coco_id, lvis_id = item['coco_cat_id'], synset_to_lvis[item['synset']]
            coco_to_lvis[coco_id] = lvis_id
            lvis_to_coco[lvis_id] = coco_id
        self.coco_to_lvis = coco_to_lvis
        self.lvis_to_coco = lvis_to_coco

    def reload_annotations(self):
        self.lvis_gt = LVIS(ANNOTATION_PATH)
        self.dt_path = PREDICTION_PATH
        self.lvis_dt = LVISResults(self.lvis_gt, self.dt_path)

    def _save_gt_features(self):
        # This takes a long time and should only be run once.
        from eval.feature_saver import LvisSaver
        import torch
        config = self.config
        if self.config['hyperbolic']:
            from models.hyperbolic_resnet import HResNetSimCLR
            model = HResNetSimCLR(config['model']['base_model'], config['model']['out_dim'])
        else:
            from models.resnet_simclr import ResNetSimCLR
            model = ResNetSimCLR(config['model']['base_model'], config['model']['out_dim'])
        state_dict = torch.load(os.path.join(self.run_path, self.model_ckpt))  # optionally pass map_location=device
        model.load_state_dict(state_dict)
        model.eval()
        saver = LvisSaver(model, self.lvis_gt, GT_FEATS)
        saver.save()

    def _save_dt_features(self):
        from eval.feature_saver import LvisSaver
        import torch
        config = self.config
        if self.config['hyperbolic']:
            from models.hyperbolic_resnet import HResNetSimCLR
            model = HResNetSimCLR(config['model']['base_model'], config['model']['out_dim'])
        else:
            from models.resnet_simclr import ResNetSimCLR
            model = ResNetSimCLR(config['model']['base_model'], config['model']['out_dim'])
        state_dict = torch.load(os.path.join(self.run_path, self.model_ckpt))  # optionally pass map_location=device
        model.load_state_dict(state_dict)
        model.eval()
        saver = LvisSaver(model, self.lvis_dt, GT_FEATS)
        saver.save()

    def load_gt_features(self, coco_only=False, k=100, freq_groups=None):
        """  Load gt features from GT_FEATS folder.
        :param coco_only: only load categories that are in COCO.
        :param k: only load k masks for each category.
        :param freq_groups: only load categories in the specified freq_groups. e.g. ['f', 'r']
        """
        feats = []
        y = []
        files = os.listdir(GT_FEATS)
        if len(files) == 0:
            self._save_gt_features()
        print('Found {} files.'.format(len(files)))
        for f in files:
            if f.endswith('_x.npy'):
                feats.append(np.load(os.path.join(GT_FEATS, f)))
            elif f.endswith('_y.npy'):
                y.extend(np.load(os.path.join(GT_FEATS, f)))
        feats = np.concatenate(feats)
        print(feats.shape)
        self.feats_gt = feats
        self.feats_gt_y = np.array(y)

        if freq_groups is not None:
            print('Filter by freq groups', freq_groups)
            freqs = (np.concatenate([self.freq_groups[i] for i in freq_groups]) + 1).astype(int)
            idx = np.isin(self.feats_gt_y, freqs)
            self.feats_gt_y = self.feats_gt_y[idx]
            self.feats_gt = self.feats_gt[idx]
            print('After:', self.feats_gt.shape)

        if coco_only:
            coco_cats = self.lvis_to_coco.keys()
            idx = np.array([y in coco_cats for y in self.feats_gt_y])
            self.feats_gt = self.feats_gt[idx]
            self.feats_gt_y = self.feats_gt_y[idx]
            print('Keeping objects in COCO', self.feats_gt.shape)

        if k:
            print('Keeping only {} masks for each class'.format(k))
            new_feats_gt = []
            new_feats_gt_y = []
            counts = Counter(self.feats_gt_y)
            for i, c in counts.items():
                if c > k:
                    idx = np.random.choice(np.arange(len(self.feats_gt_y))[self.feats_gt_y == i], k, replace=False)
                    #                     print(self.feats_gt_y[idx])
                    new_feats_gt.append(self.feats_gt[idx])
                    new_feats_gt_y.extend([i] * k)
                else:
                    new_feats_gt.append(self.feats_gt[self.feats_gt_y == i])
                    new_feats_gt_y.extend([i] * c)
            self.feats_gt = np.concatenate(new_feats_gt)
            self.feats_gt_y = new_feats_gt_y
            print(self.feats_gt.shape)

    def fit_knn(self, k=5, weights='distance'):
        """ Fit a KNN model on the ground truth mask features to see whether the embeddings
        makes sense.
        """
        feats = self.feats_gt
        y = self.feats_gt_y
        if self.config['hyperbolic']:
            from hyperbolic_knn import HyperbolicKNN
            self.neigh = HyperbolicKNN(k, feats, y)
            pred_y = self.neigh.predict(feats)
            print('KNN accuracy', accuracy_score(y, pred_y))
        else:
            from sklearn.neighbors import KNeighborsClassifier
            self.neigh = KNeighborsClassifier(n_neighbors=k, weights=weights)
            self.neigh.fit(feats, y)
            print('KNN accuracy', self.neigh.score(feats, y))

    def load_dt_features(self):
        feats = []
        feats_ann = []
        files = os.listdir(DT_FEATS)
        if len(files) == 0:
            self._save_dt_features()
        print('Found {} files.'.format(len(files)))
        for f in files:
            if f.endswith('_x.npy'):
                feats.append(np.load(os.path.join(DT_FEATS, f)))
            elif f.endswith('_ann_id.npy'):
                feats_ann.extend(np.load(os.path.join(DT_FEATS, f)))
        self.feats = np.concatenate(feats)
        self.feats_ann = np.array(feats_ann)
        print(self.feats.shape, self.feats_ann.shape)

    def run_kmeans(self, C=1500):
        feats = self.feats
        if self.config['hyperbolic']:
            print('Running Hyperbolic KMeans...')
            from poincare_kmeans import PoincareKMeans as HKMeans
            assert self.feats.shape[1] == 2, 'only supports hkmeans in 2d.'
            kmeans = HKMeans(self.feats.shape[1], C)
            clusters = kmeans.fit_predict(self.feats)
            self.clusters = clusters
        else:
            print('Running Euclidean KMeans...')
            from sklearn.cluster import MiniBatchKMeans
            kmeans = MiniBatchKMeans(C)
            clusters = kmeans.fit_predict(feats)
            self.clusters = clusters

    def assign_labels(self):
        """
        Take the clusters assigned by kmeans and assign labels to the clusters.
        :return:
        """
        neigh = self.neigh
        clusters = self.clusters
        feats = self.feats

        C = len(set(clusters))
        coco_clusters = {}
        cluster_to_coco = {}
        print('Assigning labels using KNN ...')
        for i in tqdm(range(C)):
            idx = np.where(clusters == i)[0]
            if len(idx) == 0: continue
            predicted = neigh.predict(feats[idx])
            votes = sorted(Counter(predicted).items(), key=lambda tup: -tup[1])
            best_ratio = votes[0][1] / len(predicted)

            if len(predicted) < 3: continue  # ignore clusters with fewer than 3 predictions
            if best_ratio < 0.95: continue
            cluster_to_coco[i] = (votes[0][0], best_ratio, len(predicted))
        self.cluster_to_coco = cluster_to_coco
        self.coco_clusters = coco_clusters
        print('Number of assigned clusters:', len(cluster_to_coco))

    def evaluate(self):
        cluster_to_coco = self.cluster_to_coco
        lvis_dt = self.lvis_dt
        clusters = self.clusters
        feats_ann = self.feats_ann

        # by default everything is -1.
        for _, dt in lvis_dt.anns.items():  dt['category_id'] = -1

        print('Updating category ids')
        for i in tqdm(range(len(feats_ann))):
            ann_id = int(feats_ann[i])
            cluster_id = clusters[i]
            if cluster_id in cluster_to_coco:
                lvis_dt.anns[ann_id]['category_id'] = cluster_to_coco[cluster_id][0]
        #                 print('assigned ', cluster_to_coco[cluster_id][0])

        print('Finally, evaluate!!')

        self.lvisEval = LVISEval(self.lvis_gt, lvis_dt, 'segm')
        # img_ids = cocoDt.get_img_ids()[:100]
        #         lvisEval.params.catIds = [1, 2, 3, 4]# 5, 6, 7, 8, 9, 10, 11, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 33, 34, 35, 37, 40, 41, 42, 43, 44, 46, 47, 49, 50, 51, 52, 54, 56, 57, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 77, 78, 79, 81, 82, 84, 85, 86, 87, 88, 90]
        #         lvisEval.params.imgIds = img_ids
        #         lvisEval.params.iouThrs = np.linspace(.25, 0.95, int(np.round((0.95 - .25) / .05)) + 1, endpoint=True)

        self.lvisEval.lvis_gt.cats[-1] = {'frequency': 'f',
                                          'id': -1,
                                          'synset': 'all',
                                          'image_count': 0,
                                          'instance_count': 0,
                                          'synonyms': ['all'],
                                          'def': 'dummy category',
                                          'name': 'all'}

        self.lvisEval.evaluate()
        self.lvisEval.accumulate()
        self.lvisEval.summarize()

    def evaluate_class_agnostic(self):
        """ Treat all masks as one category.
        """
        lvis_dt = self.lvis_dt
        lvis_gt = self.lvis_gt
        feats_ann = self.feats_ann
        cluster_to_coco = self.cluster_to_coco

        # by default, none of the predictions gets evaluated.
        for _, dt in lvis_dt.anns.items(): dt['category_id'] = -2
        for _, dt in lvis_gt.anns.items(): dt['category_id'] = -2

        print('Updating category ids')
        for i in tqdm(range(len(feats_ann))):
            cluster_id = self.clusters[i]
            ann_id = int(feats_ann[i])
            if cluster_id in cluster_to_coco:
                lvis_dt.anns[ann_id]['category_id'] = -1  # the assigned ones are included in the eval

        cocoEval = LVISEval(lvis_gt, lvis_dt, 'segm')
        cocoEval.params.cat_ids = [-1]  # only evaluate on category -1 (note: LVIS params are snake_case)
        cocoEval.params.use_cats = 0
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
Example no. 30
    pre['image_id'] = annotation['image_id']
    pre['category_id'] = annotation['category_id']
    pre['score'] = 1.0
    img_proposals = all_prop_boxes[all_prop_ids.index(annotation['image_id'])]
    gt_bbox = coco_box_to_bbox(annotation['bbox'])
    opt_prop = fetch_optimal_proposal(gt_bbox, img_proposals)
    pre['bbox'] = opt_prop
    gt_to_pre.append(pre)

with open(PRE_OUT_PATH, "w") as f:
    json.dump(gt_to_pre, f)
print("Stored GT to pre_out JSON.\n")

# Eval how well we did.
lvis_eval = LVISEval(ANN_PATH, PRE_OUT_PATH, ANN_TYPE)
print("Constructed lvis_eval object.")
lvis_eval.run()
print("Finished lvis_eval.run()")
lvis_eval.print_results()
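The two helpers assumed by the fragment above are not shown; the sketch below is an illustrative guess at their contracts: coco_box_to_bbox converts a COCO [x, y, w, h] box to [x1, y1, x2, y2], and fetch_optimal_proposal returns the proposal (assumed to be stored in the same COCO format) with the highest IoU against the ground-truth box.

import numpy as np

def coco_box_to_bbox(box):
    # COCO [x, y, w, h] -> [x1, y1, x2, y2]
    x, y, w, h = box
    return np.array([x, y, x + w, y + h], dtype=np.float32)

def fetch_optimal_proposal(gt_bbox, proposals):
    # Return the proposal whose box has the highest IoU with gt_bbox ([x1, y1, x2, y2]).
    best_iou, best_prop = -1.0, [0., 0., 0., 0.]
    for prop in proposals:
        p = coco_box_to_bbox(prop)
        ix1, iy1 = max(gt_bbox[0], p[0]), max(gt_bbox[1], p[1])
        ix2, iy2 = min(gt_bbox[2], p[2]), min(gt_bbox[3], p[3])
        inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
        union = ((gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1])
                 + (p[2] - p[0]) * (p[3] - p[1]) - inter)
        iou = inter / union if union > 0 else 0.0
        if iou > best_iou:
            best_iou, best_prop = iou, prop
    return [float(v) for v in best_prop]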