Example #1
    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 iou_thr=0.5,
                 scale_ranges=None,
                 use_07_metric=True,
                 proposal_nums=(100, 300, 1000)):

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
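        # Collect ground-truth annotations for every image in the dataset.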
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == 'mAP':
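            # Mean AP over all classes at a single IoU threshold.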
            assert isinstance(iou_thr, float)
            mean_ap, _ = eval_arb_map(results,
                                      annotations,
                                      scale_ranges=scale_ranges,
                                      iou_thr=iou_thr,
                                      use_07_metric=use_07_metric,
                                      dataset=self.CLASSES,
                                      logger=logger)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_arb_recalls(gt_bboxes,
                                       results,
                                       True,
                                       proposal_nums,
                                       iou_thr,
                                       logger=logger)
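            # Flatten the (proposal_nums x iou_thrs) recall matrix into named entries.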
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
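            # With more than one IoU threshold, also report average recall (AR).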
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
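
For reference, the recall branch above packs the (proposal_nums x iou_thrs) recall matrix returned by eval_arb_recalls into flat dictionary keys. Below is a minimal, self-contained sketch of that packing, using made-up recall values instead of calling eval_arb_recalls:

import numpy as np

# Made-up recall values with shape (len(proposal_nums), len(iou_thrs)),
# the layout the loops above expect from eval_arb_recalls.
proposal_nums = (100, 300, 1000)
iou_thrs = [0.5, 0.75]
recalls = np.array([[0.42, 0.31],
                    [0.55, 0.40],
                    [0.63, 0.47]])

eval_results = {}
for i, num in enumerate(proposal_nums):
    for j, iou in enumerate(iou_thrs):
        eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
if recalls.shape[1] > 1:
    # Average recall (AR) across IoU thresholds for each proposal budget.
    ar = recalls.mean(axis=1)
    for i, num in enumerate(proposal_nums):
        eval_results[f'AR@{num}'] = ar[i]
print(list(eval_results))  # recall@100@0.5, recall@100@0.75, ..., AR@100, AR@300, AR@1000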
Example #2
    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 with_merge=True,
                 ign_diff=True,
                 ign_scale_ranges=None,
                 save_dir=None,
                 merge_iou_thr=0.1,
                 use_07_metric=True,
                 scale_ranges=None,
                 eval_iou_thr=[0.5],
                 proposal_nums=(2000, ),
                 nproc=10):
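        # Cap the number of worker processes at the available CPU count.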
        nproc = min(nproc, os.cpu_count())
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        task = self.task

        eval_results = {}
        if metric == 'mAP':
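            # Merge detections from split image patches back onto the original
            # images (controlled by with_merge) before computing mAP.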
            merged_results = self.format_results(
                results,
                nproc=nproc,
                with_merge=with_merge,
                ign_scale_ranges=ign_scale_ranges,
                iou_thr=merge_iou_thr,
                save_dir=save_dir)

            infos = self.ori_infos if with_merge else self.data_infos
            id_mapper = {ann['id']: i for i, ann in enumerate(infos)}
            det_results, annotations = [], []
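            # Pair each (image id, detections) entry with that image's ground truth.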
            for k, v in merged_results:
                det_results.append(v)
                ann = infos[id_mapper[k]]['ann']
                gt_bboxes = ann['bboxes']
                gt_labels = ann['labels']
                diffs = ann.get('diffs',
                                np.zeros((gt_bboxes.shape[0], ), dtype=np.int64))

                if task == 'Task2':
                    gt_bboxes = bt.bbox2type(gt_bboxes, 'hbb')

                gt_ann = {}
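                # Annotations flagged as difficult are moved to the *_ignore
                # fields and excluded from the regular ground truth.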
                if ign_diff:
                    gt_ann['bboxes_ignore'] = gt_bboxes[diffs == 1]
                    gt_ann['labels_ignore'] = gt_labels[diffs == 1]
                    gt_bboxes = gt_bboxes[diffs == 0]
                    gt_labels = gt_labels[diffs == 0]
                gt_ann['bboxes'] = gt_bboxes
                gt_ann['labels'] = gt_labels
                annotations.append(gt_ann)

            print('\nStart calculating mAP!')
            print('This result is only for reference;',
                  'the final result is subject to the official DOTA_devkit.')
            mean_ap, _ = eval_arb_map(det_results,
                                      annotations,
                                      scale_ranges=scale_ranges,
                                      iou_thr=eval_iou_thr,
                                      use_07_metric=use_07_metric,
                                      dataset=self.CLASSES,
                                      logger=logger,
                                      nproc=nproc)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
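            # Recall is computed on the split (un-merged) annotations; `results`
            # must be a list with one proposal ndarray per image.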
            assert mmcv.is_list_of(results, np.ndarray)
            gt_bboxes = []
            for info in self.data_infos:
                bboxes = info['ann']['bboxes']
                if ign_diff:
                    diffs = info['ann'].get(
                        'diffs', np.zeros((bboxes.shape[0], ), dtype=np.int64))
                    bboxes = bboxes[diffs == 0]
                gt_bboxes.append(bboxes)
            if isinstance(eval_iou_thr, float):
                eval_iou_thr = [eval_iou_thr]
            recalls = eval_arb_recalls(gt_bboxes,
                                       results,
                                       True,
                                       proposal_nums,
                                       eval_iou_thr,
                                       logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(eval_iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
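
For reference, a self-contained sketch of the difficult-annotation split used in the mAP branch above; the boxes, labels, and difficulty flags here are made up:

import numpy as np

# Made-up ground truth: three boxes, the second one flagged as difficult.
gt_bboxes = np.array([[0., 0., 10., 10.],
                      [5., 5., 20., 20.],
                      [1., 1., 3., 3.]])
gt_labels = np.array([0, 1, 0])
diffs = np.array([0, 1, 0], dtype=np.int64)

# Same split as in evaluate(): difficult boxes go to the *_ignore fields,
# the remaining boxes and labels are kept as regular ground truth.
gt_ann = {
    'bboxes_ignore': gt_bboxes[diffs == 1],
    'labels_ignore': gt_labels[diffs == 1],
    'bboxes': gt_bboxes[diffs == 0],
    'labels': gt_labels[diffs == 0],
}
print({k: v.shape for k, v in gt_ann.items()})
# {'bboxes_ignore': (1, 4), 'labels_ignore': (1,), 'bboxes': (2, 4), 'labels': (2,)}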