Code example #1
# FoldsAndStages and DataSet come from the surrounding training framework
# (their imports are not shown here).
import numpy as np
from mmdet.core import eval_map


def mmdet_mAP_bbox(fas: FoldsAndStages, ds: DataSet):
    classes = ds.root().meta()['CLASSES']
    numClasses = len(classes)

    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    results = []
    for pi in ds:
        item_gt_labels = pi.y[0]
        item_gt_bboxes = pi.y[1]
        gt_bboxes.append(item_gt_bboxes)
        gt_labels.append(item_gt_labels)

        pred = pi.prediction
        # withMasks = fas.wrapped.withMask()
        # threshold = fas.wrapped.threshold
        # pred = convertMMDETModelOutput(pred, withMasks)
        # pred = applyTresholdToPrediction(pred, withMasks, threshold)

        item_pred_labels = pred[0]
        item_pred_bboxes = pred[2]

        # None of the ground-truth boxes are marked as ignored.
        gt_ignore.append(np.zeros(len(item_gt_bboxes), dtype=bool))

        # Append a constant score of 1.0 so each predicted box has the
        # (x1, y1, x2, y2, score) layout that eval_map expects.
        res_bboxes = np.concatenate(
            [item_pred_bboxes,
             np.ones((len(item_pred_bboxes), 1), dtype=np.float32)],
            axis=1)

        # Group predicted boxes by class: eval_map expects, for every image,
        # one list of detections per class.
        result = [[] for _ in range(numClasses)]

        for i in range(len(item_pred_labels)):
            # Round the (possibly float-valued) predicted label to the
            # nearest integer class index.
            label = int(item_pred_labels[i] + 0.5)
            result[label].append(res_bboxes[i])

        # Stack each per-class list into a (k, 5) array; classes with no
        # detections become an empty (0, 5) array.
        for i in range(numClasses):
            if len(result[i]) == 0:
                result[i] = np.zeros((0, 5), dtype=np.float32)
            else:
                result[i] = np.stack(result[i], axis=0)

        results.append(result)

    # If the dataset is VOC2007, eval_map switches to 11-point mAP evaluation.
    mean_ap, eval_results = eval_map(results,
                                     gt_bboxes,
                                     gt_labels,
                                     gt_ignore=gt_ignore,
                                     scale_ranges=None,
                                     iou_thr=0.5,
                                     dataset=classes,
                                     print_summary=True)

    return mean_ap
Code example #2
import numpy as np
from mmdet.core.evaluation import eval_map


def test_eval_map():
    # det_bboxes, gt_bboxes and gt_ignore are module-level fixtures defined
    # elsewhere in the test file, so this test is not self-contained.

    # 2 images and 2 classes
    det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]

    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {
        'bboxes': gt_bboxes,
        'bboxes_ignore': gt_ignore,
        'labels': labels,
        'labels_ignore': labels_ignore
    }
    annotations = [gt_info, gt_info]
    mean_ap, eval_results = eval_map(det_results,
                                     annotations,
                                     use_legacy_coordinate=True)
    assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(det_results,
                                     annotations,
                                     use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293

    # 1 image and 2 classes
    det_results = [[det_bboxes, det_bboxes]]

    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {
        'bboxes': gt_bboxes,
        'bboxes_ignore': gt_ignore,
        'labels': labels,
        'labels_ignore': labels_ignore
    }
    annotations = [gt_info]
    mean_ap, eval_results = eval_map(det_results,
                                     annotations,
                                     use_legacy_coordinate=True)
    assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(det_results,
                                     annotations,
                                     use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
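The test above relies on det_bboxes, gt_bboxes and gt_ignore fixtures defined elsewhere in the file, so it is not runnable on its own. Below is a minimal, self-contained sketch of the same eval_map call with made-up boxes and labels (illustrative values only, not mmdet's actual test fixtures, so the resulting mAP will differ from the asserted range):

import numpy as np
from mmdet.core.evaluation import eval_map

# Detections for one image and two classes; every per-class entry is a
# (k, 5) array of (x1, y1, x2, y2, score) rows.
det_bboxes = np.array([[10., 10., 50., 60., 0.9],
                       [20., 20., 70., 80., 0.6]], dtype=np.float32)
det_results = [[det_bboxes, det_bboxes]]  # 1 image, 2 classes

# Ground truth for the same image: 'bboxes' is (n, 4), 'labels' holds 0-based
# class indices, and the *_ignore entries mark boxes excluded from scoring.
annotations = [{
    'bboxes': np.array([[12., 11., 48., 58.],
                        [22., 21., 68., 78.]], dtype=np.float32),
    'labels': np.array([0, 1]),
    'bboxes_ignore': np.zeros((0, 4), dtype=np.float32),
    'labels_ignore': np.zeros((0, ), dtype=np.int64),
}]

mean_ap, per_class_results = eval_map(det_results, annotations, iou_thr=0.5)
print(mean_ap)

The use_legacy_coordinate flag in the original test toggles between the mmdet 2.x box-size convention (width = x2 - x1) and the mmdet 1.x convention (width = x2 - x1 + 1) when computing areas and IoUs.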
Code example #3
    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. It must be a float
                when evaluating mAP, and can be a list when evaluating recall.
                Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == 'mAP':
            assert isinstance(iou_thr, float)
            mean_ap, _ = eval_map(
                results,
                annotations,
                scale_ranges=None,
                iou_thr=iou_thr,
                dataset='r_aircraft',  # modified from the original VOC implementation
                logger=logger,
                nproc=10)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_recalls(gt_bboxes,
                                   results,
                                   proposal_nums,
                                   iou_thr,
                                   logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
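For context, a hypothetical usage sketch of the evaluate method above; dataset and outputs are placeholders (constructing the dataset and running inference are outside this snippet):

# Hypothetical usage sketch. `dataset` is an instance of the dataset class
# that defines `evaluate` above; `outputs` is the detector's test output,
# i.e. one entry per image, each a per-class list of (k, 5) arrays of
# (x1, y1, x2, y2, score) rows.
metrics = dataset.evaluate(outputs, metric='mAP', iou_thr=0.5)
print(metrics['mAP'])

With metric='recall', the results are instead treated as per-image proposal arrays and passed to eval_recalls; iou_thr may then be a list of thresholds, producing recall@N@IoU entries plus AR@N averages when more than one threshold is given.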