Code Example #1
# Imports assumed from the review_object_detection_metrics project layout
# (module paths follow the usage in Code Example #2 below)
from math import isclose

from src.bounding_box import BBType
from src.evaluators.pascal_voc_evaluator import (MethodAveragePrecision,
                                                 get_pascalvoc_metrics)
from src.utils import converter


def test_case_1():
    gts_dir = 'tests/test_case_1/gts'
    dets_dir = 'tests/test_case_1/dets'

    # Load ground-truth and detection boxes from text annotation files
    gts = converter.text2bb(gts_dir, BBType.GROUND_TRUTH)
    dets = converter.text2bb(dets_dir, BBType.DETECTED)

    assert len(gts) > 0
    assert len(dets) > 0

    testing_ious = [0.1, 0.3, 0.5, 0.75]

    # ELEVEN_POINT_INTERPOLATION
    expected_APs = {'object': {0.1: 0.3333333333, 0.3: 0.2683982683, 0.5: 0.0303030303, 0.75: 0.0}}
    for iou in testing_ious:
        results_dict = get_pascalvoc_metrics(
            gts, dets, iou_threshold=iou, method=MethodAveragePrecision.ELEVEN_POINT_INTERPOLATION)
        results = results_dict['per_class']
        for c, res in results.items():
            assert isclose(expected_APs[c][iou], res['AP'])

    # EVERY_POINT_INTERPOLATION
    expected_APs = {'object': {0.1: 0.3371980676, 0.3: 0.2456866804, 0.5: 0.0222222222, 0.75: 0.0}}
    for iou in testing_ious:
        results_dict = get_pascalvoc_metrics(
            gts, dets, iou_threshold=iou, method=MethodAveragePrecision.EVERY_POINT_INTERPOLATION)
        results = results_dict['per_class']
        for c, res in results.items():
            assert isclose(expected_APs[c][iou], res['AP'])
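Annotation files on disk are not required to exercise the evaluator; boxes can also be built directly in code. The following is a minimal sketch, assuming the `BoundingBox` constructor and module layout shown in Code Example #2 below; the coordinates and confidence are invented for illustration.

from src.bounding_box import BBFormat, BBType, BoundingBox
from src.evaluators import pascal_voc_evaluator

# One hypothetical ground-truth box and one overlapping detection on image '0'
gt = BoundingBox(image_name='0', class_id='object',
                 coordinates=(10, 10, 50, 50), format=BBFormat.XYWH,
                 bb_type=BBType.GROUND_TRUTH)
det = BoundingBox(image_name='0', class_id='object',
                  coordinates=(12, 12, 50, 50), format=BBFormat.XYWH,
                  bb_type=BBType.DETECTED, confidence=0.9)

results = pascal_voc_evaluator.get_pascalvoc_metrics([gt], [det], iou_threshold=0.5)
# The pair overlaps well above IoU 0.5, so the single class scores AP = 1.0
print(results['per_class']['object']['AP'])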
Code Example #2
File: metrics.py Project: David-Ciz/kitt
import itertools
from typing import List

# Annotation, AnnotationType, Metrics, and ClassMetrics are types defined
# elsewhere in the kitt project; their import paths are not shown here.


def get_metrics_reference(
    annotated_images: List[List[Annotation]], iou_threshold: float = 0.5
) -> Metrics:
    from src.bounding_box import BBFormat, BBType, BoundingBox
    from src.evaluators import pascal_voc_evaluator

    def to_bb(index: int, annotation: Annotation) -> BoundingBox:
        image_id = str(index)
        return BoundingBox(
            image_name=image_id,
            class_id=annotation.class_name,
            coordinates=annotation.bbox.xywh(),
            format=BBFormat.XYWH,
            bb_type=BBType.GROUND_TRUTH
            if annotation.type == AnnotationType.GROUND_TRUTH
            else BBType.DETECTED,
            confidence=annotation.confidence
            if annotation.type == AnnotationType.PREDICTION
            else None,
        )

    # Flatten the per-image annotation lists into a single list of boxes
    boxes = [
        to_bb(index, ann)
        for index, annotations in enumerate(annotated_images)
        for ann in annotations
    ]

    gt_boxes = [box for box in boxes if box.get_bb_type() == BBType.GROUND_TRUTH]
    det_boxes = [box for box in boxes if box.get_bb_type() == BBType.DETECTED]
    metrics = pascal_voc_evaluator.get_pascalvoc_metrics(
        gt_boxes, det_boxes, iou_threshold=iou_threshold
    )
    return Metrics(
        per_class={
            k: ClassMetrics(
                precision=[float(p) for p in v["precision"]],
                recall=[float(r) for r in v["recall"]],
                AP=v["AP"],
                interpolated_precision=v["interpolated precision"],
                interpolated_recall=v["interpolated recall"],
                total_GT=v["total positives"],
                total_TP=int(v["total TP"]),
                total_FP=int(v["total FP"]),
            )
            for (k, v) in metrics["per_class"].items()
        }
    )
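A note on the pattern above: ground truths and predictions travel through `get_metrics_reference` in a single flat list and are told apart only by their `bb_type`, while the image index doubles as `image_name`, so the evaluator only compares boxes that belong to the same image.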
Code Example #3
    def btn_run_clicked(self):
        if self.dir_save_results is None or not os.path.isdir(
                self.dir_save_results):
            self.show_popup(
                'Output directory to save results was not specified or does not exist.',
                'Invalid output directory',
                buttons=QMessageBox.Ok,
                icon=QMessageBox.Information)
            return
        # Get detections
        det_annotations, passed = self.load_annotations_det()
        if passed is False:
            return
        # Verify if there are detections
        if det_annotations is None or len(det_annotations) == 0:
            self.show_popup(
                'No detection of the selected type was found in the folder.\nCheck if the selected type corresponds to the files in the folder and try again.',
                'Invalid detections',
                buttons=QMessageBox.Ok,
                icon=QMessageBox.Information)
            return

        gt_annotations = self.load_annotations_gt()
        if gt_annotations is None or len(gt_annotations) == 0:
            self.show_popup(
                'No ground-truth bounding box of the selected type was found in the folder.\nCheck if the selected type corresponds to the files in the folder and try again.',
                'Invalid groundtruths',
                buttons=QMessageBox.Ok,
                icon=QMessageBox.Information)
            return

        coco_res = {}
        pascal_res = {}
        # Map each COCO metric key to the checkbox that enables it
        coco_metric_checkboxes = {
            'AP': self.chb_metric_AP_coco,
            'AP50': self.chb_metric_AP50_coco,
            'AP75': self.chb_metric_AP75_coco,
            'APsmall': self.chb_metric_APsmall_coco,
            'APmedium': self.chb_metric_APmedium_coco,
            'APlarge': self.chb_metric_APlarge_coco,
            'AR1': self.chb_metric_AR_max1,
            'AR10': self.chb_metric_AR_max10,
            'AR100': self.chb_metric_AR_max100,
            'ARsmall': self.chb_metric_AR_small,
            'ARmedium': self.chb_metric_AR_medium,
            'ARlarge': self.chb_metric_AR_large,
        }
        # If any COCO metric is required
        if any(chb.isChecked() for chb in coco_metric_checkboxes.values()):
            coco_res = get_coco_summary(gt_annotations, det_annotations)
            # Remove metrics that were not requested
            for metric, checkbox in coco_metric_checkboxes.items():
                if not checkbox.isChecked():
                    del coco_res[metric]
        # If any Pascal VOC metric is required
        if (self.chb_metric_AP_pascal.isChecked()
                or self.chb_metric_mAP_pascal.isChecked()):
            iou_threshold = self.dsb_IOU_pascal.value()
            pascal_res = get_pascalvoc_metrics(gt_annotations,
                                               det_annotations,
                                               iou_threshold=iou_threshold,
                                               generate_table=True)
            # Remove metrics that were not requested
            if not self.chb_metric_AP_pascal.isChecked():
                del pascal_res['per_class']
            if not self.chb_metric_mAP_pascal.isChecked():
                del pascal_res['mAP']

            if 'per_class' in pascal_res:
                # Save plots
                plot_precision_recall_curve(pascal_res['per_class'],
                                            showAP=True,
                                            savePath=self.dir_save_results,
                                            showGraphic=False)

        if len(coco_res) + len(pascal_res) == 0:
            self.show_popup('No results to show',
                            'No results',
                            buttons=QMessageBox.Ok,
                            icon=QMessageBox.Information)
        else:
            self.dialog_results.show_dialog(coco_res, pascal_res,
                                            self.dir_save_results)
Code Example #4
# Imports assumed from the project layout used in the examples above;
# the module holding MethodAveragePrecision is an assumption.
import pickle

from src.bounding_box import BoundingBox
from src.evaluators import coco_evaluator, pascal_voc_evaluator
from src.utils import general_utils
from src.utils.enumerators import MethodAveragePrecision

# Load previously pickled ground-truth and detection boxes
with open('gts.pickle', 'rb') as f:
    gt_bbs = pickle.load(f)
with open('dets.pickle', 'rb') as f:
    det_bbs = pickle.load(f)

# Optionally keep only detections of a single class:
# det_bbs = [det for det in det_bbs if det.get_class_id() == 'cat']

# dict_bbs_per_class = BoundingBox.get_amount_bounding_box_all_classes(gt_bbs, reverse=True)
# general_utils.plot_bb_per_classes(dict_bbs_per_class, horizontally=False, rotation=90, show=True, extra_title=' (groundtruths)')

# dict_bbs_per_class = BoundingBox.get_amount_bounding_box_all_classes(det_bbs, reverse=True)
# general_utils.plot_bb_per_classes(dict_bbs_per_class, horizontally=False, rotation=90, show=True, extra_title=' (detections)')

#############################################################
# EVALUATE WITH COCO METRICS
#############################################################
coco_res1 = coco_evaluator.get_coco_summary(gt_bbs, det_bbs)
coco_res2 = coco_evaluator.get_coco_metrics(gt_bbs, det_bbs)
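# coco_res1 holds the twelve standard COCO summary metrics (the same
# 'AP' ... 'ARlarge' keys filtered in Code Example #3); coco_res2 is
# expected to hold the per-class COCO metrics.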
#############################################################
# EVALUATE WITH VOC PASCAL METRICS
#############################################################
ious = [0.5, 0.75]
voc_res = {}
for iou in ious:
    # get_pascalvoc_metrics returns a dict with 'per_class' and 'mAP'
    # entries (see Code Examples #1 and #3)
    results = pascal_voc_evaluator.get_pascalvoc_metrics(
        gt_bbs,
        det_bbs,
        iou,
        generate_table=True,
        method=MethodAveragePrecision.EVERY_POINT_INTERPOLATION)
    voc_res[iou] = results['per_class']
    mAP = results['mAP']
    pascal_voc_evaluator.plot_precision_recall_curve(
        voc_res[iou], showInterpolatedPrecision=True, showAP=True)
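
When running without a display, the same curves can be written to disk instead of shown. A small sketch reusing the `savePath` and `showGraphic` parameters from Code Example #3; the output directory is hypothetical.

for iou in ious:
    pascal_voc_evaluator.plot_precision_recall_curve(
        voc_res[iou],
        showInterpolatedPrecision=True,
        showAP=True,
        savePath='plots',  # hypothetical output directory
        showGraphic=False)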