Example #1
0
def eval(dataset, predictor, iou_threshold=0.5, use_2007_metric=False):
    """Evaluate a detector on a dataset with per-class VOC-style average precision.

    Args:
        dataset: dataset exposing ``get_image(i)``, ``ids``, ``class_names``
            and annotations consumed by ``group_annotation_by_class``.
        predictor: object whose ``predict(image, prob_threshold)`` returns
            ``(boxes, labels, probs)`` tensors for one image.
        iou_threshold: IoU above which a detection matches a ground-truth box.
        use_2007_metric: if True, use the 11-point VOC2007 AP interpolation.

    Returns:
        dict with the per-class AP list under ``"per class"`` and their mean
        under ``"rate"``.
    """
    true_case_stat, all_gt_boxes, all_difficult_cases = \
     group_annotation_by_class(dataset)

    # Collect every prediction as (image_id, box, score), grouped by class.
    results_per_class = {}
    for i in interactive(range(len(dataset))):
        image = dataset.get_image(i)
        image_id = dataset.ids[i]
        # prob_threshold=0 keeps all detections: AP needs the full ranked list.
        boxes, labels, probs = predictor.predict(image, prob_threshold=0)

        for box, label, prob in zip(boxes, labels, probs):
            results_per_class.setdefault(label.item(), []).append(
                (image_id, box, prob))

    aps = []
    print("\n\nAverage precision per-class:")
    for class_index, class_name in enumerate(dataset.class_names):
        if class_index == 0:  # index 0 is the background class
            continue

        ap = compute_average_precision_per_class(
            true_case_stat[class_index], all_gt_boxes[class_index],
            all_difficult_cases[class_index],
            # A class may have no predictions at all; fall back to an empty
            # list instead of raising KeyError.
            results_per_class.get(class_index, []),
            iou_threshold, use_2007_metric)
        aps.append(ap)

        print(f"{class_name}: {ap}")

    # Guard against a dataset that has only the background class.
    avg_ap = sum(aps) / len(aps) if aps else 0.0
    print(f"\nAverage Precision Across All Classes:{avg_ap}")

    return {"per class": aps, "rate": avg_ap}
Example #2
0
def group_annotation_by_class(dataset):
    """Index a dataset's ground-truth annotations by class and image.

    Args:
        dataset: dataset exposing ``get_annotation(i)``, ``get_image(i)`` and
            ``bbox_format``. ``get_annotation`` returns ``(image_id,
            (boxes, classes[, is_difficult]))``.

    Returns:
        Tuple of three dicts:
        - ``true_case_stat``: class index -> count of non-difficult GT boxes
          (the recall denominator for AP).
        - ``all_gt_boxes``: class index -> image_id -> stacked tensor of boxes
          in pascal_voc (x_min, y_min, x_max, y_max) format.
        - ``all_difficult_cases``: class index -> image_id -> list of
          per-box difficult flags.
    """
    true_case_stat = {}
    all_gt_boxes = {}
    all_difficult_cases = {}

    # Normalize whatever format the dataset uses to pascal_voc corners.
    input_bbox_converter = BboxFormatConvert(source_format=dataset.bbox_format,
                                             target_format='pascal_voc')

    print("Processing dataset...")
    for i in interactive(range(len(dataset))):
        image_id, annotation = dataset.get_annotation(i)
        gt_boxes, classes = annotation[:2]
        if len(annotation) > 2:
            is_difficult = annotation[2]
        else:
            # Datasets without difficulty flags: treat every box as easy.
            is_difficult = [False] * len(classes)

        gt_boxes = input_bbox_converter(image=dataset.get_image(i),
                                        bboxes=gt_boxes)["bboxes"]
        gt_boxes = torch.tensor(gt_boxes)

        # Use a distinct index here: the original reused `i`, shadowing the
        # outer image index.
        for box_idx, difficult in enumerate(is_difficult):
            class_index = int(classes[box_idx])
            gt_box = gt_boxes[box_idx]
            if not difficult:
                # Only non-difficult boxes count toward true cases.
                true_case_stat[class_index] = \
                 true_case_stat.get(class_index, 0) + 1

            all_gt_boxes.setdefault(class_index, {}) \
                        .setdefault(image_id, []).append(gt_box)

            all_difficult_cases.setdefault(class_index, {}) \
                               .setdefault(image_id, []).append(difficult)

    # Collapse each image's list of box tensors into one (N, 4) tensor.
    for class_index in all_gt_boxes:
        for image_id in all_gt_boxes[class_index]:
            all_gt_boxes[class_index][image_id] = torch.stack(
                all_gt_boxes[class_index][image_id])

    return true_case_stat, all_gt_boxes, all_difficult_cases
Example #3
0
def eval(dataset,
         predictor,
         iou_threshold=0.5,
         metric_score_threshold=0,
         use_2007_metric=False):
    """Evaluate a detector with per-class AP plus TP/FP/FN and mean-IoU stats.

    Args:
        dataset: dataset exposing ``get_image(i)``, ``ids``, ``class_names``
            and annotations consumed by ``group_annotation_by_class``.
        predictor: object whose ``predict(image, prob_threshold)`` returns
            ``(boxes, labels, probs)`` tensors for one image.
        iou_threshold: IoU above which a detection matches a ground-truth box.
        metric_score_threshold: score cut-off used when counting TP/FP/FN.
        use_2007_metric: if True, use the 11-point VOC2007 AP interpolation.

    Returns:
        dict of per-class metric lists (``"per class"``, ``"mean ious"``,
        ``"tp"``, ``"fp"``, ``"fn"``) and the mean AP under ``"rate"``.
        (The original version discarded these values; returning them is
        backward-compatible.)
    """
    true_case_stat, all_gt_boxes, all_difficult_cases = \
     group_annotation_by_class(dataset)

    # Collect every prediction as (image_id, box, score), grouped by class.
    results_per_class = {}
    for i in interactive(range(len(dataset))):
        image = dataset.get_image(i)
        image_id = dataset.ids[i]
        # prob_threshold=0 keeps all detections: AP needs the full ranked list.
        boxes, labels, probs = predictor.predict(image, prob_threshold=0)

        for box, label, prob in zip(boxes, labels, probs):
            results_per_class.setdefault(label.item(), []).append(
                (image_id, box, prob))

    aps = []
    avg_ious = []
    tps = []
    fps = []
    fns = []
    print("\n\nAverage precision per-class:")
    for class_index, class_name in enumerate(dataset.class_names):
        if class_index == 0:  # index 0 is the background class
            continue

        ap, avg_iou, tp, fp, fn = compute_average_precision_per_class(
            true_case_stat[class_index], all_gt_boxes[class_index],
            all_difficult_cases[class_index],
            # A class may have no predictions at all; fall back to an empty
            # list instead of raising KeyError.
            results_per_class.get(class_index, []),
            iou_threshold, metric_score_threshold, use_2007_metric)
        aps.append(ap)
        avg_ious.append(avg_iou)
        tps.append(tp)
        fps.append(fp)
        fns.append(fn)

        print(f"{class_name}: {ap}, TP: {tp}, FP: {fp}, FN: {fn}, "
              f"mean IOU: {avg_iou}")

    # Guard against a dataset that has only the background class.
    avg_ap = sum(aps) / len(aps) if aps else 0.0
    print(f"\nAverage Precision Across All Classes:{avg_ap}")

    return {
        "per class": aps,
        "mean ious": avg_ious,
        "tp": tps,
        "fp": fps,
        "fn": fns,
        "rate": avg_ap,
    }
Example #4
0
def eval(dataset, predictor):
    """Evaluate a detector with the COCO metric suite (pycocotools).

    Builds in-memory COCO-format ground-truth and detection datasets from
    ``dataset`` and ``predictor``, runs ``COCOeval`` and returns the twelve
    standard summary statistics as structured records.

    Args:
        dataset: indexable dataset yielding dicts with ``image``, ``bboxes``
            and ``category_id``; exposes ``class_names`` and ``bbox_format``.
        predictor: object whose ``predict(image, prob_threshold)`` returns
            ``(boxes, labels, probs)`` tensors for one image.

    Returns:
        list of 12 dicts, one per ``COCOeval`` stat, each with ``"iou"``,
        ``"area"``, ``"max_dets"`` and either ``"ap"`` or ``"ar"``.
    """
    gt_coco = {
        "licenses": {
            "name": "",
            "id": 0,
            "url": ""
        },
        "images": [],
        "annotations": [],
        "categories": []
    }

    for i, c in enumerate(dataset.class_names):
        gt_coco["categories"].append(create_coco_category(i, c))

    # Detections share licenses/categories with the ground truth.
    dt_coco = {
        "licenses": gt_coco["licenses"],
        "annotations": [],
        "categories": gt_coco["categories"]
    }

    input_bbox_converter = BboxFormatConvert(source_format=dataset.bbox_format,
                                             target_format='coco')

    # Predictor output is pascal_voc corners; COCO wants (x, y, w, h).
    output_bbox_converter = BboxFormatConvert(source_format='pascal_voc',
                                              target_format='coco')

    for i in interactive(range(len(dataset))):
        sample = dataset[i]
        image = sample['image']
        height, width = image.shape[:2]

        image_record = create_coco_image_record(i, (width, height))
        gt_coco["images"].append(image_record)

        coco_sample = input_bbox_converter(**sample)
        boxes = coco_sample['bboxes']
        labels = coco_sample['category_id']
        scores = [1 for _ in labels]  # ground truth has full confidence
        gt_anns = create_coco_annotations(i, boxes, labels, scores)
        gt_coco["annotations"].extend(gt_anns)

        # prob_threshold=0 keeps all detections for the precision/recall sweep.
        boxes, labels, probs = predictor.predict(image, prob_threshold=0)
        boxes = [b.tolist() for b in boxes]
        labels = labels.tolist()
        probs = probs.tolist()
        boxes = output_bbox_converter(image=image, bboxes=boxes)["bboxes"]

        dt_anns = create_coco_annotations(i, boxes, labels, probs)
        dt_coco["annotations"].extend(dt_anns)

    dt_coco.update({"images": gt_coco["images"]})

    gt_coco_obj = COCO()
    gt_coco_obj.dataset = gt_coco
    gt_coco_obj.createIndex()
    dt_coco_obj = COCO()
    dt_coco_obj.dataset = dt_coco
    dt_coco_obj.createIndex()

    e = COCOeval(gt_coco_obj, dt_coco_obj, iouType='bbox')

    e.evaluate()
    e.accumulate()

    e.summarize()

    # Defaults shared by most of the twelve summary entries.
    iou_all = [e.params.iouThrs[0], e.params.iouThrs[-1]]
    area_all = [e.params.areaRng[0], e.params.areaRng[-1]]
    dets_all = e.params.maxDets[2]

    def _entry(stat_idx, metric, iou=None, area=None, dets=None):
        # One structured record for COCOeval's stats[stat_idx].
        return {
            "iou": iou_all if iou is None else iou,
            "area": area_all if area is None else area,
            "max_dets": dets_all if dets is None else dets,
            metric: e.stats[stat_idx],
        }

    # Ordering mirrors COCOeval.summarize(): stats[0..5] are AP, stats[6..11]
    # are AR; stats[9..11] are small/medium/large areas at maxDets[2].
    result = [
        _entry(0, "ap"),
        _entry(1, "ap", iou=0.5),
        _entry(2, "ap", iou=0.75),
        _entry(3, "ap", area=e.params.areaRng[1]),
        _entry(4, "ap", area=e.params.areaRng[2]),
        _entry(5, "ap", area=e.params.areaRng[3]),
        _entry(6, "ar", dets=e.params.maxDets[0]),
        _entry(7, "ar", dets=e.params.maxDets[1]),
        _entry(8, "ar"),
        _entry(9, "ar", area=e.params.areaRng[1]),
        # Bug fix: stats[10] (AR, medium area) is computed at maxDets[2],
        # but the original reported maxDets[1] here.
        _entry(10, "ar", area=e.params.areaRng[2]),
        _entry(11, "ar", area=e.params.areaRng[3]),
    ]

    return result
Example #5
0
def eval(dataset, predictor):
    """Evaluate a detector with the COCO metric suite (pycocotools).

    Builds in-memory COCO-format ground-truth and detection datasets from
    ``dataset`` and ``predictor``, runs ``COCOeval`` and returns the twelve
    standard summary statistics keyed by their ``summarize()`` labels.

    Args:
        dataset: indexable dataset yielding dicts with ``image``, ``bboxes``
            and ``category_id``; exposes ``class_names`` and ``bbox_format``.
        predictor: object whose ``predict(image, prob_threshold)`` returns
            ``(boxes, labels, probs)`` tensors for one image.

    Returns:
        dict mapping the COCOeval summary label strings to their stat values.
    """
    gt_coco = {
        "licenses": {
            "name": "",
            "id": 0,
            "url": ""
        },
        "images": [],
        "annotations": [],
        "categories": []
    }

    for i, c in enumerate(dataset.class_names):
        gt_coco["categories"].append(create_coco_category(i, c))

    # Detections share licenses/categories with the ground truth.
    dt_coco = {
        "licenses": gt_coco["licenses"],
        "annotations": [],
        "categories": gt_coco["categories"]
    }

    input_bbox_converter = BboxFormatConvert(source_format=dataset.bbox_format,
                                             target_format='coco')

    # Predictor output is pascal_voc corners; COCO wants (x, y, w, h).
    output_bbox_converter = BboxFormatConvert(source_format='pascal_voc',
                                              target_format='coco')

    for i in interactive(range(len(dataset))):
        sample = dataset[i]
        image = sample['image']
        height, width = image.shape[:2]

        image_record = create_coco_image_record(i, (width, height))
        gt_coco["images"].append(image_record)

        coco_sample = input_bbox_converter(**sample)
        boxes = coco_sample['bboxes']
        labels = coco_sample['category_id']
        scores = [1 for _ in labels]  # ground truth has full confidence
        gt_anns = create_coco_annotations(i, boxes, labels, scores)
        gt_coco["annotations"].extend(gt_anns)

        # prob_threshold=0 keeps all detections for the precision/recall sweep.
        boxes, labels, probs = predictor.predict(image, prob_threshold=0)
        boxes = [b.tolist() for b in boxes]
        labels = labels.tolist()
        probs = probs.tolist()
        boxes = output_bbox_converter(image=image, bboxes=boxes)["bboxes"]

        dt_anns = create_coco_annotations(i, boxes, labels, probs)
        dt_coco["annotations"].extend(dt_anns)

    dt_coco.update({"images": gt_coco["images"]})

    gt_coco_obj = COCO()
    gt_coco_obj.dataset = gt_coco
    gt_coco_obj.createIndex()
    dt_coco_obj = COCO()
    dt_coco_obj.dataset = dt_coco
    dt_coco_obj.createIndex()

    # Renamed from `eval`: the original shadowed both the builtin and this
    # very function's name.
    coco_eval = COCOeval(gt_coco_obj, dt_coco_obj, iouType='bbox')

    coco_eval.evaluate()
    coco_eval.accumulate()

    coco_eval.summarize()

    result = {
        "Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ]":
        coco_eval.stats[0],
        "Average Precision  (AP) @[ IoU=0.50	  | area=   all | maxDets=100 ]":
        coco_eval.stats[1],
        "Average Precision  (AP) @[ IoU=0.75	  | area=   all | maxDets=100 ]":
        coco_eval.stats[2],
        "Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]":
        coco_eval.stats[3],
        "Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]":
        coco_eval.stats[4],
        "Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]":
        coco_eval.stats[5],
        "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ]":
        coco_eval.stats[6],
        "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ]":
        coco_eval.stats[7],
        "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ]":
        coco_eval.stats[8],
        "Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]":
        coco_eval.stats[9],
        "Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]":
        coco_eval.stats[10],
        "Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]":
        coco_eval.stats[11]
    }

    return result