Example #1
    def __init__(
        self,
        num_classes=1,
        metric_key="mAP",
        output_type="ssd",
        iou_threshold=0.5,
        confidence_threshold=0.5,
    ):
        """
        Args:
            num_classes (int): Number of classes.
                Default is ``1``.
            metric_key (str): name of a metric.
                Default is ``"mAP"``.
            output_type (str): model output type. Valid values are ``"ssd"`` or
                ``"centernet"`` or ``"yolo-x"``.
                Default is ``"ssd"``.
            iou_threshold (float): IoU threshold to use in NMS.
                Default is ``0.5``.
            confidence_threshold (float): confidence threshold,
                proposals with lower confidence than the threshold will be ignored.
                Default is ``0.5``.
        """
        super().__init__(order=CallbackOrder.Metric)
        assert output_type in ("ssd", "centernet", "yolo-x")

        self.num_classes = num_classes
        self.metric_key = metric_key
        self.output_type = output_type
        self.iou_threshold = iou_threshold
        self.confidence_threshold = confidence_threshold

        self.metric_fn = MetricBuilder.build_evaluation_metric(
            "map_2d", async_mode=False, num_classes=num_classes)
Example #2
def calculate_AP(ovthresh,
                 recs,
                 detpath,
                 classname,
                 imagenames,
                 cal_type='bbox'):
    # read dets
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    if any(lines):
        # exchange dets as key-value dic
        # 1. pred bbox with gt bbox; 2. bbox from pred four corners with gt bbox
        preds_dic = {}
        splitlines = [x.strip().split(' ') for x in lines]
        for x in splitlines:
            bbox_used = None
            if cal_type == 'bbox':
                bbox_used = x[2:6]
            elif cal_type == 'bbox_from_fc':
                bbox_used = change_four_corners_to_bbox(x[6:14])
            else:
                assert False, "wrong AP calculation type"
            if x[0] in preds_dic.keys():
                preds_dic[x[0]] = np.vstack(
                    (preds_dic[x[0]],
                     np.append(np.array([float(z) for z in bbox_used]),
                               [int(0), float(x[1])])))
            else:
                preds_dic[x[0]] = np.append(
                    np.array([float(z) for z in bbox_used]),
                    [int(0), float(x[1])])
                preds_dic[x[0]] = np.expand_dims(preds_dic[x[0]], axis=0)

        # create metric
        metric_fn = MetricBuilder.build_evaluation_metric("map_2d",
                                                          async_mode=True,
                                                          num_classes=1)
        # extract gt objects for this class
        for imagename in imagenames:
            R = [obj for obj in recs[imagename] if obj['name'] == classname]
            GT_bbox = np.array([x['bbox'] for x in R])
            GT_append = np.zeros((GT_bbox.shape[0], 3), dtype=int)
            GTs = np.hstack((GT_bbox, GT_append))
            if imagename not in preds_dic.keys():
                preds = np.array([[]])
            else:
                preds = preds_dic[imagename]
            metric_fn.add(preds, GTs)

        # metrics = metric_fn.value(iou_thresholds=ovthresh)
        # rec = metrics[ovthresh][0]['recall']
        # prec = metrics[ovthresh][0]['precision']
        # ap = metrics[ovthresh][0]['ap']
        metrics = metric_fn.value(iou_thresholds=np.arange(0.5, 1.0, 0.05),
                                  recall_thresholds=np.arange(0., 1.01, 0.01),
                                  mpolicy='soft')
        mean_ap = metrics["mAP"]
    else:
        # no detections were read for this class, so there is nothing to evaluate
        metrics, mean_ap = {}, -1.

    return metrics, mean_ap
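The commented-out lines above hint at the structure of the returned dict: indexing by an IoU threshold and then by class id gives per-class 'ap', 'recall' and 'precision', while 'mAP' is the averaged scalar. A short sketch of reading both forms (the 0.5 threshold is illustrative):

# assumes metric_fn has already been populated with metric_fn.add(...) calls
metrics = metric_fn.value(iou_thresholds=0.5)
per_class = metrics[0.5][0]        # results for class id 0 at IoU 0.5
ap = per_class['ap']               # average precision for that class
rec = per_class['recall']          # recall values of the PR curve
prec = per_class['precision']      # precision values of the PR curve
mean_ap = metrics['mAP']           # averaged over classes and thresholds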
Example #3
    def __init__(
        self,
        model,
        data_root,
        img_size=448,
        conf_thresh=0.001,
        nms_thresh=0.5,
        is_07_subset=False,
        progressbar=False,
        eval_style='coco',
    ):

        super(VOCEvaluator, self).__init__(
            model=model, img_size=img_size, conf_thresh=conf_thresh, nms_thresh=nms_thresh,
        )

        self.is_07_subset = is_07_subset
        self.test_file = "test.txt" if not self.is_07_subset else "val.txt"
        self.val_data_path = data_root

        img_inds_file = os.path.join(
            data_root, "ImageSets", "Main", self.test_file
        )
        with open(img_inds_file, "r") as f:
            lines = f.readlines()
            self.img_inds = [line.strip() for line in lines]

        self.progressbar = progressbar
        self.eval_style = eval_style
        self.predictions = {}
        self.ground_truth_boxes = {}

        self.class_names = self._parse_gt_boxes()
        self.metric_fn = MetricBuilder.build_evaluation_metric("map_2d",
            async_mode=True, num_classes=len(self.class_names))
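The evaluator above only builds the metric. A hypothetical helper (not part of the original class) could feed the collected self.predictions / self.ground_truth_boxes dicts into it per image id, assuming the stored arrays already use the library's row layout:

    def _compute_map(self):
        # hypothetical: iterate the per-image dicts built by _parse_gt_boxes()
        # and the prediction step, then read out a single mAP value
        for img_id, gt in self.ground_truth_boxes.items():
            preds = self.predictions.get(img_id, np.empty((0, 6)))
            self.metric_fn.add(preds, gt)
        return self.metric_fn.value(iou_thresholds=0.5)["mAP"]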
Example #4
    def __init__(self, n_classes):

        self.iou_thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]

        self.AP = 0.0
        self.n_pictures = 0.0
        self.map_calc = MetricBuilder.build_evaluation_metric(
            "map_2d", async_mode=True, num_classes=n_classes - 1)
Example #5
    def __call__(self, preds, targs, num_classes):
        if self.remove_background_class:
            num_classes = num_classes - 1
        metric_fn = MetricBuilder.build_evaluation_metric(
            "map_2d", async_mode=True, num_classes=num_classes)
        for sample_preds, sample_targs in self.create_metric_samples(
                preds, targs):
            metric_fn.add(sample_preds, sample_targs)
        metric_batch = metric_fn.value(iou_thresholds=self.iou_thresholds,
                                       recall_thresholds=np.arange(0., 1.01, 0.01),
                                       mpolicy='soft')['mAP']
        return metric_batch
Example #6
import sys, os
import keras
import cv2
import numpy as np
import traceback
import xml.etree.ElementTree as ET
from glob import glob
from os.path import splitext, basename
from src.utils import im2single
from src.keras_utils import load_model, detect_lp
from src.label import Shape, writeShapes
from mean_average_precision import MetricBuilder

print(MetricBuilder.get_metrics_list())

metric_fn = MetricBuilder.build_evaluation_metric("map_2d",
                                                  async_mode=False,
                                                  num_classes=1)

is_exibir_gt = True
bbox_color_gt = (0, 0, 255)


def adjust_pts(pts, lroi):
    return pts * lroi.wh().reshape((2, 1)) + lroi.tl().reshape((2, 1))


def point_str_to_tuple(point_str):
    pontos = point_str.split(',')
    return int(float(pontos[0])), int(float(pontos[1]))
Example #7
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             use_12_metric=True):
    """rec, prec, ap = voc_eval(detpath,
                           annopath,
                           imagesetfile,
                           classname,
                           [use_12_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
   detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
   annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[use_12_metric]: Whether to use VOC12's all points AP computation
   (default True)
"""
    # assumes detections are in detpath.format(classname)
    # assumes annotations are in annopath.format(imagename)
    # assumes imagesetfile is a text file with each line an image name
    # cachedir caches the annotations in a pickle file
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath % (imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        # save
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        # load
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)

    # create metric_fn
    metric_fn = MetricBuilder.build_evaluation_metric("map_2d",
                                                      async_mode=True,
                                                      num_classes=1)

    # read dets
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    if any(lines):
        # exchange dets as key-value dic
        preds_dic = {}
        splitlines = [x.strip().split(' ') for x in lines]
        for x in splitlines:
            if x[0] in preds_dic.keys():
                preds_dic[x[0]] = np.vstack(
                    (preds_dic[x[0]],
                     np.append(np.array([float(z) for z in x[2:]]),
                               [int(0), float(x[1])])))
            else:
                preds_dic[x[0]] = np.append(
                    np.array([float(z) for z in x[2:]]),
                    [int(0), float(x[1])])
                preds_dic[x[0]] = np.expand_dims(preds_dic[x[0]], axis=0)

        # extract gt objects for this class
        for imagename in imagenames:
            R = [obj for obj in recs[imagename] if obj['name'] == classname]
            GT_bbox = np.array([x['bbox'] for x in R])
            GT_append = np.zeros((GT_bbox.shape[0], 3), dtype=int)
            GTs = np.hstack((GT_bbox, GT_append))
            if imagename not in preds_dic.keys():
                preds = np.array([[]])
            else:
                preds = preds_dic[imagename]
            metric_fn.add(preds, GTs)

        # `args` is assumed to be a module-level argparse namespace providing iou_thres
        metrics = metric_fn.value(iou_thresholds=args.iou_thres)
        ap = metrics[args.iou_thres][0]['ap']
        rec = metrics[args.iou_thres][0]['recall']
        prec = metrics[args.iou_thres][0]['precision']
    else:
        rec = -1.
        prec = -1.
        ap = -1.

    return rec, prec, ap
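An illustrative call (all paths and the class name are placeholders; as noted above, the function also reads a module-level args.iou_thres):

# illustrative only: detpath is formatted with the class name,
# annopath with the image name, matching the code above
rec, prec, ap = voc_eval(detpath='results/comp4_det_test_{:s}.txt',
                         annopath='VOCdevkit/VOC2007/Annotations/%s.xml',
                         imagesetfile='VOCdevkit/VOC2007/ImageSets/Main/test.txt',
                         classname='car',
                         cachedir='cache/')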
Example #8
    def __init__(self,
                 data,
                 model,
                 input_shape=(416, 416),
                 learningrate=1e-3,
                 epoch=1,
                 classes_path='model_data/seabird_classes.txt',
                 anchors_path='model_data/tiny_yolo_anchors.txt',
                 load_pretrained_model=False,
                 pretrained_weights_path=None,
                 data_path=None,
                 data_root_path=None):

        self.init_epoch = 0
        self.epoch = epoch
        self._data = data
        self._model = model
        self.anchors_path = anchors_path
        self.classes_path = classes_path
        self.class_names = self._data.get_classes(self.classes_path)
        self.num_classes = len(self.class_names)
        self.anchors = self._data.get_anchors(self.anchors_path)
        self.input_shape = input_shape
        self.load_pretrained = load_pretrained_model
        self.lr = learningrate
        if data_path:
            self.lines_train, self.lines_val = self._data.read_training_data(
                data_root_path, data_path)
        else:
            self.lines_train, self.lines_val = None, None
        self.log_dir = 'logs/000/'
        self.logging = TensorBoard(log_dir=self.log_dir)
        self.checkpoint = ModelCheckpoint(
            self.log_dir +
            'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
            monitor='val_loss',
            save_weights_only=True,
            save_best_only=True,
            period=3)
        self.reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                           factor=0.1,
                                           patience=3,
                                           verbose=1)
        self.early_stopping = EarlyStopping(monitor='val_loss',
                                            min_delta=0,
                                            patience=10,
                                            verbose=1)
        self.local_model = self._model.build_model(
            self.anchors,
            self.num_classes,
            load_pretrained=self.load_pretrained,
            weights_path=pretrained_weights_path
        )  # model created, self._model.current_model
        self.metric_fn = MetricBuilder.build_evaluation_metric(
            "map_2d", async_mode=True, num_classes=self.num_classes)

        self.model_body = self._model.get_model_body()
        self.input_image_shape = K.placeholder(shape=(2, ))
        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = yolo_eval(
            self.model_body.output,
            self.anchors,
            self.num_classes,
            self.input_image_shape,
            score_threshold=0.3,
            iou_threshold=0.5)

        self.logger = logging.getLogger('FedBird')
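A hypothetical evaluation helper for the trainer above (method names and the ground-truth layout are assumptions): stack the per-image session outputs into the library's prediction rows and read out mAP. The boxes may need column reordering if yolo_eval returns them as [ymin, xmin, ymax, xmax].

    def add_image_to_metric(self, out_boxes, out_scores, out_classes, gt):
        # hypothetical: out_* come from evaluating self.boxes / self.scores /
        # self.classes for one image; gt rows are assumed to be
        # [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
        preds = np.column_stack([out_boxes,
                                 out_classes.astype(np.float32),
                                 out_scores])
        self.metric_fn.add(preds, gt)

    def evaluate_map(self):
        return self.metric_fn.value(iou_thresholds=0.5)['mAP']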