Code example #1
def test_exact_iou():

    # One box entirely inside another
    prediction = box(minx=0, miny=0, maxx=10, maxy=10)
    truth = box(minx=0, miny=0, maxx=5, maxy=5)
    source_iou = average_precision.IoU(truth, prediction)

    assert source_iou == 25 / 100

    true_array = np.expand_dims(np.array(truth.bounds), axis=0)
    prediction_array = np.expand_dims(np.array([*prediction.bounds]), axis=0)
    retinanet_iou = compute_overlap(prediction_array, true_array)

    assert retinanet_iou[0][0] == source_iou

    # Poorly overlapping boxes
    prediction = box(minx=0, miny=0, maxx=5, maxy=5)
    truth = box(minx=4, miny=4, maxx=9, maxy=9)
    source_iou = average_precision.IoU(truth, prediction)

    assert source_iou == (1 / 49)

    true_array = np.expand_dims(np.array(truth.bounds), axis=0)
    prediction_array = np.expand_dims(np.array([*prediction.bounds]), axis=0)
    retinanet_iou = compute_overlap(prediction_array, true_array)

    assert retinanet_iou[0][0] == source_iou
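The expected values in these assertions follow directly from the intersection-over-union arithmetic; a minimal pure-Python sketch of that calculation (independent of compute_overlap, the function name here is chosen only for illustration):

def iou_xyxy(a, b):
    """IoU of two axis-aligned boxes given as (minx, miny, maxx, maxy) tuples."""
    iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    intersection = iw * ih
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return intersection / (area_a + area_b - intersection)

# Box fully inside the other: intersection 25, union 100 -> 0.25
assert iou_xyxy((0, 0, 10, 10), (0, 0, 5, 5)) == 25 / 100
# Poor overlap: intersection 1, union 25 + 25 - 1 = 49 -> 1/49
assert iou_xyxy((0, 0, 5, 5), (4, 4, 9, 9)) == 1 / 49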
Code example #2
def compute_measures(generator, label, iou_threshold, all_detections, all_annotations):
    false_positives = np.zeros((0,))
    true_positives  = np.zeros((0,))
    scores          = np.zeros((0,))
    num_annotations = 0.0
    
    for i in range(generator.size()):
        detections           = all_detections[i][label]
        annotations          = all_annotations[i][label]
        num_annotations     += annotations.shape[0]
        detected_annotations = []

        sort_by_score_index = detections[:,4].argsort()[::-1]
        for d in detections[sort_by_score_index]:
            scores = np.append(scores, d[4])

            if num_annotations == 0:
                false_positives = np.append(false_positives, 1)
                true_positives  = np.append(true_positives, 0)
                continue

            overlaps            = compute_overlap(np.expand_dims(d, axis=0), annotations)
            assigned_annotation = np.argmax(overlaps, axis=1) if overlaps.size else 0
            max_overlap         = overlaps[0, assigned_annotation] if overlaps.size else 0

            if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                false_positives = np.append(false_positives, 0)
                true_positives  = np.append(true_positives, 1)
                detected_annotations.append(assigned_annotation)
            else:
                false_positives = np.append(false_positives, 1)
                true_positives  = np.append(true_positives, 0)

    return true_positives, false_positives, scores, num_annotations
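The tuple returned above is typically folded into precision/recall the same way code examples #6 and #8 do it below: sort by score, take cumulative sums, then divide. A minimal sketch, assuming the generator, detections, annotations, and the _compute_ap helper used in those examples:

import numpy as np

# Sketch: turning compute_measures output into precision/recall and AP.
true_positives, false_positives, scores, num_annotations = compute_measures(
    generator, label, 0.5, all_detections, all_annotations)

order = np.argsort(-scores)                 # highest-scoring detections first
tp = np.cumsum(true_positives[order])
fp = np.cumsum(false_positives[order])

recall = tp / num_annotations
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
average_precision = _compute_ap(recall, precision)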
Code example #3
def retinant_true_positive(from_retinanet):
    all_detections, all_annotations, validation_generator = from_retinanet
    iou_threshold = 0.5
    boxes = []

    ## RetinaNet
    # Walk through the entire evaluation to find why the next test fails
    # process detections and annotations
    for label in range(validation_generator.num_classes()):
        if not validation_generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0
        iou_score = []

        for i in range(validation_generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])
                boxes.append(box(*d[:4]))

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                iou_score.append(max_overlap)

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

    result = pd.DataFrame({
        "box": boxes,
        "match": true_positives,
        "iou": iou_score
    })

    return result
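A short usage sketch for the DataFrame built above; `result` is assumed to be the return value of retinant_true_positive, and the "iou" column holds the one-element arrays appended in the loop:

# Sketch: summarising matched vs. unmatched detections from the DataFrame above.
matched = result[result["match"] == 1]
print("Matched detections: {} / {}".format(len(matched), len(result)))
mean_iou = sum(float(v) for v in matched["iou"]) / max(len(matched), 1)
print("Mean IoU of matched boxes: {:.3f}".format(mean_iou))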
Code example #4
def average_overlap(values,
                    entries,
                    state,
                    image_shape,
                    mode="focal",
                    ratio_count=3,
                    include_stride=False):
    anchor_params = calculate_config(values, ratio_count)

    if include_stride:
        anchors = anchors_for_shape(image_shape, anchor_params=anchor_params)
    else:
        anchors = base_anchors_for_shape(anchor_params=anchor_params)

    overlap = compute_overlap(entries, anchors)
    max_overlap = np.amax(overlap, axis=1)
    not_matched = len(np.where(max_overlap < 0.5)[0])

    if mode == "avg":
        result = 1 - np.average(max_overlap)
    elif mode == "ce":
        result = np.average(-np.log(max_overlap))
    elif mode == "focal":
        result = np.average(-(1 - max_overlap)**2 * np.log(max_overlap))
    else:
        raise ValueError(f"Invalid mode: {mode}")

    if "iteration" not in state:
        state["iteration"] = 0
    else:
        state["iteration"] += 1

    if result < state["best_result"]:
        state["best_result"] = result

        print(f"Current best anchor configuration {result}",
              state["iteration"])
        print(f"Ratios: {sorted(np.round(anchor_params.ratios, 3))}")
        print(f"Scales: {sorted(np.round(anchor_params.scales, 3))}")

        if include_stride:
            print(f"Average overlap: {np.round(np.average(max_overlap), 3)}")

        print(
            f"Number of labels that don't have any matching anchor: {not_matched}"
        )
        print()

    return result, not_matched
Code example #5
        for i in range(generator.size()):
            detections           = all_detections[i][label]
            annotations          = all_annotations[i][label]
            num_annotations     += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives  = np.append(true_positives, 0)
                    continue

                overlaps            = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap         = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives  = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives  = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue
Code example #6
File: evalmAP.py  Project: fvangef/DeepLidar
def evaluate(generator,
             model,
             iou_threshold=0.5,
             score_threshold=0.05,
             max_detections=200,
             save_path=None,
             experiment=None):
    """ Evaluate the mAP for given dataset using a given model.

    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
        experiment      : Comet ML experiment to log evaluation results to.
    # Returns
        A dict mapping class names to mAP scores.
    """

    # gather all detections and annotations
    all_detections = _get_detections(generator,
                                     model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections,
                                     save_path=save_path,
                                     experiment=experiment)
    all_annotations = _get_annotations(generator)
    average_precisions = {}

    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))

    # process detections and annotations
    for label in range(generator.num_classes()):
        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            # Skip images where detections could not be gathered.
            try:
                _ = len(detections)
            except TypeError:
                print("No detections")
                continue

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        if len(recall) > 0:
            print(
                f"At score threshold {score_threshold}, the IoU recall is {recall[-1]} and precision is {precision[-1]}"
            )
        else:
            print("None of the annotations exceeded score threshold")

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

    return average_precisions
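The dict returned by evaluate maps each label to an (average_precision, num_annotations) pair; folding that into a single mAP follows the same pattern as the end of code example #8. A minimal sketch, where validation_generator and model stand in for whatever dataset generator and trained model are being evaluated:

# Sketch: averaging per-class APs over the classes that actually have annotations.
average_precisions = evaluate(validation_generator, model,
                              iou_threshold=0.5, score_threshold=0.05)

present_classes = 0
total_ap = 0.0
for label, (average_precision, num_annotations) in average_precisions.items():
    if num_annotations > 0:
        present_classes += 1
        total_ap += average_precision
print("mAP: {}".format(total_ap / present_classes))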
Code example #7
def test_iou():
    true_array = np.expand_dims(np.array([0., 0., 5., 5.]), axis=0)
    prediction_array = np.expand_dims(np.array([0., 0., 10., 10.]), axis=0)
    retinanet_iou = compute_overlap(prediction_array, true_array)
    assert retinanet_iou[0][0] == (5**2 / 10**2)
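As these tests use it, compute_overlap pairs every row of its first (N, 4) float64 array with every row of its second (K, 4) array and returns an (N, K) IoU matrix. A minimal sketch extending the test above (the import path follows code example #8):

import numpy as np
from keras_retinanet.utils.anchors import compute_overlap

# Two predictions against one ground-truth box -> a (2, 1) overlap matrix.
predictions = np.array([[0., 0., 10., 10.],
                        [0., 0., 5., 5.]], dtype=np.float64)
truths = np.array([[0., 0., 5., 5.]], dtype=np.float64)

overlaps = compute_overlap(predictions, truths)
assert overlaps.shape == (2, 1)
assert overlaps[0][0] == 0.25   # same pair as test_iou above
assert overlaps[1][0] == 1.0    # identical boxes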
Code example #8
def find_validation_score():
    from keras_retinanet.utils.anchors import compute_overlap

    iou_threshold = 0.5
    tc1, tc2 = get_class_name_mappings()
    d1, d2 = get_description_for_labels()
    all_files = glob.glob(DATASET_PATH + 'validation_big/*.jpg')
    all_ids = []
    for a in all_files:
        all_ids.append(os.path.basename(a)[:-4])
    print('Total image files: {} {}'.format(len(all_files), len(all_ids)))
    valid = pd.read_csv(DATASET_PATH +
                        'annotations/validation-annotations-bbox.csv')
    print('Number of files in annotations: {}'.format(
        len(valid['ImageID'].unique())))
    boxes_files = glob.glob(OUTPUT_PATH + 'cache_tensorflow_validation/*.pklz')
    print('Predictions found: {}'.format(len(boxes_files)))
    unique_classes = valid['LabelName'].unique()
    print('Unique classes: {}'.format(len(unique_classes)))

    print('Read detections...')
    all_detections = get_detections(boxes_files)
    print('Read annotations...')
    all_annotations = get_real_annotations(valid, unique_classes)

    average_precisions = {}
    for zz, label in enumerate(unique_classes):
        print('Go for: {} ({})'.format(d1[label], label))

        if label not in tc2:
            average_precisions[label] = 0, 1
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(len(all_ids)):
            detections = []
            annotations = []
            id = all_ids[i]
            if id in all_detections:
                if label in all_detections[id]:
                    detections = all_detections[id][label]
            if id in all_annotations:
                if label in all_annotations[id]:
                    annotations = all_annotations[id][label]

            if len(detections) == 0 and len(annotations) == 0:
                continue

            if 0:
                if len(detections) > 0 and len(annotations) > 0:
                    print(detections)
                    print(annotations)
                    print('----')

            num_annotations += len(annotations)
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if len(annotations) == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(
                    np.expand_dims(np.array(d, dtype=np.float64), axis=0),
                    np.array(annotations, dtype=np.float64))
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations
        print(d1[label], average_precision, num_annotations)

    present_classes = 0
    precision = 0
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations), d1[label],
              'with average precision: {:.4f}'.format(average_precision))
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision
    mean_ap = precision / present_classes
    print('mAP: {}'.format(mean_ap))
Code example #9
    def evaluate(self):

        # Initialise Ground Truth data
        self.load_ground_truths()

        #Initialise Detections
        self.populate_detections()

        for l in range(self.generator.num_classes()):
            tp = np.zeros((0, ))
            fp = np.zeros((0, ))
            scores = np.zeros((0, ))
            npos = 0

            for i in range(self.generator.size()):
                dets = self.detections[i][l]
                gt = self.ground_truth[i][l]
                npos += gt.shape[0]
                detected_record = []

                for detection in dets:

                    # Append the score first as we might skip!
                    scores = np.append(scores, detection[4])

                    # No ground-truth boxes for this image: count the detection as a false positive.
                    if gt.shape[0] == 0:
                        fp = np.append(fp, 1)
                        tp = np.append(tp, 0)
                        continue

                    #Calculate overlap
                    overlaps = compute_overlap(
                        np.expand_dims(detection, axis=0), gt)
                    best_overlap = np.argmax(overlaps, axis=1)

                    if (best_overlap not in detected_record
                            and overlaps[0, best_overlap] > self.iou_threshold):
                        # Only capture once
                        detected_record.append(best_overlap)

                        # Positive count.
                        fp = np.append(fp, 0)
                        tp = np.append(tp, 1)
                    else:
                        # Negative count.
                        fp = np.append(fp, 1)
                        tp = np.append(tp, 0)

            indices = np.argsort(-scores)
            tp = tp[indices]
            fp = fp[indices]

            tp_score = np.cumsum(tp)
            fp_score = np.cumsum(fp)

            recall = tp_score / npos
            precision = tp_score / np.maximum(tp_score + fp_score,
                                              np.finfo(np.float64).eps)

            ap = self._compute_ap(recall, precision)
            self.average_precisions = np.append(self.average_precisions, ap)

            print(self.generator.label_to_name(l), ap)
        print("mAP: {}".format(self.average_precisions.mean()))