    def test_is_FN_incremented_properly_if_no_prediction(self):
        mAP = DetectionMAP(3)
        mAP.evaluate(np.array([]), np.array([]), np.array([]), self.gt,
                     self.gt_cls)
        self.assertEqual(mAP.total_accumulators[0][0].FN, 2)
        self.assertEqual(mAP.total_accumulators[0][1].FN, 1)
        self.assertEqual(mAP.total_accumulators[0][2].FN, 1)
    def test_is_FP_incremented_properly_when_away_from_gt(self):
        mAP = DetectionMAP(3)
        mAP.evaluate(self.pred, self.cls, self.conf, self.gt, self.gt_cls)

        self.assertEqual(mAP.total_accumulators[0][0].FP, 3)
        self.assertEqual(mAP.total_accumulators[0][1].FP, 1)
        self.assertEqual(mAP.total_accumulators[0][2].FP, 0)
    def test_is_TP_incremented_properly(self):
        mAP = DetectionMAP(3)
        mAP.evaluate(self.pred, self.cls, self.conf, self.gt, self.gt_cls)

        self.assertEqual(mAP.total_accumulators[0][0].TP, 1)
        self.assertEqual(mAP.total_accumulators[0][1].TP, 0)
        self.assertEqual(mAP.total_accumulators[0][2].TP, 1)
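These assertions only make sense against the class fixture, which is not shown on this page; below is a minimal sketch of the kind of setUp the tests assume. The per-class counts (six predictions: four of class 0, one each of classes 1 and 2; four ground truths: two of class 0, one each of classes 1 and 2) follow from the asserted TP/FP/FN totals, but the box coordinates are purely illustrative and will not reproduce the exact assertions.

import unittest

import numpy as np


class DetectionMAPFixtureSketch(unittest.TestCase):  # hypothetical test-case name
    def setUp(self):
        # Six predicted boxes (x1, y1, x2, y2): four of class 0, one of class 1, one of class 2.
        self.pred = np.array([[0.10, 0.10, 0.30, 0.30],
                              [0.12, 0.12, 0.32, 0.32],
                              [0.60, 0.60, 0.80, 0.80],
                              [0.40, 0.40, 0.55, 0.55],
                              [0.00, 0.70, 0.20, 0.90],
                              [0.70, 0.10, 0.90, 0.30]])
        self.cls = np.array([0, 0, 0, 0, 1, 2])
        self.conf = np.array([0.9, 0.8, 0.7, 0.6, 0.5, 0.4])
        # Four ground-truth boxes: two of class 0, one of class 1, one of class 2.
        self.gt = np.array([[0.10, 0.10, 0.30, 0.30],
                            [0.60, 0.60, 0.80, 0.80],
                            [0.40, 0.40, 0.55, 0.55],
                            [0.70, 0.10, 0.90, 0.30]])
        self.gt_cls = np.array([0, 0, 1, 2])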
Example #6
def test_is_FP_incremented_properly_when_away_from_gt(self):
    IoU = DetectionMAP.compute_IoU(self.pred, self.gt, self.conf, 0)
    qty = DetectionMAP.compute_false_positive(self.cls, self.conf, 0, self.gt_cls, IoU, 0)
    self.assertEqual(qty, 4)
    qty = DetectionMAP.compute_false_positive(self.cls, self.conf, 0, self.gt_cls, IoU, 1)
    self.assertEqual(qty, 2)
    qty = DetectionMAP.compute_false_positive(self.cls, self.conf, 0, self.gt_cls, IoU, 2)
    self.assertEqual(qty, 0)
Example #7
def test_is_TP_incremented_properly(self):
    IoU = DetectionMAP.compute_IoU(self.pred, self.gt, self.conf, 0)
    qty = DetectionMAP.compute_true_positive(self.cls, self.gt_cls, IoU, 0)
    self.assertEqual(qty, 1)
    qty = DetectionMAP.compute_true_positive(self.cls, self.gt_cls, IoU, 1)
    self.assertEqual(qty, 0)
    qty = DetectionMAP.compute_true_positive(self.cls, self.gt_cls, IoU, 2)
    self.assertEqual(qty, 1)
Example #8
def test_is_FN_incremented_properly_if_no_prediction(self):
    IoU = None
    qty = DetectionMAP.compute_false_negatives(np.array([]), self.gt_cls, IoU, 0)
    self.assertEqual(qty, 2)
    qty = DetectionMAP.compute_false_negatives(np.array([]), self.gt_cls, IoU, 1)
    self.assertEqual(qty, 1)
    qty = DetectionMAP.compute_false_negatives(np.array([]), self.gt_cls, IoU, 2)
    self.assertEqual(qty, 1)
Example #9
def test_is_FN_incremented_properly(self):
    IoU = DetectionMAP.compute_IoU(self.pred, self.gt, self.conf, 0)
    qty = DetectionMAP.compute_false_negatives(self.cls, self.gt_cls, IoU, 0)
    self.assertEqual(qty, 1)
    qty = DetectionMAP.compute_false_negatives(self.cls, self.gt_cls, IoU, 1)
    self.assertEqual(qty, 1)
    qty = DetectionMAP.compute_false_negatives(self.cls, self.gt_cls, IoU, 2)
    self.assertEqual(qty, 0)
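Example #10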
def calculate_metrics(eval_data, model) -> tuple:
    mAP = DetectionMAP(1)
    false_negative = 0
    inference_times = []
    detections = DataFrame()

    for index, (image, labels) in enumerate(eval_data):
        t1 = time()
        boxes, scores, classes, nums = model.predict(image)
        t2 = time()
        inference_times += [t2 - t1]
        labels = zero_filter(labels)
        # get first 4 cols from tensor and compute iou
        pred, pred_cls, pred_conf, gt_bb, gt_cls = [], [], [], [], []
        for i, truth in enumerate(labels[:, :5]):
            if (tf.reduce_sum(truth).numpy() != 0
                    or tf.reduce_sum(boxes[0][i]) != 0
                    or tf.reduce_sum(scores[0][i]) != 0):
                pred += [boxes[0][i].numpy()]
                pred_cls += [1]
                pred_conf += [scores[0][i].numpy()]
                gt_bb += [truth[:4].numpy()]
                gt_cls += [truth[4:5].numpy()[0]]

                iou = bb_intersection_over_union(boxes[0][i], truth).numpy()
                detection = {
                    "image": [index],
                    "confidence": [scores[0][i].numpy()],
                }
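                # A detection counts as a true positive only when both its
                # confidence and its IoU with the matched truth reach 0.5; a
                # box with IoU below 0.5 is a false positive; a box that
                # overlaps a non-empty truth but misses the confidence
                # threshold is counted as a missed detection (false negative).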
                if scores[0][i] >= 0.5 and iou >= 0.5:
                    detection["TP"] = [1]
                    detection["FP"] = [0]
                elif iou < 0.5:
                    detection["TP"] = [0]
                    detection["FP"] = [1]
                elif tf.reduce_sum(truth).numpy() != 0:
                    false_negative += 1
                detections = detections.append(DataFrame(detection))
        if len(pred) != 0:
            mAP.evaluate(*[
                np.array(pred),
                np.array(pred_cls),
                np.array(pred_conf),
                np.array(gt_bb),
                np.array(gt_cls),
            ])

    detections = detections.sort_values("confidence", ascending=False)
    detections["Acc TP"] = detections["TP"].cumsum()
    detections["Acc FP"] = detections["FP"].cumsum()
    detections["Precision"] = detections["Acc TP"] / (detections["Acc TP"] +
                                                      detections["Acc FP"])
    detections["Recall"] = detections["Acc TP"] / (detections["Acc TP"] +
                                                   false_negative)
    return detections, mAP
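The returned DataFrame already carries cumulative precision and recall per detection, so an interpolated average precision can be read straight off it. A minimal sketch, assuming a hypothetical eval_dataset iterable of (image, labels) pairs and a yolo_model exposing predict():

import numpy as np

detections, mAP = calculate_metrics(eval_dataset, yolo_model)

# 11-point interpolated AP over the accumulated precision/recall columns.
ap = 0.0
for r in np.arange(0.0, 1.1, 0.1):
    above = detections.loc[detections["Recall"] >= r, "Precision"]
    ap += (above.max() if len(above) else 0.0) / 11.0
print("AP (0.5 IoU, 0.5 confidence): {:.3f}".format(ap))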
Example #11
def evaluate_map(frames, nb_class):
    mAP = DetectionMAP(nb_class)
    for frame in frames:
        mAP.evaluate(*frame)
    mAP.plot()
    plt.savefig('./eval.jpg')
    mean_average_precision = []
    for i in range(nb_class):
        precision, recalls = mAP.compute_precision_recall_(i, True)
        average_precision = mAP.compute_ap(precision, recalls)
        mean_average_precision.append(average_precision)
    print(mean_average_precision)
    print("Mean average precision : {:0.2f}".format(sum(mean_average_precision)/len(mean_average_precision)))
Example #12
def calc_mAP(imgs, annot, model, writer, it, local_it):
    regressBoxes = BBoxTransform()
    clipBoxes = ClipBoxes()
    orig_train = model.training
    model.eval()
    threshold = 0.2
    iou_threshold = 0.2
    with torch.no_grad():
        features, regression, classification, anchors = model.model(imgs)
        # get max. confidence
        # there are batch_size out dicts
        out = postprocess(imgs, anchors, regression, classification,
                          regressBoxes, clipBoxes, threshold, iou_threshold)
    mAP_list = []
    for im_idx in range(len(out)):  # iterate through images
        # (1) check ground truth
        curr_annot = annot[im_idx, :, :]  # e.g. (8,5)
        curr_annot = curr_annot[curr_annot[..., 0] != -1.]  # e.g. (6,5)
        gt_boxes = curr_annot[:, :4]
        gt_cls = curr_annot[:, 4]
        if gt_cls.shape[0] == 0:
            continue
        # (2) check prediction
        out_ = out[im_idx]
        pred_boxes = out_['rois']
        pred_classes = out_['class_ids']
        pred_scores = out_['scores']
        curr_img = imgs[im_idx]
        # (3) build map tuple
        map_tuple = tuple()
        map_tuple += (pred_boxes / curr_img.shape[1], )
        map_tuple += (pred_classes, )
        map_tuple += (pred_scores, )
        map_tuple += (gt_boxes.cpu() / curr_img.shape[2], )
        map_tuple += (gt_cls.cpu(), )
        if map_tuple is not None:
            mAP_list.append(map_tuple)
    mAP = DetectionMAP(3)
    overall_mAP = []
    classwise_mAP = []
    for mAP_item in mAP_list:
        mAP.evaluate(*mAP_item)
        ov_, cls_ = mAP.map(class_names=["vehicle", "pedestrian", "cyclist"])
        overall_mAP.append(ov_)
        classwise_mAP.append(cls_)
    key_arrays = {}
    for item in classwise_mAP:
        for k, v in item.items():
            key_arrays.setdefault(k, []).append(v)
    ave = {
        k: reduce(lambda x, y: x + y, v) / len(v)
        for k, v in key_arrays.items()
    }

    if len(overall_mAP) > 0:
        writer.add_scalars('mAP', {'val': np.mean(overall_mAP)}, it)
    for k in ave.keys():
        writer.add_scalars('val mAP {}'.format(k), {'val': ave[k]}, it)
    if orig_train:
        model.train()
    return np.mean(overall_mAP)
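A hedged sketch of how calc_mAP might be driven from a validation loop; the loader, detector wrapper, and SummaryWriter names below are assumptions, not part of the snippet:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/val")  # hypothetical log directory
for step, (imgs, annot) in enumerate(val_loader):  # hypothetical DataLoader
    # `detector` is assumed to expose .model(imgs), .training, .eval() and .train().
    val_map = calc_mAP(imgs, annot, detector, writer, it=step, local_it=step)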
def test_is_iou_thresholded(self):
    IoU = DetectionMAP.compute_IoU_mask(self.pred, self.gt, 0.7)
    valid_IoU = np.argwhere(IoU)
    np.testing.assert_equal(valid_IoU,
                            np.array([[0, 0], [1, 0], [3, 2], [5, 3]]))
def test_is_FN_incremented_properly_if_no_prediction(self):
    mAP = DetectionMAP(3)
    mAP.evaluate(np.array([]), np.array([]), np.array([]), self.gt, self.gt_cls)
    self.assertEqual(mAP.total_accumulators[0][0].FN, 2)
    self.assertEqual(mAP.total_accumulators[0][1].FN, 1)
    self.assertEqual(mAP.total_accumulators[0][2].FN, 1)
Example #16
    # results = read_json_file("/Users/Darragh/College/Dissertation/mean_average_precision/test_files/mobile_results_newnew.txt")
    # other_results = read_other_file("/Users/Darragh/College/Dissertation/mean_average_precision/test_files/od-test_gt_new.txt")
    # results = read_json_file(
    #     "/Users/Darragh/College/Dissertation/mean_average_precision/test_files/pred.txt")
    other_results = read_other_file(
        "/Users/Darragh/College/Dissertation/mean_average_precision/test_files/gt.txt"
    )
    # other_results = read_other_file(
    #     "../test_files/filtered_gt.txt")

    # frames = [(pre_bb1, pre_cls1, pre_conf1, pre_bb2, pre_cls2)]
    n_class = 3

    # frames = convert_all_frames(results, other_results, convert_results(results))

    frames = no_gt_filter(results, other_results)

    mAP = DetectionMAP(n_class)
    for i, frame in enumerate(frames):
        print("Evaluate frame {}".format(i))
        if (i == 213):
            print("here")

        if (i == 123):
            show_frame(*frame)
        mAP.evaluate(*frame)

    mAP.plot()
    plt.show()
    #plt.savefig("pr_curve_example.png")
Example #17
    def evaluate(self,
                 generator,
                 iou_threshold=0.3,
                 score_threshold=0.3,
                 max_detections=100,
                 save_path=None):
        """ Evaluate a given dataset using a given model.
        code originally from https://github.com/fizyr/keras-retinanet

        # Arguments
            generator       : The generator that represents the dataset to evaluate.
            model           : The model to evaluate.
            iou_threshold   : The threshold used to consider when a detection is positive or negative.
            score_threshold : The score confidence threshold to use for detections.
            max_detections  : The maximum number of detections to use per image.
            save_path       : The path to save images with visualized detections to.
        # Returns
            A dict mapping class names to mAP scores.
        """
        # gather all detections and annotations
        all_detections = [[None for i in range(generator.num_classes())]
                          for j in range(generator.size())]
        all_annotations = [[None for i in range(generator.num_classes())]
                           for j in range(generator.size())]

        mAP = DetectionMAP(4, 100, 0.3)  # Initialise metric

        timeHistory = []
        for i in range(generator.size()):
            raw_image = generator.load_image(i)
            raw_height, raw_width, raw_channels = raw_image.shape

            # make the boxes and the labels
            start = time.time()
            pred_boxes = self.predict(raw_image)
            timeHistory.append(time.time() - start)

            score = np.array([box.score for box in pred_boxes])
            pred_labels = np.array([box.label for box in pred_boxes])

            if len(pred_boxes) > 0:
                pred_boxes = np.array([[
                    box.xmin * raw_width, box.ymin * raw_height,
                    box.xmax * raw_width, box.ymax * raw_height, box.score
                ] for box in pred_boxes])
            else:
                pred_boxes = np.array([[]])

            # sort the boxes and the labels according to scores
            score_sort = np.argsort(-score)
            pred_labels = pred_labels[score_sort]
            pred_boxes = pred_boxes[score_sort]

            # copy detections to all_detections
            for label in range(generator.num_classes()):
                all_detections[i][label] = pred_boxes[pred_labels == label, :]

            annotations = generator.load_annotation(i)

            # copy detections to all_annotations
            for label in range(generator.num_classes()):
                all_annotations[i][label] = annotations[annotations[:, 4] ==
                                                        label, :4].copy()

            # #===================================================
            # # INICIO PR

            # print(pred_boxes[:,0:4])
            # print(pred_labels)
            # print(score)
            # print(annotations[:,0:4])
            # print(annotations[:,4])

            mAP.evaluate(pred_boxes[:, 0:4], pred_labels, score,
                         annotations[:, 0:4], annotations[:, 4])

            # # FINAL PR
            # #===================================================

        # compute mAP by comparing all detections and all annotations
        average_precisions = {}

        for label in range(generator.num_classes()):
            false_positives = np.zeros((0, ))
            true_positives = np.zeros((0, ))
            scores = np.zeros((0, ))
            num_annotations = 0.0

            for i in range(generator.size()):
                detections = all_detections[i][label]
                annotations = all_annotations[i][label]
                num_annotations += annotations.shape[0]
                detected_annotations = []

                for d in detections:
                    scores = np.append(scores, d[4])

                    if annotations.shape[0] == 0:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                        continue

                    overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                               annotations)
                    assigned_annotation = np.argmax(overlaps, axis=1)
                    max_overlap = overlaps[0, assigned_annotation]

                    if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                        false_positives = np.append(false_positives, 0)
                        true_positives = np.append(true_positives, 1)
                        detected_annotations.append(assigned_annotation)
                    else:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)

            # no annotations -> AP for this class is 0 (is this correct?)
            if num_annotations == 0:
                average_precisions[label] = 0
                continue

            # sort by score
            indices = np.argsort(-scores)
            false_positives = false_positives[indices]
            true_positives = true_positives[indices]

            # compute false positives and true positives
            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            # compute recall and precision
            recall = true_positives / num_annotations
            precision = true_positives / np.maximum(
                true_positives + false_positives,
                np.finfo(np.float64).eps)

            # compute average precision
            average_precision = compute_ap(recall, precision)
            average_precisions[label] = average_precision

        mAP.myPlot(self.labels)
        plt.show()

        return average_precisions, timeHistory
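The method returns per-class average precisions alongside per-image inference times; a minimal sketch of collapsing those into a single mAP figure (the model and generator names are assumptions):

average_precisions, time_history = model.evaluate(val_generator,
                                                  iou_threshold=0.3,
                                                  score_threshold=0.3)
for label, ap in average_precisions.items():
    print("class {}: AP = {:.4f}".format(label, ap))
print("mAP: {:.4f}".format(sum(average_precisions.values()) / len(average_precisions)))
print("mean inference time: {:.3f}s".format(sum(time_history) / len(time_history)))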
Example #18
                     [0.59, 0.24, 1.0, 0.63],
                     [0.55, 0.24, 0.33, 0.7],
                     [0.12, 0.21, 0.31, 0.39],
                     [0.1240625, 0.2109375, 0.859375, 0.39453125],
                     [2.86702722e-01, 5.87677717e-01, 3.90843153e-01, 7.14454949e-01],
                     [2.87590116e-01, 8.76132399e-02, 3.79709303e-01, 2.05121845e-01]])
pred_cls3 = np.array(
    [0, 0, 0, 0, 0, 1, 1, 2, 2])
pred_conf3 = np.array([0.75, 0.90, 0.9, 0.9, 0.5, 0.84,
                       0.1, 0.2363426, 0.02707205])
gt_bb3 = np.array([[0.74609375, 0.58007812, 1.05273438, 0.83007812],
                   [0.57226562, 0.234375, 1.14453125, 0.62890625],
                   [0.1240625, 0.2109375, 0.329375, 0.39453125]])
gt_cls3 = np.array([0, 0, 1])

if __name__ == '__main__':
    frames = [(pred_bb1, pred_cls1, pred_conf1, gt_bb1, gt_cls1),
              (pred_bb2, pred_cls2, pred_conf2, gt_bb2, gt_cls2),
              (pred_bb3, pred_cls3, pred_conf3, gt_bb3, gt_cls3)]
    n_class = 4

    mAP = DetectionMAP(n_class)
    for i, frame in enumerate(frames):
        print("Evaluate frame {}".format(i))
        show_frame(*frame)
        mAP.evaluate(*frame)

    mAP.plot()
    plt.show()
    #plt.savefig("pr_curve_example.png")
Example #19
        pred_cls = np.array(pred[2], dtype=np.int16)
        pred_conf = np.array(pred[3], dtype=np.float32)

        gt_bb = []
        gt_cls = []
        with open(os.path.join(args.gt_dir, gtfname) + '.txt', 'r') as f:
            for line in f:
                line = line.split()
                gt_bb.append(list(map(float, line[1:5])))
                gt_cls.append(CLASS_ID[line[0]])
        gt_bb = np.array(gt_bb, dtype=np.float32)
        bb_normalize(gt_bb, file_id)
        gt_cls = np.array(gt_cls, dtype=np.int16)

        frames.append([file_id, (pred_bb, pred_cls, pred_conf, gt_bb, gt_cls)])

    n_class = 2

    mAP = DetectionMAP(n_class, overlap_threshold=0.7)
    for i, frame in enumerate(frames):
        print("Evaluate frame {}".format(i))
        img = cv2.imread(
            os.path.join(args.gt_dir, '..', 'frames', frame[0]) + '.jpg',
            cv2.IMREAD_COLOR)
        img = img[:, :, [2, 1, 0]]
        # show_frame(*frame[1], background=img)
        mAP.evaluate(*frame[1])

    mAP.plot()
    plt.show()