Example #1
        for k in range(len(pred_labels_list)):
            if pred_labels_list[k] != idx: continue

            scores.append(pred_scores[k])
            ious = utils.bbox_iou(pred_boxes[k:k+1], true_boxes)
            m    = np.argmax(ious)
            if ious[m] > 0.5 and pred_labels_list[k] == true_labels_list[m] and m not in detected:
                detected.append(m)
                true_positives.append(1)
            else:
                true_positives.append(0)

    true_positives  = np.array(true_positives)
    false_positives = 1 - true_positives
    # rank detections by descending score
    indices = np.argsort(-np.array(scores))
    false_positives = false_positives[indices]
    true_positives  = true_positives[indices]
    # cumulative counts of false and true positives at each rank
    false_positives = np.cumsum(false_positives)
    true_positives  = np.cumsum(true_positives)
    # recall over all annotations; precision over the detections made so far,
    # i.e. cumulative TP / (cumulative TP + cumulative FP) at each rank
    recall    = true_positives / np.maximum(num_annotations,
                                            np.finfo(np.float64).eps)
    precision = true_positives / np.maximum(true_positives + false_positives,
                                            np.finfo(np.float64).eps)
    # compute average precision
    average_precision = utils.compute_ap(recall, precision)
    all_aver_precs[CLASSES[idx]] = average_precision

for idx in range(NUM_CLASSES):
    cls_name = CLASSES[idx]
    print("=> Class %10s - AP: %.4f" % (cls_name, all_aver_precs[cls_name]))

print("=> mAP: %.4f" % (sum(all_aver_precs.values()) / NUM_CLASSES))
Example #2
import numpy as np

def evaluate(sess, y_pred, y_true):
    # assumes the surrounding script defines `utils`, `Config`, and the
    # `is_training` placeholder used in the feed_dict below
    NUM_CLASSES = Config.NUM_CLASSES
    CLASSES     = Config.CLASSES
    all_detections   = []
    all_annotations  = []
    all_aver_precs   = {CLASSES[i]:0. for i in range(NUM_CLASSES)}
    for _ in range(868):  # number of evaluation batches
        y_pred_o, y_true_o = sess.run([y_pred, y_true], feed_dict={is_training: False})
        # y_pred_o holds (boxes, objectness confidences, class probabilities)
        pred_boxes, pred_confs, pred_probs = y_pred_o

        true_labels_list, true_boxes_list = [], []
        # slice the ground truth into class probabilities and box coordinates,
        # keeping only grid cells that actually contain an object
        true_probs_temp = y_true_o[..., 5: ]
        true_boxes_temp = y_true_o[..., 0:4]
        object_mask     = true_probs_temp.sum(axis=-1) > 0
        true_probs_temp = true_probs_temp[object_mask]
        true_boxes_temp = true_boxes_temp[object_mask]

        true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist()
        true_boxes_list  += true_boxes_temp.tolist()

        # confidence-weighted class scores, then per-class non-maximum suppression
        pred_boxes, pred_scores, pred_labels = utils.cpu_nms(pred_boxes, pred_confs * pred_probs, NUM_CLASSES,
                                                             score_thresh=0.3, iou_thresh=0.5)
        true_boxes = np.array(true_boxes_list).reshape(-1, 4)  # (N, 4) even when N == 0
        box_centers, box_sizes = true_boxes[:, 0:2], true_boxes[:, 2:4]

        # convert (center_x, center_y, w, h) to (xmin, ymin, xmax, ymax)
        true_boxes[:, 0:2] = box_centers - box_sizes / 2.
        true_boxes[:, 2:4] = true_boxes[:, 0:2] + box_sizes
        pred_labels_list = [] if pred_labels is None else pred_labels.tolist()

        all_detections.append( [pred_boxes, pred_scores, pred_labels_list])
        all_annotations.append([true_boxes, true_labels_list])

    for idx in range(NUM_CLASSES):
        true_positives  = []
        scores = []
        num_annotations = 0

        for i in range(len(all_annotations)):
            pred_boxes, pred_scores, pred_labels_list = all_detections[i]
            true_boxes, true_labels_list              = all_annotations[i]
            detected                                  = []
            num_annotations                          += true_labels_list.count(idx)

            for k in range(len(pred_labels_list)):
                if pred_labels_list[k] != idx: continue

                scores.append(pred_scores[k])
                # no ground truth in this image: any detection of this class is a false positive
                if true_boxes.shape[0] == 0:
                    true_positives.append(0)
                    continue
                ious = utils.bbox_iou(pred_boxes[k:k+1], true_boxes)
                m    = np.argmax(ious)
                if ious[m] > 0.5 and pred_labels_list[k] == true_labels_list[m] and m not in detected:
                    detected.append(m)
                    true_positives.append(1)
                else:
                    true_positives.append(0)

        true_positives  = np.array(true_positives)
        false_positives = 1 - true_positives
        # rank detections by descending score
        indices = np.argsort(-np.array(scores))
        false_positives = false_positives[indices]
        true_positives  = true_positives[indices]
        # cumulative counts of false and true positives at each rank
        false_positives = np.cumsum(false_positives)
        true_positives  = np.cumsum(true_positives)
        # recall over all annotations; precision over the detections made so far,
        # i.e. cumulative TP / (cumulative TP + cumulative FP) at each rank
        recall    = true_positives / np.maximum(num_annotations, np.finfo(np.float64).eps)
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = utils.compute_ap(recall, precision)
        all_aver_precs[CLASSES[idx]] = average_precision
    MAP = sum(all_aver_precs.values()) / NUM_CLASSES
    return MAP
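
utils.bbox_iou is also an external helper. Judging only from how it is called above (a single (1, 4) predicted box against an (N, 4) array of ground-truth corner boxes, with np.argmax applied to the result), it plausibly returns a 1-D vector of IoU values, as in the sketch below; treat the body as an assumption about its interface, not the project's actual code. utils.cpu_nms would be the analogous per-class NMS helper built on top of it.

import numpy as np

def bbox_iou(box, boxes):
    # box: (1, 4); boxes: (N, 4); both as (xmin, ymin, xmax, ymax).
    # Returns a 1-D array of N IoU values.
    xmin = np.maximum(box[0, 0], boxes[:, 0])
    ymin = np.maximum(box[0, 1], boxes[:, 1])
    xmax = np.minimum(box[0, 2], boxes[:, 2])
    ymax = np.minimum(box[0, 3], boxes[:, 3])
    inter  = np.maximum(xmax - xmin, 0.) * np.maximum(ymax - ymin, 0.)
    area_a = (box[0, 2] - box[0, 0]) * (box[0, 3] - box[0, 1])
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / np.maximum(area_a + area_b - inter, np.finfo(np.float64).eps)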