Example No. 1
def compute_gt_annotations(anchors,
                           annotations,
                           negative_overlap=0.4,
                           positive_overlap=0.5):
    """ Obtain indices of gt annotations with the greatest overlap.

    Args
        anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        annotations: np.array of shape (K, 5) for (x1, y1, x2, y2, label).
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).

    Returns
        positive_indices: indices of positive anchors
        ignore_indices: indices of ignored anchors
        argmax_overlaps_inds: ordered overlaps indices
    """

    overlaps = compute_overlap(anchors.astype(np.float64),
                               annotations.astype(np.float64))
    argmax_overlaps_inds = np.argmax(overlaps, axis=1)
    max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]

    # assign "dont care" labels
    positive_indices = max_overlaps >= positive_overlap
    ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices

    return positive_indices, ignore_indices, argmax_overlaps_inds
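
All of the examples on this page rely on a compute_overlap(boxes, query_boxes) helper that returns the pairwise IoU matrix. In projects such as keras-retinanet it is implemented in Cython for speed; the pure-NumPy sketch below is an assumption with the same (N, 4) x (K, 4) -> (N, K) contract, not the original implementation:

import numpy as np

def compute_overlap(boxes, query_boxes):
    """Pairwise IoU between boxes (N, 4) and query_boxes (K, 4), both (x1, y1, x2, y2)."""
    # intersection rectangle via broadcasting: (N, 1) against (K,) -> (N, K)
    ix1 = np.maximum(boxes[:, None, 0], query_boxes[None, :, 0])
    iy1 = np.maximum(boxes[:, None, 1], query_boxes[None, :, 1])
    ix2 = np.minimum(boxes[:, None, 2], query_boxes[None, :, 2])
    iy2 = np.minimum(boxes[:, None, 3], query_boxes[None, :, 3])
    intersection = np.clip(ix2 - ix1, 0, None) * np.clip(iy2 - iy1, 0, None)
    # note: some implementations add +1 to widths/heights for inclusive pixel coordinates
    area_boxes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    area_query = (query_boxes[:, 2] - query_boxes[:, 0]) * (query_boxes[:, 3] - query_boxes[:, 1])
    union = area_boxes[:, None] + area_query[None, :] - intersection
    return intersection / np.maximum(union, np.finfo(np.float64).eps)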
Example No. 2
def evaluate_batch(pred, target, score, iou_threshold):

    pred_result = {}
    pred_annotation = {}
    pred_size = pred.shape[0]
    detected_annotations = []
    for i in range(pred_size):
        pred_label = int(pred[i, 0])
        if pred_label not in pred_result:
            pred_result[pred_label] = []

        # record (score, 1 if true positive else 0)
        s_ = float(score[i, 0])

        box = np.expand_dims(pred[i, 1:5].astype(np.float64), axis=0)
        overlap = compute_overlap(box, target[:, 1:5].astype(np.float64))
        # argmax returns a length-1 array here; take the scalar index
        assigned_target = int(np.argmax(overlap, axis=1)[0])
        max_overlap = overlap[0, assigned_target]

        if max_overlap >= iou_threshold and assigned_target not in detected_annotations:
            record = (s_, 1)
            detected_annotations.append(assigned_target)
        else:
            record = (s_, 0)
        pred_result[pred_label].append(record)

    for j in range(target.shape[0]):
        target_label = int(target[j, 0])
        if target_label not in pred_annotation:
            pred_annotation[target_label] = 0
        pred_annotation[target_label] += 1
    return pred_result, pred_annotation
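
A minimal usage sketch for evaluate_batch, with made-up toy arrays (rows of pred and target are (label, x1, y1, x2, y2); score is a column vector aligned with pred), assuming the IoU helper sketched above:

import numpy as np

pred = np.array([[0, 10, 10, 50, 50],
                 [0, 12, 11, 48, 52],   # near-duplicate of the first box
                 [1, 60, 60, 90, 90]], dtype=np.float64)
score = np.array([[0.9], [0.6], [0.3]])
target = np.array([[0, 11, 10, 49, 51],
                   [1, 55, 58, 95, 92]], dtype=np.float64)

pred_result, pred_annotation = evaluate_batch(pred, target, score, iou_threshold=0.5)
# pred_result     -> {0: [(0.9, 1), (0.6, 0)], 1: [(0.3, 1)]}
#                    (a second match of an already-detected gt box is recorded as 0)
# pred_annotation -> {0: 1, 1: 1}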
Example No. 3
def compute_gt_annotations(anchors,
                           annotations,
                           negative_overlap=0.4,
                           positive_overlap=0.5):
    # (N, K)
    overlaps = compute_overlap(anchors.astype(np.float64),
                               annotations.astype(np.float64))
    # (N, )
    argmax_overlaps_indices = np.argmax(overlaps, axis=1)
    # (N, )
    max_overlaps = overlaps[np.arange(overlaps.shape[0]),
                            argmax_overlaps_indices]

    # assign "dont care" labels
    # (N, )
    positive_indices = max_overlaps >= positive_overlap

    # in case some gt boxes have no matched positive anchor, force their best anchor to be positive
    nonzero_indices = np.nonzero(overlaps == np.max(overlaps, axis=0))
    positive_indices[nonzero_indices[0]] = 1

    # (N, )
    ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices

    return positive_indices, ignore_indices, argmax_overlaps_indices
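
The np.nonzero rescue step matters when no anchor clears positive_overlap for some gt box: the anchor(s) with the column-wise maximum overlap are forced positive. A toy run (made-up numbers, assuming the IoU helper sketched earlier):

import numpy as np

anchors = np.array([[0, 0, 10, 10],
                    [20, 20, 30, 30]], dtype=np.float64)
annotations = np.array([[0, 0, 10, 10, 0],
                        [18, 18, 40, 40, 1]], dtype=np.float64)

positive, ignore, inds = compute_gt_annotations(anchors, annotations)
# anchor 1 overlaps gt 1 with IoU ~0.21, below positive_overlap=0.5, yet it is
# the best anchor for that gt, so the rescue step still marks it positive
print(positive)  # -> [ True  True]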
Example No. 4
def get_gt_indices(anchors,
                   annotations,
                   positive_overlap=0.5,
                   negative_overlap=0.4):

    overlaps = compute_overlap(anchors.astype(np.float64),
                               annotations.astype(np.float64))
    # index of the gt box with the highest overlap for each anchor
    best_overlap_each_anchor_indices = np.argmax(overlaps, axis=1)
    best_overlap_each_anchor = overlaps[np.arange(overlaps.shape[0]),
                                        best_overlap_each_anchor_indices]

    # print(np.max(best_overlap_each_anchor))
    positive_indices = best_overlap_each_anchor > positive_overlap
    ignore_indices = (best_overlap_each_anchor >
                      negative_overlap) & ~positive_indices

    return positive_indices, ignore_indices, best_overlap_each_anchor_indices
Example No. 5
    def handle_multi_labels(self, boxes, labels, scores):
        overlap = compute_overlap(boxes, boxes)
        boxes_number = boxes.shape[0]

        fathers = []
        for i in range(0, boxes_number):
            for j in range(i, boxes_number):
                father, _ = self.data_loader.judge_similar(labels[i, 0], labels[j, 0])
                if father != -1 and overlap[i, j] > 0.5:
                    fathers.append(father)

        fathers = set(fathers)
        sons = list(set(range(boxes_number)) - fathers)
        sons.sort()
        boxes = boxes[sons]
        scores = scores[sons]
        labels = labels[sons]

        return boxes, labels, scores
Example No. 6
def compute_gt_annotations(
        anchors,
        annotations,
        negative_overlap=0.4,
        positive_overlap=0.5
):
    """
    Obtain indices of gt annotations with the greatest overlap.

    Args
        anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        annotations: np.array of shape (K, 5) for (x1, y1, x2, y2, label).
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).

    Returns
        positive_indices: indices of positive anchors, (N, )
        ignore_indices: indices of ignored anchors, (N, )
        argmax_overlaps_inds: ordered overlaps indices, (N, )
    """
    # (N, K)
    overlaps = compute_overlap(anchors.astype(np.float64), annotations.astype(np.float64))
    # (N, )
    argmax_overlaps_inds = np.argmax(overlaps, axis=1)
    # (N, )
    max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]

    # assign "dont care" labels
    # (N, )
    positive_indices = max_overlaps >= positive_overlap
    
    # force the best-overlapping anchor for each gt box to be positive,
    # so that every gt box has at least one positive anchor
    max_overlapping_anchor_box_indices = np.argmax(overlaps, axis=0)
    positive_indices[max_overlapping_anchor_box_indices] = True

    # (N, )
    ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices

    return positive_indices, ignore_indices, argmax_overlaps_inds
Example No. 7
def compute_gt_annotations(anchors,
                           annotations,
                           negative_overlap=0.4,
                           positive_overlap=0.5):
    """
    Obtain indices of gt annotations with the greatest overlap.

    Args
        anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        annotations: np.array of shape (K, 5) for (x1, y1, x2, y2, label).
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).

    Returns
        positive_indices: indices of positive anchors, (N, )
        ignore_indices: indices of ignored anchors, (N, )
        argmax_overlaps_inds: ordered overlaps indices, (N, )
    """
    # (N, K)
    overlaps = compute_overlap(anchors.astype(np.float64),
                               annotations.astype(np.float64))
    # (N, )
    argmax_overlaps_inds = np.argmax(overlaps, axis=1)
    # (N, )
    max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]

    # assign "dont care" labels
    # (N, )
    positive_indices = max_overlaps >= positive_overlap

    # adam: in case some gt boxes have no matched positive anchor (rescue step disabled here)
    # nonzero_inds = np.nonzero(overlaps == np.max(overlaps, axis=0))
    # positive_indices[nonzero_inds[0]] = 1

    # (N, )
    ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices

    return positive_indices, ignore_indices, argmax_overlaps_inds
Example No. 8
def evaluate(generator,
             model,
             iou_threshold=0.5,
             score_threshold=0.01,
             max_detections=100,
             visualize=False,
             epoch=0):
    """
    Evaluate a given dataset using a given model.

    Args:
        generator: The generator that represents the dataset to evaluate.
        model: The model to evaluate.
        iou_threshold: The IoU threshold used to decide whether a detection counts as positive or negative.
        score_threshold: The score confidence threshold to use for detections.
        max_detections: The maximum number of detections to use per image.
        visualize: Show the visualized detections or not.
        epoch: epoch index

    Returns:
        A dict mapping class names to mAP scores.

    """
    # gather all detections and annotations
    all_detections = _get_detections(generator,
                                     model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections,
                                     visualize=visualize)
    all_annotations = _get_annotations(generator)
    average_precisions = {}
    num_tp = 0
    num_fp = 0

    # process detections and annotations
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        if false_positives.shape[0] == 0:
            num_fp += 0
        else:
            num_fp += false_positives[-1]
        if true_positives.shape[0] == 0:
            num_tp += 0
        else:
            num_tp += true_positives[-1]

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)
        print("Precision: " + str(precision))
        print("Recall: " + str(recall))

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

    # note: num_annotations here only reflects the last class processed
    print('num_fp={}, num_tp={}, num_annotations={}'.format(
        num_fp, num_tp, num_annotations))

    return average_precisions
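
Every evaluate variant here hands the final area-under-the-curve step to _compute_ap. Below is a common all-point-interpolation implementation; it mirrors the keras-retinanet helper, but treat it as a reference sketch rather than the exact function these snippets import:

import numpy as np

def _compute_ap(recall, precision):
    """Average precision as the area under the precision-recall curve."""
    # pad the curve so it starts at recall 0 and ends at recall 1
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    # make precision monotonically decreasing (upper envelope of the curve)
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # sum the rectangle areas at the points where recall changes
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])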
Example No. 9
def evaluate(generator,
             model,
             iou_threshold=0.25,
             score_threshold=0.05,
             max_detections=500,
             save_path=None):
    """ Evaluate a given dataset using a given model.
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The IoU threshold used to decide whether a detection counts as positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """
    # gather all detections and annotations
    ## all detections also include those that don't reach the threshold
    all_detections, image_names, detection_list, scores_list, labels_list = _get_detections(
        generator,
        model,
        score_threshold=score_threshold,
        max_detections=max_detections,
        save_path=save_path)
    all_annotations = _get_annotations(generator)
    #print(image_names)
    #print(detection_list)
    ## average_precisions is initialized as a dictionary
    average_precisions = {}
    true_positives_dict = {}
    false_positives_dict = {}
    iou_dict = {}

    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))

    ## create different lists that I need
    iou = []
    #all_completed_detections = []
    #image_index = []
    #object_type_df = []

    # process detections and annotations
    ## all of this is done per class,
    ## but only for classes that are actually present,
    ## so it doesn't cover classes that were detected but never annotated
    ## (note that generator.num_classes() = 7)
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0
        num_detections = 0.0
        iou_per_class = []

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:

                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                ## here we check whether the box is a true or false positive
                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    ## I assume that IoU is only calculated for TP boxes
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    #print("bbox overlap: ",max_overlap)
                    # np.asscalar was removed in NumPy 1.23; use .item() instead
                    iou.append(max_overlap.item())
                    iou_per_class.append(max_overlap.item())
                    #print("iou list: ",iou)
                    detected_annotations.append(assigned_annotation)
                    #all_completed_detections.append(d)
                    #image_index.append(i)
                    #print(image_index)
                    num_detections += 1
                    #print(detections)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    ## the way (I think) they do it in EAD: if the overlap doesn't reach the threshold, it's 0
                    iou.append(0)
                    iou_per_class.append(0)
            #print("Scores: ",scores)

        # no annotations -> AP for this class is 0 (is this correct?)
        ## `continue` disabled below so the number of FPs is still counted
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            #continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        ## cumsum returns the cumulative sum of the elements along a given axis
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        ## we divide an array by a scalar
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)

        ## average_precision will be in the following dictionary format: {0: (0.31672005587085506, 1112.0), 1: (0.08074755526818446, 107.0), 2: (0.19361940213603135, 291.0), 3: (0.12725467367537643, 57.0), 4: (0.20030495872509274, 121.0), 5: (0.06083609353943108, 481.0), 6: (0.41498412085028863, 89.0)}
        average_precisions[label] = average_precision, num_annotations
        ## added dictionaries for TP and FP
        ## I use max, because true_positives is an array with accumulating TP's
        if true_positives.size != 0:
            true_positives_dict[label] = max(true_positives), num_annotations
        else:
            true_positives_dict[label] = 0, num_annotations
        if false_positives.size != 0:
            false_positives_dict[label] = max(false_positives), num_annotations
        else:
            false_positives_dict[label] = 0, num_annotations
        iou_dict[label] = np.mean(iou), num_annotations
        #print("Label: ",generator.label_to_name(label))
        #print("FP: ",false_positives_dict)
        #print("TP: ",true_positives_dict)
        #print("AP: ", average_precision)
        #print("precision: ",precision)
        #print("recall: ",recall)

    return false_positives_dict, true_positives_dict, iou_dict, average_precisions, iou, image_names, detection_list, scores_list, labels_list
Example No. 10
def evaluate(generator,
             model,
             iou_threshold=0.5,
             score_threshold=0.01,
             max_detections=100,
             visualize=False,
             flip_test=False,
             keep_resolution=False):

    all_detections = _get_detections(generator,
                                     model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections,
                                     visualize=visualize,
                                     flip_test=flip_test,
                                     keep_resolution=keep_resolution)
    all_annotations = _get_annotations(generator)
    average_precisions = {}

    # process detections and annotations
    for label in range(generator.num_classes()):

        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

    return average_precisions
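
The returned dict maps label -> (AP, num_annotations), so a weighted mAP can be derived as below. This is a sketch in the style of keras-retinanet's Evaluate callback; generator and model are placeholders for your own objects:

average_precisions = evaluate(generator, model, iou_threshold=0.5)

total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
    print('{:.0f} instances of class {} with average precision: {:.4f}'.format(
        num_annotations, generator.label_to_name(label), average_precision))
    total_instances.append(num_annotations)
    precisions.append(average_precision)

# weight each class by its number of annotations; guard against an empty dataset
mean_ap = sum(a * n for a, n in zip(precisions, total_instances)) / max(
    sum(total_instances), 1)
print('mAP: {:.4f}'.format(mean_ap))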
Example No. 11
def evaluate(generator,
             model,
             iou_threshold=0.5,
             score_threshold=0.05,
             max_detections=100,
             save_path=None):
    """ Evaluate a given dataset using a given model.

    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """
    all_detections, all_inferences_time = _get_detections(
        generator,
        model,
        score_threshold=score_threshold,
        max_detections=max_detections,
        save_path=save_path)
    all_annotations = _get_annotations(generator)
    average_precisions = {}

    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue
        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0
        num_detections = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            # annotations has shape (number of boxes, 4)
            num_annotations += annotations.shape[0]
            num_detections += detections.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    # np.append returns a new array; the result must be assigned back
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d[:-1], axis=0),
                                           annotations)
                assigned_labels = np.argmax(overlaps, axis=1)[0]
                max_overlap = overlaps[0, assigned_labels]

                # What if several detections all exceed the IoU threshold for the same GT?
                # detections are already sorted by score, so only the first (highest-scoring) one counts
                if max_overlap > iou_threshold and assigned_labels not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_labels)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        if num_annotations == 0:
            if num_detections == 0:
                average_precisions[label] = 1, 0
                continue
            else:
                average_precisions[label] = 0, 0
                continue

        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # np.cumsum([0,1,0,1,1])
        # array([0, 1, 1, 2, 3])
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # recall = TP / number of gt boxes
        # precision = TP / (TP + FP)
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

    inference_time_mean = np.sum(all_inferences_time) / generator.size()
    return average_precisions, inference_time_mean
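
Usage sketch (placeholder names): the second return value is the mean per-image inference time, which is handy to report alongside the accuracy numbers:

import numpy as np

average_precisions, inference_time_mean = evaluate(generator, model)
mean_ap = np.mean([ap for ap, _ in average_precisions.values()])
print('mAP: {:.4f}, mean inference time: {:.3f}s per image'.format(
    mean_ap, inference_time_mean))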
Example No. 12
def evaluate(generator,
             model,
             iou_threshold=0.5,
             score_threshold=0.05,
             max_detections=100,
             save_path=None,
             epoch=0):
    """
    Evaluate a given dataset using a given model.

    Args:
        generator: The generator that represents the dataset to evaluate.
        model: The model to evaluate.
        iou_threshold: The IoU threshold used to decide whether a detection counts as positive or negative.
        score_threshold: The score confidence threshold to use for detections.
        max_detections: The maximum number of detections to use per image.
        save_path: The path to save images with visualized detections to.
        epoch: epoch index

    Returns:
        A dict mapping class names to mAP scores.

    """
    # gather all detections and annotations
    all_detections = _get_detections(generator,
                                     model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections,
                                     save_path=save_path)
    all_annotations = _get_annotations(generator)
    average_precisions = {}

    # all_detections = pickle.load(open('fcos/all_detections_11.pkl', 'rb'))
    # all_annotations = pickle.load(open('fcos/all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('fcos/all_detections_{}.pkl'.format(epoch + 1), 'wb'))
    # pickle.dump(all_annotations, open('fcos/all_annotations_{}.pkl'.format(epoch + 1), 'wb'))

    # process detections and annotations
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

    return average_precisions
Example No. 13
with open('car.txt', 'r') as f:
    img_lines = f.readlines()
save_path = '/Users/yanyan/data/plot_img_0'
if not os.path.exists(save_path):
    os.makedirs(save_path)

bbox = []
for img_line in img_lines:
    img_info = img_line.strip('\n').split('\t')
    img_path = img_info[0]

    img_boxes = img_info[1:]
    img = cv2.imread(root + img_path)
    img, scale = preprocess_image(img, image_size)
    for img_box in img_boxes:
        box = list(map(int, img_box.split(',')[:-2]))
        bbox.append(box)

# note: `scale` comes from the last image processed; this assumes every image is resized with the same scale
bbox_array = np.array(bbox) * scale
# (644, 49104)
overlaps = compute_overlap(bbox_array.astype(np.float64),
                           anchors.astype(np.float64))

print(overlaps.shape)
argmax_overlaps_inds = np.argmax(overlaps, axis=1)
max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]
ignore = max_overlaps < 0.5

print(sum(ignore))
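
The script above effectively measures anchor coverage at a single IoU cut-off. A small extension (a sketch reusing the max_overlaps array computed above) sweeps several thresholds, which helps when tuning anchor scales and ratios:

# how many ground-truth boxes are left without a matching anchor at each threshold
for threshold in (0.3, 0.4, 0.5, 0.6, 0.7):
    unmatched = int((max_overlaps < threshold).sum())
    print('IoU < {:.1f}: {} / {} gt boxes unmatched'.format(
        threshold, unmatched, max_overlaps.shape[0]))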
Example No. 14
def evaluate(
    generator,
    model,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
    diameter_threshold=0.1,
):
    """ Evaluate a given dataset using a given model.

    # Arguments
        generator: The generator that represents the dataset to evaluate.
        model: The model to evaluate.
        iou_threshold: The IoU threshold used to decide whether a detection counts as positive or negative.
        score_threshold: The score confidence threshold to use for detections.
        max_detections: The maximum number of detections to use per image.
        save_path: The path to save images with visualized detections to.
        diameter_threshold: Threshold relative to the object's diameter under which a predicted 6D pose is considered correct.
    # Returns
        Several dictionaries mapping class names to the computed metrics.
    """
    # gather all detections and annotations
    all_detections = _get_detections(generator,
                                     model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections,
                                     save_path=save_path)
    all_annotations = _get_annotations(generator)
    all_3d_models = generator.get_models_3d_points_dict()
    all_3d_model_diameters = generator.get_objects_diameter_dict()
    average_precisions = {}
    add_metric = {}
    add_s_metric = {}
    metric_5cm_5degree = {}
    translation_diff_metric = {}
    rotation_diff_metric = {}
    metric_2d_projection = {}
    mixed_add_and_add_s_metric = {}
    average_point_distance_error_metric = {}
    average_sym_point_distance_error_metric = {}
    mixed_average_point_distance_error_metric = {}

    # process detections and annotations
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0
        true_positives_add = np.zeros((0, ))
        true_positives_add_s = np.zeros((0, ))
        model_3d_points = all_3d_models[label]
        model_3d_diameter = all_3d_model_diameters[label]
        true_positives_5cm_5degree = np.zeros((0, ))
        translation_diffs = np.zeros((0, ))
        rotation_diffs = np.zeros((0, ))
        true_positives_2d_projection = np.zeros((0, ))
        point_distance_errors = np.zeros((0, ))
        point_sym_distance_errors = np.zeros((0, ))

        for i in tqdm(range(generator.size())):
            detections = all_detections[i][label][0]
            detections_rotations = all_detections[i][label][1]
            detections_translations = all_detections[i][label][2]
            annotations = all_annotations[i][label][0]
            annotations_rotations = all_annotations[i][label][1]
            annotations_translations = all_annotations[i][label][2]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d, d_rotation, d_translation in zip(detections,
                                                    detections_rotations,
                                                    detections_translations):
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                assigned_rotation = annotations_rotations[
                    assigned_annotation, :3]
                assigned_translation = annotations_translations[
                    assigned_annotation, :]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                    #correct 2d object detection => check if the 6d pose is also correct
                    is_correct_6d_pose_add, mean_distances_add, transformed_points_gt, transformed_points_pred = check_6d_pose_add(
                        model_3d_points,
                        model_3d_diameter,
                        rotation_gt=generator.axis_angle_to_rotation_mat(
                            assigned_rotation),
                        translation_gt=np.squeeze(assigned_translation),
                        rotation_pred=generator.axis_angle_to_rotation_mat(
                            d_rotation),
                        translation_pred=d_translation,
                        diameter_threshold=diameter_threshold)

                    is_correct_6d_pose_add_s, mean_distances_add_s = check_6d_pose_add_s(
                        model_3d_points,
                        model_3d_diameter,
                        rotation_gt=generator.axis_angle_to_rotation_mat(
                            assigned_rotation),
                        translation_gt=np.squeeze(assigned_translation),
                        rotation_pred=generator.axis_angle_to_rotation_mat(
                            d_rotation),
                        translation_pred=d_translation,
                        diameter_threshold=diameter_threshold)

                    is_correct_6d_pose_5cm_5degree, translation_distance, rotation_distance = check_6d_pose_5cm_5degree(
                        rotation_gt=generator.axis_angle_to_rotation_mat(
                            assigned_rotation),
                        translation_gt=np.squeeze(assigned_translation),
                        rotation_pred=generator.axis_angle_to_rotation_mat(
                            d_rotation),
                        translation_pred=d_translation)

                    is_correct_2d_projection = check_6d_pose_2d_reprojection(
                        model_3d_points,
                        rotation_gt=generator.axis_angle_to_rotation_mat(
                            assigned_rotation),
                        translation_gt=np.squeeze(assigned_translation),
                        rotation_pred=generator.axis_angle_to_rotation_mat(
                            d_rotation),
                        translation_pred=d_translation,
                        camera_matrix=generator.load_camera_matrix(i),
                        pixel_threshold=5.0)

                    # draw the transformed predicted points into the image to test the transformation
                    test_draw(generator.load_image(i),
                              generator.load_camera_matrix(i),
                              transformed_points_pred, i)

                    if is_correct_6d_pose_add:
                        true_positives_add = np.append(true_positives_add, 1)
                    if is_correct_6d_pose_add_s:
                        true_positives_add_s = np.append(
                            true_positives_add_s, 1)
                    if is_correct_6d_pose_5cm_5degree:
                        true_positives_5cm_5degree = np.append(
                            true_positives_5cm_5degree, 1)
                    if is_correct_2d_projection:
                        true_positives_2d_projection = np.append(
                            true_positives_2d_projection, 1)

                    translation_diffs = np.append(translation_diffs,
                                                  translation_distance)
                    rotation_diffs = np.append(rotation_diffs,
                                               rotation_distance)
                    point_distance_errors = np.append(point_distance_errors,
                                                      mean_distances_add)
                    point_sym_distance_errors = np.append(
                        point_sym_distance_errors, mean_distances_add_s)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

        #compute add accuracy
        add_accuracy = np.sum(true_positives_add) / num_annotations
        add_metric[label] = add_accuracy, num_annotations

        #compute add-s accuracy
        add_s_accuracy = np.sum(true_positives_add_s) / num_annotations
        add_s_metric[label] = add_s_accuracy, num_annotations

        #compute 5cm 5degree accuracy
        accuracy_5cm_5degree = np.sum(
            true_positives_5cm_5degree) / num_annotations
        metric_5cm_5degree[label] = accuracy_5cm_5degree, num_annotations

        #compute the mean and std of the translation- and rotation differences
        mean_translations = np.mean(translation_diffs)
        std_translations = np.std(translation_diffs)
        translation_diff_metric[label] = mean_translations, std_translations

        mean_rotations = np.mean(rotation_diffs)
        std_rotations = np.std(rotation_diffs)
        rotation_diff_metric[label] = mean_rotations, std_rotations

        #compute 2d projection accuracy
        accuracy_2d_projection = np.sum(
            true_positives_2d_projection) / num_annotations
        metric_2d_projection[label] = accuracy_2d_projection, num_annotations

        #compute the mean and std of the transformed point errors
        mean_point_distance_errors = np.mean(point_distance_errors)
        std_point_distance_errors = np.std(point_distance_errors)
        average_point_distance_error_metric[
            label] = mean_point_distance_errors, std_point_distance_errors

        #compute the mean and std of the symmetric transformed point errors
        mean_point_sym_distance_errors = np.mean(point_sym_distance_errors)
        std_point_sym_distance_errors = np.std(point_sym_distance_errors)
        average_sym_point_distance_error_metric[
            label] = mean_point_sym_distance_errors, std_point_sym_distance_errors

    #fill in the add values for asymmetric objects and add-s for symmetric objects
    for label, add_tuple in add_metric.items():
        add_s_tuple = add_s_metric[label]
        if generator.class_labels_to_object_ids[
                label] in generator.symmetric_objects:
            mixed_add_and_add_s_metric[label] = add_s_tuple
        else:
            mixed_add_and_add_s_metric[label] = add_tuple

    #fill in the average point distance values for asymmetric objects and the corresponding average sym point distances for symmetric objects
    for label, asym_tuple in average_point_distance_error_metric.items():
        sym_tuple = average_sym_point_distance_error_metric[label]
        if generator.class_labels_to_object_ids[
                label] in generator.symmetric_objects:
            mixed_average_point_distance_error_metric[label] = sym_tuple
        else:
            mixed_average_point_distance_error_metric[label] = asym_tuple

    return average_precisions, add_metric, add_s_metric, metric_5cm_5degree, translation_diff_metric, rotation_diff_metric, metric_2d_projection, mixed_add_and_add_s_metric, average_point_distance_error_metric, average_sym_point_distance_error_metric, mixed_average_point_distance_error_metric
Example No. 15
def evaluate_mAP(generator,
                 model,
                 method='iou',
                 start_threshold=0.5,
                 score_threshold=0.01,
                 max_detections=100,
                 visualize=False,
                 epoch=0):
    """
    Evaluate a given dataset using a given model for mAPstart_threshold:0.95.
    Args:
        generator: The generator that represents the dataset to evaluate.
        model: The model to evaluate.
        iou_threshold: The threshold used to consider when a detection is positive or negative.
        score_threshold: The score confidence threshold to use for detections.
        max_detections: The maximum number of detections to use per image.
        visualize: Show the visualized detections or not.
    Returns:
        A dict mapping class names to mAP scores.
    """
    if method == 'iou':
        from utils.compute_overlap import compute_overlap
    elif method == 'piou':
        from utils.compute_overlap_piou import compute_overlap

    # gather all detections and annotations
    all_detections = _get_detections(generator,
                                     model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections)
    all_annotations = _get_annotations(generator)
    average_precisions = {}
    num_tp = 0
    num_fp = 0

    # process detections and annotations
    for iou_threshold in np.arange(start_threshold, 1.0, step=0.05):
        for label in progressbar.progressbar(
                range(generator.num_classes()),
                prefix=f'Evaluating threshold {iou_threshold:.2f}: '):
            if not generator.has_label(label):
                continue

            false_positives = np.zeros((0, ))
            true_positives = np.zeros((0, ))
            scores = np.zeros((0, ))
            num_annotations = 0.0

            for i in range(generator.size()):
                detections = all_detections[i][label]
                annotations = all_annotations[i][label]
                num_annotations += annotations.shape[0]
                detected_annotations = []

                for d in detections:
                    scores = np.append(scores, d[4])

                    if annotations.shape[0] == 0:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                        continue
                    overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                               annotations)
                    assigned_annotation = np.argmax(overlaps, axis=1)
                    max_overlap = overlaps[0, assigned_annotation]

                    if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                        false_positives = np.append(false_positives, 0)
                        true_positives = np.append(true_positives, 1)
                        detected_annotations.append(assigned_annotation)
                    else:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)

            # no annotations -> AP for this class is 0 (is this correct?)
            # stored in the dict form expected by the aggregation steps below
            if num_annotations == 0:
                if label not in average_precisions:
                    average_precisions[label] = {'map': [], 'ann': []}
                average_precisions[label]['map'].append([0])
                average_precisions[label]['ann'].append([0])
                continue

            # sort by score
            indices = np.argsort(-scores)
            false_positives = false_positives[indices]
            true_positives = true_positives[indices]

            # compute false positives and true positives
            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            if false_positives.shape[0] == 0:
                num_fp += 0
            else:
                num_fp += false_positives[-1]
            if true_positives.shape[0] == 0:
                num_tp += 0
            else:
                num_tp += true_positives[-1]

            # compute recall and precision
            recall = true_positives / num_annotations
            precision = true_positives / np.maximum(
                true_positives + false_positives,
                np.finfo(np.float64).eps)

            # store average precision for the given threshold
            average_precision = _compute_ap(recall, precision)
            if label not in average_precisions:
                average_precisions[label] = {'map': [], 'ann': []}
            average_precisions[label]['map'].append([average_precision])
            average_precisions[label]['ann'].append([num_annotations])

        # average precision for the given iou threshold
        ap_iou = [
            average_precisions[label]['map'][-1]
            for label in average_precisions
        ]
        print(f'AP{iou_threshold:.2f} = {np.mean(ap_iou):.4f}')

    # compute average precision
    for label in average_precisions:
        average_precision = average_precisions[label]['map']
        num_annotations = average_precisions[label]['ann']
        average_precisions[label] = (np.mean(average_precision),
                                     np.mean(num_annotations))

    print('num_fp={}, num_tp={}'.format(num_fp, num_tp))

    return average_precisions
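
Usage sketch (placeholder names): each entry of the returned dict is already averaged over the threshold sweep, so the COCO-style mAP@[start_threshold:0.95] is simply the mean over classes:

import numpy as np

average_precisions = evaluate_mAP(generator, model, method='iou', start_threshold=0.5)
mean_ap = np.mean([ap for ap, _ in average_precisions.values()])
print('mAP@[0.50:0.95] = {:.4f}'.format(mean_ap))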