Example #1
def compute_average_precision_per_class(args, num_true_cases, gt_boxes,
                                        difficult_cases, prediction_file,
                                        iou_threshold, use_2007_metric):
    with open(prediction_file) as f:
        image_ids = []
        boxes = []
        scores = []
        for line in f:
            t = line.rstrip().split(" ")
            image_ids.append(t[0])
            scores.append(float(t[1]))
            box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)
        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()
        for i, image_id in enumerate(image_ids):
            box = boxes[i]

            # VOC uses string image ids; other dataset types use integer ids.
            if args['flow_control']['dataset_type'] != "voc":
                image_id = int(image_id)

            if image_id not in gt_boxes:
                false_positive[i] = 1
                continue

            gt_box = gt_boxes[image_id]
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
            if max_iou > iou_threshold:
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        false_positive[i] = 1
            else:
                false_positive[i] = 1

    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive + 1e-8)
    recall = true_positive / num_true_cases
    _vtp = true_positive[-1] if len(true_positive) != 0 else 0
    _vfp = false_positive[-1] if len(false_positive) != 0 else 0
    if use_2007_metric:
        ap = measurements.compute_voc2007_average_precision(precision, recall)
    else:
        ap = measurements.compute_average_precision(precision, recall)
    # Return overall precision and recall alongside AP so both branches
    # share one signature (the original returned a bare AP in the else branch).
    return ap, _vtp / (_vtp + _vfp + 1e-8), _vtp / num_true_cases
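A minimal usage sketch for the variant above; every value below is a hypothetical placeholder, and gt_boxes/difficult_cases are assumed to be per-image dicts for a single class, as the function expects:

import torch

# Hypothetical inputs for illustration only.
args = {'flow_control': {'dataset_type': 'voc'}}  # VOC: image ids stay strings
ap, prec, rec = compute_average_precision_per_class(
    args,
    num_true_cases=42,                                  # total gt boxes of this class
    gt_boxes={"000001": torch.tensor([[47.0, 239.0, 194.0, 370.0]])},
    difficult_cases={"000001": [0]},                    # 0 = not a difficult case
    prediction_file="det_class_person.txt",             # placeholder path
    iou_threshold=0.5,
    use_2007_metric=True,
)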
Example #2
def compute_average_precision_per_class(num_true_cases, gt_boxes,
                                        difficult_cases, prediction_file,
                                        iou_threshold, use_2007_metric):
    """
    num_true_cases : 実際にそのクラスに属するtarget総数
    """
    with open(prediction_file) as f:
        image_ids = []
        boxes = []
        scores = []
        for line in f:  # read every prediction box for this class
            t = line.rstrip().split(" ")  # t: [image_id, score, box coords]
            image_ids.append(t[0])
            scores.append(float(t[1]))
            box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)
        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)  # negated scores: sort all boxes by descending confidence
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()
        # image_ids: ids of images predicted to contain this class
        # gt_boxes: ground-truth boxes that actually belong to this class
        for i, image_id in enumerate(image_ids):
            box = boxes[i]
            if image_id not in gt_boxes:
                false_positive[i] = 1  # predicted this class in an image with no gt box for it
                continue

            gt_box = gt_boxes[image_id]  # possibly several boxes: the gt boxes of this class in that image
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
            if max_iou > iou_threshold:  # the prediction overlaps some gt box above the IoU threshold
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        # this gt box (max_arg) has not been matched in this image yet
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        # the same gt box was already counted as a true positive,
                        # e.g. duplicate detections of the same object
                        false_positive[i] = 1
            else:
                false_positive[i] = 1

    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive + 1e-8)  # epsilon avoids 0/0 when only difficult cases matched so far
    recall = true_positive / num_true_cases
    if use_2007_metric:
        return measurements.compute_voc2007_average_precision(
            precision, recall)
    else:
        return measurements.compute_average_precision(precision, recall)
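To make the cumulative-sum step concrete, here is a toy sequence (numbers invented for illustration): four detections sorted by confidence, of which the first, second, and fourth hit distinct gt boxes, with three gt boxes in total.

import numpy as np

# Toy per-detection TP/FP flags after matching.
tp = np.array([1, 1, 0, 1]).cumsum()  # running true positives:  [1, 2, 2, 3]
fp = np.array([0, 0, 1, 0]).cumsum()  # running false positives: [0, 0, 1, 1]
precision = tp / (tp + fp)            # [1.0, 1.0, 0.667, 0.75]
recall = tp / 3                       # [0.333, 0.667, 0.667, 1.0]
# AP is the area under this precision-recall curve.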
Example #3
def compute_average_precision_per_class(
    num_true_cases,
    gt_boxes,
    difficult_cases,
    prediction_file,
    iou_threshold,
    use_2007_metric,
):
    with open(str(prediction_file)) as f:
        image_ids = []
        boxes = []
        scores = []
        for line in f:
            t = line.rstrip().split(" ")
            image_ids.append(t[0])
            scores.append(float(t[1]))
            box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)
        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()
        for i, image_id in enumerate(image_ids):
            box = boxes[i]
            if image_id not in gt_boxes:
                false_positive[i] = 1
                continue

            gt_box = gt_boxes[image_id]
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
            if max_iou > iou_threshold:
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        false_positive[i] = 1
            else:
                false_positive[i] = 1

    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive + 1e-8)  # epsilon avoids 0/0 when only difficult cases matched so far
    recall = true_positive / num_true_cases
    if use_2007_metric:
        return measurements.compute_voc2007_average_precision(
            precision, recall)
    else:
        return measurements.compute_average_precision(precision, recall)
Example #4
def compute_average_precision_per_class(num_true_cases, gt_boxes,
                                        difficult_cases, prediction_file,
                                        iou_threshold, use_2007_metric):
    with open(prediction_file) as f:
        image_ids = []
        boxes = []
        scores = []
        for line in f:
            #ImageID,Source,LabelName,Confidence,XMin,XMax,YMin,YMax,IsOccluded,IsTruncated,IsGroupOf,IsDepiction,IsInside,ClassId,ClassName
            t = line.rstrip().split(",")
            image_ids.append(t[0])
            scores.append(float(t[3]))
            # CSV stores XMin, XMax, YMin, YMax; reorder to (xmin, ymin, xmax, ymax)
            box = torch.tensor(
                [float(t[4]),
                 float(t[6]),
                 float(t[5]),
                 float(t[7])]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)
        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()
        for i, image_id in enumerate(image_ids):
            box = boxes[i]
            if image_id not in gt_boxes:
                false_positive[i] = 1
                continue

            gt_box = gt_boxes[image_id]
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
            if max_iou > iou_threshold:
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        false_positive[i] = 1
            else:
                false_positive[i] = 1

    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive + 1e-8)  # epsilon avoids 0/0 when only difficult cases matched so far
    recall = true_positive / num_true_cases
    if use_2007_metric:
        return measurements.compute_voc2007_average_precision(
            precision, recall)
    else:
        return measurements.compute_average_precision(precision, recall)
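This variant reads comma-separated rows in the Open Images layout, whose header interleaves the coordinates. A hypothetical line shows how t[4..7] are reordered into (xmin, ymin, xmax, ymax):

# Hypothetical row in the layout assumed above:
# ImageID,Source,LabelName,Confidence,XMin,XMax,YMin,YMax,...
line = "img001,model,/m/01g317,0.93,10.0,120.0,20.0,200.0"
t = line.rstrip().split(",")
# t[4]=XMin, t[5]=XMax, t[6]=YMin, t[7]=YMax
box = [float(t[4]), float(t[6]), float(t[5]), float(t[7])]
print(box)  # [10.0, 20.0, 120.0, 200.0] == [xmin, ymin, xmax, ymax]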
Example #5
def compute_average_precision_per_class(num_true_cases, gt_boxes,
                                        difficult_cases, id_list, score_list,
                                        box_list, iou_threshold,
                                        use_2007_metric):

    image_ids = id_list
    boxes = []
    scores = score_list
    for bl in box_list:
        box = torch.tensor(bl).unsqueeze(0)
        box -= 1.0  # convert to python format where indexes start from 0
        boxes.append(box)
    scores = np.array(scores)
    sorted_indexes = np.argsort(-scores)
    boxes = [boxes[i] for i in sorted_indexes]
    image_ids = [image_ids[i] for i in sorted_indexes]
    true_positive = np.zeros(len(image_ids))
    false_positive = np.zeros(len(image_ids))
    matched = set()
    for i, image_id in enumerate(image_ids):
        box = boxes[i]
        if image_id not in gt_boxes:
            false_positive[i] = 1
            continue

        gt_box = gt_boxes[image_id]
        ious = box_utils.iou_of(box, gt_box)
        max_iou = torch.max(ious).item()
        max_arg = torch.argmax(ious).item()
        if max_iou > iou_threshold:
            if difficult_cases[image_id][max_arg] == 0:
                if (image_id, max_arg) not in matched:
                    true_positive[i] = 1
                    matched.add((image_id, max_arg))
                else:
                    false_positive[i] = 1
        else:
            false_positive[i] = 1

    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive + 1e-8)  # epsilon avoids 0/0 when only difficult cases matched so far
    recall = true_positive / num_true_cases
    if use_2007_metric:
        return compute_voc2007_average_precision(precision, recall)
    else:
        return compute_average_precision(precision, recall)
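Example #5 is the same algorithm without file I/O: detections arrive as parallel in-memory lists. A minimal hypothetical call, with all values as placeholders:

import torch

ap = compute_average_precision_per_class(
    num_true_cases=1,
    gt_boxes={"img001": torch.tensor([[9.0, 19.0, 119.0, 199.0]])},
    difficult_cases={"img001": [0]},
    id_list=["img001"],
    score_list=[0.9],
    box_list=[[10.0, 20.0, 120.0, 200.0]],  # shifted by -1.0 inside the function
    iou_threshold=0.5,
    use_2007_metric=False,
)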
Example #6
def coco_ap_per_class(num_true_cases, gt_boxes, difficult_cases,
                      prediction_file, use_2007_metric):
    aps, precs, recalls = [], [], []
    _measurement_func = (measurements.compute_voc2007_average_precision
                         if use_2007_metric else
                         measurements.compute_average_precision)
    with open(prediction_file) as f:
        image_ids = []
        boxes = []
        scores = []
        for line in f:
            t = line.rstrip().split(" ")
            image_ids.append(t[0])
            scores.append(float(t[1]))
            box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)
        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]

    max_ious = []
    max_args = []
    for i, image_id in enumerate(image_ids):
        box = boxes[i]
        image_id = int(image_id)
        if image_id in gt_boxes.keys():
            gt_box = gt_boxes[image_id]
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
        else:
            max_iou = 0.0
            max_arg = 0
        max_ious.append(max_iou)
        max_args.append(max_arg)
    for _iou_thresh in [0.5 + 0.05 * i for i in range(0, 10)]:  # COCO-style IoU sweep, 0.50 to 0.95
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()
        for i, (max_iou, max_arg,
                image_id) in enumerate(zip(max_ious, max_args, image_ids)):
            image_id = int(image_id)
            if max_iou > _iou_thresh:
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        false_positive[i] = 1
            else:
                false_positive[i] = 1

        true_positive = true_positive.cumsum()
        false_positive = false_positive.cumsum()
        precision = true_positive / (true_positive + false_positive + 1e-8)
        recall = true_positive / num_true_cases

        _vtp = true_positive[-1] if len(true_positive) != 0 else 0
        _vfp = false_positive[-1] if len(false_positive) != 0 else 0
        ap = _measurement_func(precision, recall)
        _prec = _vtp / (_vtp + _vfp + 1e-8)
        _recall = _vtp / num_true_cases
        aps.append(ap)
        precs.append(_prec)
        recalls.append(_recall)
    return (sum(aps) / len(aps), sum(precs) / len(precs),
            sum(recalls) / len(recalls))

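coco_ap_per_class matches each detection to its best-overlapping gt box once and reuses that match across all ten IoU thresholds it averages over; note this is a simplification of the official COCO protocol, which re-runs the greedy matching at every threshold. The swept thresholds expand to:

# The IoU sweep used above, 0.50 to 0.95 in steps of 0.05:
thresholds = [0.5 + 0.05 * i for i in range(0, 10)]
print([round(t, 2) for t in thresholds])
# [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
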
def calc_mAP(weights,
             batch_size=16,
             img_size=640,
             iou_thres=0.5,
             conf_thres=0.001,
             nms_thres=0.5,
             save_json=False,
             model=None):
    label_path = "models/train-coco_person_face-0.0.3-RFB-160/coco-person-face-labels.txt"
    class_names = [name.strip() for name in open(label_path).readlines()]
    num_classes = len(class_names)
    if opt.net_type == 'RFB':
        net = create_Mb_Tiny_RFB_fd(len(class_names),
                                    is_test=True,
                                    device="cuda:0")
        predictor = create_Mb_Tiny_RFB_fd_predictor(net,
                                                    candidate_size=100,
                                                    device="cuda:0")
    elif opt.net_type == 'slim':
        net = create_mb_tiny_fd(len(class_names),
                                is_test=True,
                                device="cuda:0")
        predictor = create_mb_tiny_fd_predictor(net,
                                                candidate_size=100,
                                                device="cuda:0")
    else:
        # guard against `net` being unbound below
        raise ValueError("unsupported net_type: {}".format(opt.net_type))

    net.load(weights)
    net.eval()
    device = "cuda:0"

    data_list = json.load(open("/media/test/data/coco/test_datalist.json"))

    all_correct = None
    all_p_prob = None
    all_p_label = None
    all_g_label = None
    seen = 0
    for data in tqdm(data_list):
        image_id = data['image_file']
        gt_boxes = np.array(data['boxes'], dtype=np.float32)
        gt_labels = np.array(data['labels'], dtype=np.int64)
        image = cv2.imread(image_id)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        p_boxes, p_labels, p_probs = predictor.predict(image)
        nl = gt_labels.shape[0]
        correct = np.array([0] * p_boxes.shape[0])
        for i, gt_box in enumerate(gt_boxes):
            seen += 1
            p_index = np.array(range(p_boxes.shape[0]))
            gt_label = gt_labels[i]
            valid_p_boxes = p_boxes[correct == 0]  # drop already-matched prediction boxes
            valid_p_index = p_index[correct == 0]
            valid_p_probs = p_probs[correct == 0]
            valid_p_labels = p_labels[correct == 0]
            valid_p_boxes = valid_p_boxes[
                valid_p_labels == gt_label]  # keep predictions whose label matches the gt label
            valid_p_index = valid_p_index[valid_p_labels == gt_label]
            valid_p_probs = valid_p_probs[valid_p_labels == gt_label]
            if valid_p_boxes.shape[0] == 0:
                continue
            iou = iou_of(torch.tensor(valid_p_boxes),
                         torch.tensor(np.expand_dims(gt_box, axis=0)))
            max_val = torch.max(iou)
            if max_val.item() > iou_thres:
                correct[valid_p_index[torch.argmax(iou).item()]] = 1
        all_correct = np.concatenate(
            [all_correct, correct],
            axis=0) if all_correct is not None else correct
        all_p_prob = np.concatenate(
            [all_p_prob, p_probs],
            axis=0) if all_p_prob is not None else p_probs
        all_p_label = np.concatenate(
            [all_p_label, p_labels],
            axis=0) if all_p_label is not None else p_labels
        all_g_label = np.concatenate(
            [all_g_label, gt_labels],
            axis=0) if all_g_label is not None else gt_labels
    p, r, ap, f1, ap_class = ap_per_class(all_correct, all_p_prob, all_p_label,
                                          all_g_label)
    mp, mr, mean_ap, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()  # avoid shadowing builtin map()
    nt = np.bincount(all_g_label.astype(np.int64),
                     minlength=2)  # number of targets per class
    # Print results
    phead = '%30s' + '%10s' * 6
    print(phead % ('type', 'seen', 'targets', 'mp', 'mr', 'map', 'mf1'))
    pf = '%30s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, mean_ap, mf1))

    # Print results per class
    names = ['BACKGROUND', 'person', 'face']
    for i, c in enumerate(ap_class):
        print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))
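A hypothetical invocation of calc_mAP (the checkpoint path is a placeholder, and the module-level opt.net_type must already be set, e.g. by argparse, before calling):

# Placeholder path; calc_mAP also reads hard-coded label and datalist paths above.
calc_mAP(weights="models/train-coco_person_face-0.0.3-RFB-160/last.pth")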