import numpy as np
import torch

# box_utils (IoU helpers) and measurements (AP helpers) are assumed to come
# from the surrounding project, e.g. a vision/utils package.
from vision.utils import box_utils, measurements


def compute_average_precision_per_class(num_true_cases, gt_boxes,
										prediction_file, iou_threshold, use_2007_metric):
	""" Computes average precision per class

		num_true_cases 和 gt_boxes 是从Annotations读取的信息
			num_true_cases: 这个class出现了多少次
			gt_boxes:  gt_boxes[image_id]存着对应的gt_box

		prediction_file存着模型的输出结果, 依次为 img路径, score概率, x1, y1, x2, y2
	"""
	with open(prediction_file) as f:
		image_ids = []
		boxes = []
		scores = []
		for line in f:
			t = line.rstrip().split(" ")
			image_ids.append(t[0])
			scores.append(float(t[1]))
			box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
			box -= 1.0  # convert to python format where indexes start from 0
			boxes.append(box)
		scores = np.array(scores)
		sorted_indexes = np.argsort(-scores)  # indexes that sort the scores in descending order
		# reorder boxes and image_ids by descending score
		boxes = [boxes[i] for i in sorted_indexes]
		image_ids = [image_ids[i] for i in sorted_indexes]
		true_positive = np.zeros(len(image_ids))
		false_positive = np.zeros(len(image_ids))
		matched = set()
		# classify each detection as a true positive or a false positive
		for i, image_id in enumerate(image_ids):
			box = boxes[i]
			if image_id not in gt_boxes:  # detection in an image that has no ground truth of this class
				false_positive[i] = 1
				continue

			gt_box = gt_boxes[image_id]    # there may be several ground-truth boxes in this image
			ious = box_utils.iou_of(box, gt_box)   # IoU between the predicted box and every ground-truth box
			max_iou = torch.max(ious).item()
			max_arg = torch.argmax(ious).item()   # index of the ground-truth box that best matches the prediction
			if max_iou > iou_threshold:
				if (image_id, max_arg) not in matched:
					true_positive[i] = 1
					matched.add((image_id, max_arg))
				else:
					false_positive[i] = 1
			else:
				false_positive[i] = 1
	# compute precision and recall, then the AP
	true_positive = true_positive.cumsum()
	false_positive = false_positive.cumsum()
	precision = true_positive / (true_positive + false_positive)
	recall = true_positive / num_true_cases
	if use_2007_metric:
		return measurements.compute_voc2007_average_precision(precision, recall)
	else:
		return measurements.compute_average_precision(precision, recall)
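
# Illustrative usage sketch (not part of the original code): it writes a tiny
# prediction file in the documented "<image path> <score> <x1> <y1> <x2> <y2>"
# format, builds a one-box ground truth, and evaluates it with the annotated
# function above. The file name, image id and box values are made up, and
# box_utils / measurements must be importable from the surrounding project for
# it to run.
def _toy_ap_example(compute_ap=compute_average_precision_per_class):
	# the default argument binds the 5-parameter version defined above, since the
	# same name is redefined below with an extra difficult_cases parameter
	with open("toy_dets.txt", "w") as f:
		f.write("img_0001 0.90 10 10 50 50\n")  # high-score detection that matches the ground truth
		f.write("img_0001 0.30 12 12 48 48\n")  # duplicate detection of the same object -> counted as FP
	# toy ground truth, assumed to already be in the 0-based coordinates
	# that the predictions are converted to above
	toy_gt = {"img_0001": torch.tensor([[9.0, 9.0, 49.0, 49.0]])}
	ap = compute_ap(
		num_true_cases=1,
		gt_boxes=toy_gt,
		prediction_file="toy_dets.txt",
		iou_threshold=0.5,
		use_2007_metric=True,
	)
	print(f"toy AP: {ap:.3f}")
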
def compute_average_precision_per_class(num_true_cases, gt_boxes,
                                        difficult_cases, prediction_file,
                                        iou_threshold, use_2007_metric):
    """Full variant with difficult-case handling: identical to the annotated
    version above, except that a detection matched to a ground-truth box
    flagged as difficult (difficult_cases[image_id][gt_index] == 1) is ignored,
    i.e. counted as neither a true positive nor a false positive, which is the
    standard VOC evaluation behaviour.
    """
    with open(prediction_file) as f:
        image_ids = []
        boxes = []
        scores = []

        for line in f:
            t = line.rstrip().split(" ")
            image_ids.append(t[0])
            scores.append(float(t[1]))
            box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)

        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()
        for i, image_id in enumerate(image_ids):
            box = boxes[i]
            if image_id not in gt_boxes:
                false_positive[i] = 1
                continue

            gt_box = gt_boxes[image_id]
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
            if max_iou > iou_threshold:
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        false_positive[i] = 1
            else:
                false_positive[i] = 1

    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive)
    recall = true_positive / num_true_cases

    if use_2007_metric:
        return measurements.compute_voc2007_average_precision(
            precision, recall)
    else:
        return measurements.compute_average_precision(precision, recall)
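
# Sketch of how the per-class APs are usually combined into a mean AP
# (illustrative, not part of the original file). The class list, the
# per-class data structures and the "det_test_<class>.txt" naming are
# hypothetical; a real evaluation script builds num_true_cases, gt_boxes and
# difficult_cases from the dataset annotations and writes one prediction file
# per class before calling compute_average_precision_per_class.
def _toy_mean_ap(class_names, num_true_cases, gt_boxes, difficult_cases,
                 iou_threshold=0.5, use_2007_metric=True):
    aps = []
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue  # index 0 is assumed to be the background class
        prediction_file = f"det_test_{class_name}.txt"  # hypothetical naming scheme
        ap = compute_average_precision_per_class(
            num_true_cases[class_index],
            gt_boxes[class_index],
            difficult_cases[class_index],
            prediction_file,
            iou_threshold,
            use_2007_metric,
        )
        print(f"{class_name}: {ap:.4f}")
        aps.append(ap)
    return sum(aps) / len(aps)  # mAP over the foreground classes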