import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage import io

import cdb  # project-local annotation helpers (assumed importable)
# evaluate_model and compute_auc are project-local helpers assumed to be
# defined elsewhere in this package.


def evaluate_detection(detections, annotation_dir, testfiles, threshold=0.5, margin=0, shape=(0, 0)):

    boundingboxes = {}
    # Load the ground-truth boxes for every test image (same basename, .xml).
    for key in testfiles:
        annotation_filename = annotation_dir + key[:-3] + "xml"
        boundingboxes[key] = cdb.get_bounding_boxes_for_single_image(annotation_filename)

    boxes = []
    image_files = []

    # Flatten the per-image detections into one array, remembering which
    # image each detection came from.
    for key in detections:
        boxes.append(detections[key])
        image_files.append([key for _ in range(len(detections[key]))])

    boxes = [j for i in boxes for j in i]

    if not boxes:
        print("No detections")
        return [], [], []

    boxes = np.vstack(boxes)
    image_files = np.hstack(image_files)

    return evaluate_model(boxes, image_files, boundingboxes, overlapThreshold=threshold, margin=margin, shape=shape)
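
# A minimal usage sketch (hedged: `detections` is assumed to map image file
# names to lists of [x1, y1, x2, y2, score] rows, as the loops above imply;
# the file names below are hypothetical):
#
#     test_files = ["img_001.jpg", "img_002.jpg"]
#     rec, prec, p = evaluate_detection(detections, "annotations/", test_files,
#                                       threshold=0.5)
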
def show_detections(listfiles, annotation_dir, img_dir, dects, op, probs_dict, STEP, SIZE):
    # Two panels per image: the image with ground truth and detections drawn
    # on it, and the probability map with the same geometry at map resolution.
    plt.figure(figsize=(15, 20))

    num_imgs = len(listfiles)
    for i in range(num_imgs):

        k = listfiles[i]

        annotation_filename = annotation_dir + k[:-3] + "xml"
        boundingboxes = cdb.get_bounding_boxes_for_single_image(annotation_filename)

        plt.subplot(num_imgs, 2, i * 2 + 1)
        img = io.imread(img_dir + k)

        plt.axis("off")

        # Ground truth is stored as (x1, x2, y1, y2); mark each box centre
        # with a circle.
        for bb in boundingboxes:
            cx1 = bb[0]
            cx2 = bb[1]
            cy1 = bb[2]
            cy2 = bb[3]
            cv2.circle(img, (int((cx1 + cx2) // 2), int((cy1 + cy2) // 2)), 20, (255, 50, 50, 255), 3)
        # Detections are (x1, y1, x2, y2, score); draw those above the
        # operating point `op`.
        for dd in dects[k]:
            if dd[4] > op:
                cx1 = int(dd[0])
                cx2 = int(dd[2])
                cy1 = int(dd[1])
                cy2 = int(dd[3])
                cv2.rectangle(img, (cx1, cy1), (cx2, cy2), (50, 255, 50, 255), 5)

        plt.imshow(img)

        plt.subplot(num_imgs, 2, i * 2 + 2)
        # Render the probability map through the cubehelix colormap as RGBA uint8.
        prob = (plt.cm.cubehelix(probs_dict[k].copy()) * 255).astype(np.uint8)
        plt.axis("off")
        # Map image coordinates onto the probability map: the map is sampled
        # every STEP pixels by a SIZE-wide window, so shift by half a window
        # and divide by the stride.
        for bb in boundingboxes:
            cx1 = bb[0] // STEP - SIZE // 2 // STEP
            cx2 = bb[1] // STEP - SIZE // 2 // STEP
            cy1 = bb[2] // STEP - SIZE // 2 // STEP
            cy2 = bb[3] // STEP - SIZE // 2 // STEP
            cv2.circle(prob, (int((cx1 + cx2) // 2), int((cy1 + cy2) // 2)), 4, (255, 50, 50, 255), 1)

        for dd in dects[k]:
            if dd[4] > op:
                cx1 = int(dd[0] / STEP - SIZE // 2 // STEP)
                cx2 = int(dd[2] / STEP - SIZE // 2 // STEP)
                cy1 = int(dd[1] / STEP - SIZE // 2 // STEP)
                cy2 = int(dd[3] / STEP - SIZE // 2 // STEP)
                cv2.rectangle(prob, (cx1, cy1), (cx2, cy2), (50, 255, 50, 255), 1)

        # Crop the border where the sliding window never fully fits.
        plt.imshow(prob[: -(SIZE // STEP), : -(SIZE // STEP)])
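
# A hedged usage sketch (assuming, for illustration, a 24 px window slid with
# a 4 px stride; all names below are hypothetical):
#
#     show_detections(test_files, "annotations/", "images/", dects,
#                     op=0.5, probs_dict=probs, STEP=4, SIZE=24)
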
def per_image(found, annotation_dir, img_dir, prob=0.5, threshold=0.5):

    num_pos = 0

    boundingboxes = {}

    assigned = {}

    if not found:
        print("No detections")
        return np.array([]), []

    per_image_stats = []
    for key in found:
        img = cv2.imread(img_dir + key, cv2.IMREAD_GRAYSCALE)

        # Grey-level entropy of the image, reported below as a per-image
        # difficulty measure.
        probs = np.histogram(img.flatten(), bins=250, density=True, range=(0, 255))[0]
        lprobs = np.log(probs)
        lprobs[np.isneginf(lprobs)] = 0  # empty bins contribute zero
        entropy = -np.sum(probs * lprobs)
        tp = []
        fp = []
        annotation_filename = annotation_dir + key[:-3] + "xml"
        boundingboxes[key] = cdb.get_bounding_boxes_for_single_image(annotation_filename)
        num_pos = len(boundingboxes[key])
        assigned[key] = np.zeros(len(boundingboxes[key]))

        boxes = found[key]

        # RMS contrast: standard deviation of the grey levels.
        contrast = img.std()

        if len(boxes) == 0:
            # With no detections, precision/recall are zero unless there was
            # nothing to detect in the first place.
            prec = 0
            rec = 0
            if num_pos == 0:
                prec = 1
                rec = 1

            per_image_stats.append((num_pos, 0, 0, prec, rec, entropy, contrast))

            continue

        boxes = np.vstack(boxes)

        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        p = boxes[:, 4].copy()

        area = (x2 - x1 + 1) * (y2 - y1 + 1)

        # Match detections to ground truth greedily, in order of decreasing
        # confidence.
        idx = np.argsort(-p)
        for i in idx:

            if p[i] < prob:
                break

            if len(boundingboxes[key]) == 0:
                by1 = by2 = bx1 = bx2 = np.array([])
            else:
                # Ground truth columns are (x1, x2, y1, y2).
                bx1 = boundingboxes[key][:, 0]
                bx2 = boundingboxes[key][:, 1]
                by1 = boundingboxes[key][:, 2]
                by2 = boundingboxes[key][:, 3]

            max_overlap = -1
            max_index = 0

            for b in range(len(boundingboxes[key])):
                if assigned[key][b] != 0:
                    continue

                xx1 = max(x1[i], bx1[b])
                yy1 = max(y1[i], by1[b])
                xx2 = min(x2[i], bx2[b])
                yy2 = min(y2[i], by2[b])

                # compute the width and height of the bounding box
                w = max(0, xx2 - xx1 + 1)
                h = max(0, yy2 - yy1 + 1)

                areabb = (bx2[b] - bx1[b] + 1) * (by2[b] - by1[b] + 1)

                # compute the intersection over union between the detection
                # and the ground-truth box
                overlap = float(w * h) / (area[i] + areabb - w * h)
                if overlap > max_overlap:
                    max_overlap = overlap
                    max_index = b

            if max_overlap > threshold:
                assigned[key][max_index] = 1
                tp.append(1)
                fp.append(0)
            else:
                tp.append(0)
                fp.append(1)

        tp = np.sum(tp)
        fp = np.sum(fp)

        # Guard against empty denominators before dividing.
        rec = 1.0 if num_pos == 0 else tp * 1.0 / num_pos
        pre = 1.0 if tp + fp == 0 else tp * 1.0 / (tp + fp)
        per_image_stats.append((num_pos, tp, fp, pre, rec, entropy, contrast))

    # One row per image: (num_pos, tp, fp, precision, recall, entropy, contrast).
    return np.array(per_image_stats), list(found.keys())
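
# Hedged usage sketch: each row of the stats array is
# (num_pos, tp, fp, precision, recall, entropy, contrast), so per-image
# recall can be summarised directly (paths and names are hypothetical):
#
#     stats, keys = per_image(found, "annotations/", "images/", prob=0.5)
#     if len(stats):
#         print("mean per-image recall:", stats[:, 4].mean())
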
def bootstrap(models, annotation_dir, test_set, iters=10, overlapThreshold=0.5):
    """
    Bootstraps a set of models and computes the AP for each 
    iteration.
    
    Parameters
    ----------
    models : dic
        Contains the detections of each model
    annotation_dir : string
        Path to the annotation files
    test_set : list
        Filenames of the images in the test set
    iters : int, optional
        Number of bootstrapping iteratins, 10 by default
    
    Returns
    -------
    aps: ndarray
        APs of each model for each iteration
    """
    boundingboxes = {}

    aps = np.zeros((len(models), iters))

    for f in test_set:
        boundingboxes[f] = cdb.get_bounding_boxes_for_single_image(annotation_dir + f[:-3] + "xml")
    for bootstrap_iter in range(iters):

        # Resample the test images with replacement (numpy's randint upper
        # bound is exclusive, so this draws len(test_set) indices in range).
        selected_imgs = np.random.randint(0, len(test_set), len(test_set))

        for m_id, model in enumerate(models.keys()):
            selected_bbs = {}
            assigned = {}

            for i in selected_imgs:

                if test_set[i] not in selected_bbs:
                    selected_bbs[test_set[i]] = boundingboxes[test_set[i]]
                    assigned[test_set[i]] = np.zeros(len(boundingboxes[test_set[i]]))

                # Record how many times this image was drawn, so its ground
                # truth is weighted by its multiplicity in the resample.
                assigned[test_set[i]] += 1

            selected_dets = []
            image_files = []

            for i in selected_imgs:
                if test_set[i] in models[model]:
                    selected_dets.append(models[model][test_set[i]])
                    image_files.append([test_set[i] for _ in range(len(models[model][test_set[i]]))])
            selected_dets = [j for i in selected_dets for j in i]

            if not selected_dets:
                continue  # nothing detected in this resample; the AP stays 0

            selected_dets = np.vstack(selected_dets)
            image_files = np.hstack(image_files)

            rec, prec, p = evaluate_model(
                selected_dets, image_files, selected_bbs, assigned, overlapThreshold=overlapThreshold
            )
            aps[m_id, bootstrap_iter] = compute_auc(rec, prec)

    return aps
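
# Hedged usage sketch: a common way to summarise the bootstrap output is a
# 95% percentile interval of the AP per model (names are hypothetical):
#
#     aps = bootstrap(models, "annotations/", test_files, iters=1000)
#     lo, hi = np.percentile(aps, [2.5, 97.5], axis=1)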