Example #1
def calc_detection_hits(gt_bbox, crop_bbox, score_bbox, iou_thr=0.2, score_thr=0.):
    # bboxes and score must be tensors
    if isinstance(crop_bbox, list):
        crop_bbox = torch.stack(crop_bbox)

    if isinstance(score_bbox, list):
        score_bbox = torch.stack(score_bbox)

    # sort according to score and limit according to score_thr
    scores, indices = torch.sort(score_bbox, descending=True)
    boxes = crop_bbox[indices]

    boxes = boxes[scores > score_thr]
    scores = scores[scores > score_thr]

    # calculate iou
    iou_matrix = box_iou(change_box_order(gt_bbox, order='xywh2xyxy'),
                         change_box_order(boxes, order="xywh2xyxy"))

    # create label list, denoting at max one detection per lesion
    labels = torch.zeros_like(scores)
    iou_hits = torch.zeros_like(scores)
    for i in range(iou_matrix.shape[0]):
        hits = (iou_matrix[i] > iou_thr).nonzero()
        if len(hits) > 0:
            # boxes are sorted by descending score, so hits[0] is the
            # highest-scoring detection overlapping lesion i
            labels[hits[0].cpu()] = 1.
            iou_hits[hits[0].cpu()] = iou_matrix[i, hits[0].cpu()]

    return labels, scores, iou_hits
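
Every snippet on this page calls two helpers, change_box_order and box_iou, that are defined elsewhere in the project and not shown here. Below is a minimal sketch of what they plausibly look like, assuming the "xywh" order is center-based (which the plotting code in Example #7 suggests); the project's own versions may differ in details such as a +1 pixel convention.

import torch

def change_box_order(boxes, order):
    # Convert between (cx, cy, w, h) and (xmin, ymin, xmax, ymax).
    # Sketch only -- not the project's actual implementation.
    assert order in ("xywh2xyxy", "xyxy2xywh")
    a, b = boxes[:, :2], boxes[:, 2:]
    if order == "xywh2xyxy":
        return torch.cat([a - b / 2, a + b / 2], dim=1)
    return torch.cat([(a + b) / 2, b - a], dim=1)

def box_iou(box1, box2, order="xyxy"):
    # Pairwise IoU matrix of shape (len(box1), len(box2)).
    if order == "xywh":
        box1 = change_box_order(box1, "xywh2xyxy")
        box2 = change_box_order(box2, "xywh2xyxy")
    tl = torch.max(box1[:, None, :2], box2[None, :, :2])  # top-left of overlap
    br = torch.min(box1[:, None, 2:], box2[None, :, 2:])  # bottom-right of overlap
    wh = (br - tl).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    return inter / (area1[:, None] + area2[None, :] - inter)
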
Example #2
    def forward(self, data, loc_preds, loc_targets, cls_preds, cls_targets):
        batch_size = loc_preds.shape[0]
        anchors = Anchors()
        labels = []
        preds = []

        for i in range(batch_size):

            pred_boxes, pred_labels, pred_score = anchors.generateBoxesFromAnchors(
                loc_preds[i],
                cls_preds[i],
                tuple(data[i].shape[1:]),
                cls_tresh=0.05)

            target_boxes, target_labels = anchors.restoreBoxesFromAnchors(
                loc_targets[i], cls_targets[i], tuple(data[i].shape[1:]))

            if pred_boxes is None and target_boxes is None:
                continue

            if pred_boxes is None:
                preds.append(torch.zeros_like(target_labels))
                labels.append(target_labels)
                continue

            if target_boxes is None:
                preds.append(pred_labels)
                labels.append(torch.zeros_like(pred_labels))
                continue

            pred_boxes = change_box_order(pred_boxes, order='xywh2xyxy')
            target_boxes = change_box_order(target_boxes, order='xywh2xyxy')

            iou_matrix = box_iou(target_boxes, pred_boxes)
            iou_matrix = iou_matrix > self.iou_thr

            # a predicted box counts as positive if it overlaps any target box
            box_labels = torch.clamp(torch.sum(iou_matrix, 0), 0, 1)

            preds.append(pred_score)
            labels.append(box_labels)

        # flatten the per-image results into single 1-D tensors
        labels = torch.tensor([item for sublist in labels for item in sublist])\
            .type(torch.float32)
        preds = torch.tensor([item for sublist in preds for item in sublist])\
            .type(torch.float32)

        # roc_auc_score is undefined when only one class is present,
        # so fall back to fixed values in the degenerate cases
        if not any(labels):
            return float_to_tensor(0.5)
        elif all(labels):
            return float_to_tensor(1)
        elif labels.dim() == 0:
            return float_to_tensor(0)
        else:
            return float_to_tensor(roc_auc_score(labels, preds))
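
float_to_tensor is another project helper that is not shown on this page; presumably it is something along these lines:

import torch

def float_to_tensor(value):
    # Presumed helper (assumption, not shown in the source): wrap a plain
    # Python float in a float32 tensor so the metric always returns a tensor.
    return torch.tensor(value, dtype=torch.float32)
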
Example #3
def gt_overlap(image_bbox, crop_bbox, iou_thr=0.2):
    # convert bbox format from [x, y, w, h] to [x_1, y_1, x_2, y_2] for IoU
    # calculation
    image_bbox = change_box_order(image_bbox, order='xywh2xyxy')
    crop_bbox = change_box_order(crop_bbox, order="xywh2xyxy")

    # determine overlap with ground truth bboxes
    iou_matrix = box_iou(image_bbox, crop_bbox)
    iou_matrix = iou_matrix > iou_thr

    # assign each detected bbox a label: 1 if its overlap with the ground
    # truth is higher than the given threshold, 0 otherwise
    box_class = torch.clamp(torch.sum(iou_matrix, 0), 0, 1)

    return box_class
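
A hypothetical call with toy boxes in (cx, cy, w, h) order, with torch imported and the helper sketches from Example #1 (or the project's own) in scope: detections 1 and 3 overlap a ground-truth box well above the 0.2 IoU threshold, detection 2 does not.

image_bbox = torch.tensor([[50., 50., 20., 20.],
                           [120., 80., 30., 30.]])   # two ground-truth lesions
crop_bbox = torch.tensor([[52., 49., 22., 18.],      # overlaps lesion 1
                          [200., 200., 10., 10.],    # background
                          [118., 82., 28., 32.]])    # overlaps lesion 2
print(gt_overlap(image_bbox, crop_bbox))             # tensor([1, 0, 1])
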
Example #4
def calc_tp_fn_fp(gt_bbox, crop_bbox, score_bbox, iou_thr=0.2,
                  confidence_values=np.arange(0.5, 1, 0.05)):
    # bboxes and score must be tensors
    if isinstance(crop_bbox, list):
        crop_bbox = torch.stack(crop_bbox)

    if isinstance(score_bbox, list):
        score_bbox = torch.stack(score_bbox)

    tp_list = []
    fp_list = []
    fn_list = []
    gt_bbox = change_box_order(gt_bbox, order='xywh2xyxy')

    for j in confidence_values:
        current_bbox = crop_bbox[score_bbox > j]

        if len(current_bbox) == 0:
            tp_list.append(torch.tensor(0, device=current_bbox.device))
            fp_list.append(torch.tensor(0, device=current_bbox.device))
            fn_list.append(torch.tensor(gt_bbox.shape[0],
                                        device=current_bbox.device))
            continue

        iou_matrix = box_iou(gt_bbox,
                             change_box_order(current_bbox,
                                              order="xywh2xyxy"))
        hits = iou_matrix > iou_thr

        # true positives are the lesions that are recognized
        # count only one detected box per lesion as positive
        tp = torch.clamp(torch.sum(hits, 1), 0, 1).sum()
        tp_list.append(tp)

        # false negatives are the lesions that are NOT recognized
        fn = gt_bbox.shape[0] - tp
        fn_list.append(fn)

        # number of false positives
        fp = (current_bbox.shape[0] - tp).type(torch.float32)
        fp_list.append(fp)

    return tp_list, fp_list, fn_list
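
A hypothetical use of the returned counts, turning them into per-threshold sensitivity values for a FROC-style summary (toy data, same helper assumptions as in Example #1):

gt = torch.tensor([[50., 50., 20., 20.]])          # one ground-truth lesion
dets = torch.tensor([[52., 49., 22., 18.],         # good hit
                     [200., 200., 10., 10.]])      # false positive
scores = torch.tensor([0.9, 0.6])
tp, fp, fn = calc_tp_fn_fp(gt, dets, scores)
# sensitivity at each confidence threshold in np.arange(0.5, 1, 0.05)
sensitivity = [float(t) / (float(t) + float(f)) for t, f in zip(tp, fn)]
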
Example #5
    def iteration(annotation_list):
        total_set_list = []
        merged_list = []
        for annotation in annotation_list:
            if len(total_set_list) == 0:
                total_set_list.append([annotation])
            else:
                is_appended = False
                for box_set in total_set_list:
                    for saved_annotation in box_set:
                        # group boxes whose IoU exceeds a fixed 0.2 threshold
                        iou = box_iou(saved_annotation[0].view(1, -1),
                                      annotation[0].view(1, -1),
                                      "xyxy")
                        if iou > 0.2:
                            box_set.append(annotation)
                            is_appended = True
                            break

                    if is_appended:
                        break

                if not is_appended:
                    total_set_list.append([annotation])

        for box_set in total_set_list:
            max_area = 0
            max_info = None
            max_value = 0
            for saved_annotation in box_set:
                # represent each cluster by its largest box and its best score
                area = (saved_annotation[0][3] - saved_annotation[0][1]) * \
                       (saved_annotation[0][2] - saved_annotation[0][0])
                if area > max_area:
                    max_area = area
                    max_info = saved_annotation
                if float(saved_annotation[1]) > max_value:
                    max_value = float(saved_annotation[1])

            merged_list.append((max_info[0], max_value))
        return merged_list
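
isInside is not defined on this page either. One plausible reading, given how it is called here and in Example #8, is a containment test between two (xmin, ymin, xmax, ymax) boxes; the project's actual semantics may differ.

def isInside(box_a, box_b):
    # Presumed helper (sketch only): True if box_b lies completely
    # inside box_a; both boxes in (xmin, ymin, xmax, ymax) order.
    return bool(box_a[0] <= box_b[0] and box_a[1] <= box_b[1] and
                box_a[2] >= box_b[2] and box_a[3] >= box_b[3])
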
Example #6
def merge(bboxes, score):
    # merge overlapping bounding boxes

    # bboxes and score must be tensors
    if isinstance(bboxes, list):
        bboxes = torch.stack(bboxes)

    if isinstance(score, list):
        score = torch.stack(score)

    # sort the score in descending order, adjust bboxes accordingly
    score, indices = torch.sort(score, descending=True)
    bboxes = bboxes[indices]

    # limit the number of bboxes to at most 300
    score = score[:300]
    bboxes = bboxes[:300]

    # start with the highest scoring bbox; add each remaining bbox only if
    # its IoU with every bbox already kept is at most 0.2
    result_bbox = [bboxes[0]]
    result_score = [score[0]]
    for j in range(1, len(bboxes)):
        iou = box_iou(torch.stack(result_bbox), bboxes[j].view(1, -1), "xywh")
        if not any(iou > 0.2):
            result_bbox.append(bboxes[j])
            result_score.append(score[j])

    bboxes = torch.stack(result_bbox)
    score = torch.stack(result_score)

    return bboxes, score
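
A hypothetical call (again assuming the box_iou sketch from Example #1 or the project's own): of the two overlapping detections below, merge keeps only the higher-scoring one, while the isolated third box survives.

boxes = [torch.tensor([50., 50., 20., 20.]),
         torch.tensor([52., 49., 22., 18.]),
         torch.tensor([200., 200., 10., 10.])]
scores = [torch.tensor(0.7), torch.tensor(0.9), torch.tensor(0.4)]
kept_boxes, kept_scores = merge(boxes, scores)
# kept_scores: tensor([0.9000, 0.4000])
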
Example #7
def eval(dataset, model_path, plot=False):
    # device
    device = 'cuda'

    # load model
    checkpoint = torch.load(model_path)
    model = RetinaNet(**checkpoint['init_kwargs']).eval()
    model.load_state_dict(checkpoint['state_dict'])
    model.to(device)

    # hyperparams
    crop_size = [600, 600]
    overlapped_boxes = 0.5
    confidence_values = np.arange(0.5, 1, 0.05)
    tpr_list = []
    fppi_list = []

    with torch.no_grad():
        for i in tqdm(range(len(dataset))):
            torch.cuda.empty_cache()

            # get image data
            test_data = dataset[i]

            # crop background
            test_data = inbreast_utils.segment_breast(test_data)
            image_bbox = utils.bounding_box(dataset[i]["seg"])

            # generate crops
            crop_list, corner_list = inbreast_utils.create_crops(test_data)

            # define list for predicted bboxes in crops
            crop_bbox = []
            score_bbox = []

            # plot the image with the according bboxes
            if plot:
                # plot image
                fig, ax = plt.subplots(1, figsize=(15, 10))

                ax.imshow(test_data["data"][0, :, :], cmap='Greys_r')

                # show bboxes as saved in data (in red with center)
                for idx in range(len(image_bbox)):
                    pos = tuple(image_bbox[idx][0:2])
                    ax.plot(pos[0], pos[1], 'r.')
                    width = image_bbox[idx][2]
                    height = image_bbox[idx][3]
                    pos = (pos[0] - np.floor(width / 2),
                           pos[1] - np.floor(height / 2))

                    # Create a Rectangle patch
                    rect = patches.Rectangle(pos,
                                             width,
                                             height,
                                             linewidth=1,
                                             edgecolor='r',
                                             facecolor='none')
                    ax.add_patch(rect)

            # iterate over crops
            for j in tqdm(range(0, len(crop_list))):
                torch.cuda.empty_cache()
                test_image = torch.Tensor(crop_list[j]['data']).to(device)
                test_bbox = utils.bounding_box(crop_list[j]['seg'])

                # predict anchors and labels for the crops using the loaded model
                anchor_preds, cls_preds = model(test_image.unsqueeze(0))

                # convert the predicted anchors to bboxes
                anchors = Anchors()
                boxes, labels, score = anchors.generateBoxesFromAnchors(
                    anchor_preds[0].to('cpu'),
                    cls_preds[0].to('cpu'),
                    tuple(test_image.shape[1:]),
                    cls_tresh=0.05)

                # correct the predicted bboxes
                for k in range(len(boxes)):
                    center_corrected = boxes[k][0:2] + \
                                       torch.Tensor(corner_list[j])
                    crop_bbox.append(
                        torch.cat((center_corrected, boxes[k][2:])))
                    score_bbox.append(score[k])

            # merge overlapping bounding boxes
            crop_bbox, score_bbox = merge(crop_bbox, score_bbox)

            # calculate the FROC metric (TPR vs. FPPI)
            tpr_int = []
            fppi_int = []
            image_bbox = change_box_order(torch.Tensor(image_bbox),
                                          order='xywh2xyxy').to('cpu')
            iou_thr = 0.2
            for j in confidence_values:
                current_bbox = crop_bbox[score_bbox > j]

                if len(current_bbox) == 0:
                    # no detections above this threshold
                    tpr_int.append(0.)
                    fppi_int.append(0.)
                    continue

                iou_matrix = box_iou(
                    image_bbox,
                    change_box_order(current_bbox, order="xywh2xyxy"))
                iou_matrix = iou_matrix > iou_thr

                # true positives are the lesions that are recognized;
                # count at most one detection per lesion (as in calc_tp_fn_fp)
                tp = torch.clamp(iou_matrix.sum(1), 0, 1).sum()

                # false negatives are the lesions that are NOT recognized
                fn = image_bbox.shape[0] - tp

                # true positive rate
                tpr = tp.type(torch.float32) / (tp + fn).type(torch.float32)

                # number of false positives per image
                fp = (current_bbox.shape[0] - tp).type(torch.float32)

                tpr_int.append(float(tpr))
                fppi_int.append(float(fp))
            tpr_list.append(tpr_int)
            fppi_list.append(fppi_int)

            if plot:
                # show the predicted bboxes (in blue)
                print("Number of detected bboxes: {0}".format(len(crop_bbox)))
                keep = score_bbox > 0.5
                crop_bbox = crop_bbox[keep]
                score_bbox = score_bbox[keep]
                for j in range(len(crop_bbox)):
                    width = crop_bbox[j][2]
                    height = crop_bbox[j][3]
                    pos = (crop_bbox[j][0] - torch.floor(width / 2),
                           crop_bbox[j][1] - torch.floor(height / 2))

                    # Create a Rectangle patch
                    rect = patches.Rectangle(pos,
                                             width,
                                             height,
                                             linewidth=1,
                                             edgecolor='b',
                                             facecolor='none')
                    ax.add_patch(rect)
                    ax.annotate("{:.2f}".format(score_bbox[j]),
                                pos,
                                fontsize=6,
                                xytext=(pos[0] + 10, pos[1] - 10))

                    print("BBox params: {0}, score: {1}".format(
                        crop_bbox[j], score_bbox[j]))
                plt.show()

            #     fig.savefig("../plots/" + "_".join(model_path.split("/")[5:8]) + ".png")

        # calculate FROC over all test images
        tpr_list = np.asarray(tpr_list)
        tpr = tpr_list.mean(axis=0)

        fppi_list = np.asarray(fppi_list)
        fppi = fppi_list.mean(axis=0)

    # plt.figure(1)
    # plt.ylim(0, 1.1)
    # plt.xlabel("False Positve per Image (FPPI)")
    # plt.ylabel("True Positive Rate (TPR)")
    # plt.title("Free Response Operating Characteristic (FROC)")
    # plt.plot(np.asarray(fppi), np.asarray(tpr), "rx-")
    # plt.show()

    return tpr, fppi
Example #8
def my_merging_2(bboxes, scores, crop_center_factor, heatmap_factor, thr=0.2):
    # bboxes and score must be tensors
    if isinstance(bboxes, list):
        device = bboxes[0].device
        bboxes = torch.stack(bboxes).to("cpu")
    else:
        device = bboxes.device
        bboxes = bboxes.to("cpu")

    if isinstance(scores, list):
        scores = torch.stack(scores).to("cpu")
    else:
        scores = scores.to("cpu")

    # sort the score in descending order, adjust bboxes accordingly
    scores, indices = torch.sort(scores, descending=True)
    bboxes = bboxes[indices]

    # change the order from xywh to xyxy
    bboxes = change_box_order(bboxes, order="xywh2xyxy")

    box_set_list = []
    for i in range(len(bboxes)):
        if len(box_set_list) == 0:
            box_set_list.append([[bboxes[i], scores[i]]])
        else:
            is_appended = False
            for box_set in box_set_list:
                for saved_box in box_set:
                    iou = box_iou(saved_box[0].view(1, -1),
                                  bboxes[i].view(1, -1),
                                  "xyxy")
                    is_covered = isInside(saved_box[0],
                                          bboxes[i])
                    if iou > thr or is_covered:
                        box_set.append([bboxes[i], scores[i]])
                        is_appended = True
                        break

                if is_appended:
                    break

            if not is_appended:
                box_set_list.append([[bboxes[i], scores[i]]])

    merged_box_list = []
    merged_score_list = []
    for i in range(len(box_set_list)):
        pos = [box_set_list[i][j][0] for j in range(len(box_set_list[i]))]
        set_scores = [box_set_list[i][j][1] for j in range(len(box_set_list[i]))]

        pos = torch.stack(pos)
        set_scores = torch.stack(set_scores)

        # intersection of the cluster's highest-scoring box with every member
        xx1 = torch.max(pos[0, 0], pos[:, 0])
        yy1 = torch.max(pos[0, 1], pos[:, 1])
        xx2 = torch.min(pos[0, 2], pos[:, 2])
        yy2 = torch.min(pos[0, 3], pos[:, 3])

        w = torch.max(torch.Tensor([0]), xx2 - xx1 + 1)
        h = torch.max(torch.Tensor([0]), yy2 - yy1 + 1)
        inter = w * h

        areas = (pos[:, 2] - pos[:, 0] + 1) * (pos[:, 3] - pos[:, 1] + 1)

        # overlap between the currently highest scoring box and all boxes
        iou = inter / (areas[0] + areas - inter)

        match_weights = iou
        match_scores = set_scores * match_weights

        avg_score = torch.sum(match_scores) / torch.sum(match_weights)
        merged_score_list.append(avg_score)

        # score-weighted average of the member box coordinates
        avg_pos = torch.sum(pos * match_scores.view(-1, 1), dim=0) \
                  / torch.sum(match_scores)
        merged_box_list.append(avg_pos)


    # convert the boxes back to "xywh" order and move them to the original device
    keep_bboxes = change_box_order(torch.stack(merged_box_list),
                                   order="xyxy2xywh").to(device)

    keep_scores = torch.stack(merged_score_list).type(torch.float32).to(device)

    return keep_bboxes, keep_scores
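
The weighting step in isolation, with toy numbers: each cluster member is weighted by its IoU with the cluster's top-scoring box, so near-duplicate boxes dominate both the merged score and the merged position.

import torch

scores = torch.tensor([0.9, 0.6])
iou = torch.tensor([1.0, 0.5])               # member IoUs with the top box
match_scores = scores * iou                  # tensor([0.9000, 0.3000])
avg_score = match_scores.sum() / iou.sum()   # -> 0.8
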