def debug_non_max_suppression(boxes, scores, threshold):
    """Performs non-maximum supression and returns indicies of kept boxes.
    boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
    scores: 1-D array of box scores.
    threshold: Float. IoU threshold to use for filtering.
    """
    assert boxes.shape[0] > 0
    if boxes.dtype.kind != "f":
        boxes = boxes.astype(np.float32)
    # print(' non_max_suppression ')
    # Compute box areas
    y1 = boxes[:, 0]
    x1 = boxes[:, 1]
    y2 = boxes[:, 2]
    x2 = boxes[:, 3]
    area = (y2 - y1) * (x2 - x1)

    # Get indices of boxes sorted by scores (highest first)
    ixs = scores.argsort()[::-1]

    pick = []
    print('====> Initial Ixs: ', ixs)
    while len(ixs) > 0:
        # Pick top box and add its index to the list
        i = ixs[0]
        cy = y1[i] + (y2[i] - y1[i]) // 2
        cx = x1[i] + (x2[i] - x1[i]) // 2
        print('     ix : ', ixs, 'ctr (x,y)', cx, ' ', cy, ' box:', boxes[i],
              ' compare ', i, ' with ', ixs[1:])
        pick.append(i)

        # Compute IoU of the picked box with the rest
        iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
        print('     ious:', iou)
        # Identify boxes with IoU over the threshold. np.where
        # returns indices into ixs[1:], so add 1 to get
        # indices into ixs.
        tst = np.where(iou > threshold)
        remove_ixs = tst[0] + 1
        print('     np.where(iou > threshold) : ', tst,
              ' tst[0] (index into ixs[1:]) : ', tst[0],
              ' remove_ixs (index into ixs) : ', remove_ixs)

        # Remove indices of the picked and overlapping boxes.
        ixs = np.delete(ixs, remove_ixs)
        ixs = np.delete(ixs, 0)
        print(' ending ixs (after deleting ixs[0]): ', ixs, ' picked so far: ',
              pick)
    print('====> Final Picks: ', pick)
    return np.array(pick, dtype=np.int32)
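
# All of the examples on this page rely on a module-level compute_iou() helper that is
# not shown. Below is a minimal sketch of that helper, assuming the standard Mask R-CNN
# signature compute_iou(box, boxes, box_area, boxes_area); it is illustrative and may
# differ in detail from the utils.compute_iou each project actually uses.
import numpy as np

def compute_iou(box, boxes, box_area, boxes_area):
    """IoU of one box [y1, x1, y2, x2] against an array of boxes [N, (y1, x1, y2, x2)]."""
    y1 = np.maximum(box[0], boxes[:, 0])
    y2 = np.minimum(box[2], boxes[:, 2])
    x1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[3], boxes[:, 3])
    intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
    union = box_area + boxes_area - intersection
    return intersection / union

# Toy call for the debug NMS above: boxes 0 and 1 overlap heavily, box 2 is separate.
# boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]])
# scores = np.array([0.9, 0.8, 0.7])
# debug_non_max_suppression(boxes, scores, 0.3)  # keeps indices [0, 2]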
Example #2
def assign_next_frame(prior, post, th=0.7, pr=False):
    """Greedily match boxes in `prior` (previous frame) to boxes in `post` (next frame) by IoU.
    Both are DataFrames with columns y1, x1, y2, x2, a (box area) and labels. Returns the raw
    IoU matrix, the best IoU per matched prior box, a post-label -> prior-label dict, and a
    boolean mask of which prior boxes found a match above `th`."""
    iou = np.zeros(len(prior))
    status = np.zeros(len(prior))
    iou_mat = np.zeros((len(prior), len(post)))
    for k in range(len(prior)):
        p = prior.loc[k, :]
        iou_mat[k,:] = utils.compute_iou( (p.y1,p.x1,p.y2,p.x2),\
                         post[["y1","x1","y2","x2"]].values,p.a, post["a"].values)
    id_map = {}
    count = min(len(prior), len(post))
    mat = np.copy(iou_mat)
    while count > 0:
        r, k = np.unravel_index(np.argmax(iou_mat, axis=None), iou_mat.shape)
        if iou_mat[r, k] > th:
            id_map[post.at[k, "labels"]] = prior.at[r, "labels"]
            iou[r] = iou_mat[r, k]
            status[r] = 1
        iou_mat[r, :] = -99
        iou_mat[:, k] = -99
        count = count - 1
    return mat, iou, id_map, status.astype(bool)
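
# A minimal, hypothetical usage sketch for assign_next_frame(). The toy frames below are
# not from the original project; they just assume DataFrames indexed 0..N-1 with the
# columns the function expects, and that utils.compute_iou behaves like the sketch above.
import numpy as np
import pandas as pd

prior = pd.DataFrame({"y1": [0, 40], "x1": [0, 40], "y2": [10, 50], "x2": [10, 50],
                      "a": [100, 100], "labels": [1, 2]})
post = pd.DataFrame({"y1": [0, 80], "x1": [0, 80], "y2": [10, 90], "x2": [11, 90],
                     "a": [110, 100], "labels": [7, 8]})
iou_mat, best_iou, id_map, matched = assign_next_frame(prior, post, th=0.7)
# Detection 7 overlaps track 1 strongly (IoU ~0.91), so id_map maps label 7 -> 1 and
# matched is array([True, False]).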
Example #3
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks,
                            config):
    """
    Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    --------
    rpn_rois:           [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids:       [instance count] Integer class IDs
    gt_boxes:           [instance count, (y1, x1, y2, x2)]
    gt_masks:           [height, width, instance count] Ground truth masks. Can be full
                        size or mini-masks.

    Returns:
    --------
   
    rois:               [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids:          [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes:             [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
                        bbox refinements.
    masks:              [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
                        to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(gt, rpn_rois, gt_box_area[i],
                                           rpn_roi_area)

    # Assign ROIs to GT boxes
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(overlaps.shape[0]),
                               rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU below 0.5. The commented-out line would
    # restrict negatives to the 0.1-0.5 range (hard example mining).
    # TODO: To hard example mine or not to hard example mine, that's the question
    #     bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]

    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
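    # For illustration: with typical Mask R-CNN defaults (TRAIN_ROIS_PER_IMAGE=200,
    # ROI_POSITIVE_RATIO=0.33) this gives fg_roi_count = int(200 * 0.33) = 66, leaving
    # ~134 slots to be filled with BG ROIs below. These defaults are assumptions, not
    # values read from this particular config.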
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.

        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions again without any lower IoU bound
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            keep_extra_ids = np.random.choice(keep_bg_ids,
                                              remaining,
                                              replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)

    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0

    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]

    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.NUM_CLASSES, 4),
                      dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks.
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0],
                      config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            # (scipy.misc.imresize was removed in SciPy 1.3; the second variant of this
            # function below uses a utils.resize wrapper instead)
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
                                             interp='nearest') / 255.0).astype(bool)
            # Place the mini mask in the placeholder
            class_mask = placeholder

        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = scipy.misc.imresize(
            m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
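
# The class-aware bbox targets above come from utils.box_refinement(). A minimal sketch of
# that transform is given below, assuming the usual Mask R-CNN convention of
# (dy, dx, log(dh), log(dw)) deltas from each ROI to its assigned GT box; the real helper
# may differ in detail.
import numpy as np

def box_refinement(box, gt_box):
    """Deltas that map `box` [N, (y1, x1, y2, x2)] onto `gt_box` of the same shape."""
    box = box.astype(np.float32)
    gt_box = gt_box.astype(np.float32)

    height = box[:, 2] - box[:, 0]
    width = box[:, 3] - box[:, 1]
    center_y = box[:, 0] + 0.5 * height
    center_x = box[:, 1] + 0.5 * width

    gt_height = gt_box[:, 2] - gt_box[:, 0]
    gt_width = gt_box[:, 3] - gt_box[:, 1]
    gt_center_y = gt_box[:, 0] + 0.5 * gt_height
    gt_center_x = gt_box[:, 1] + 0.5 * gt_width

    dy = (gt_center_y - center_y) / height
    dx = (gt_center_x - center_x) / width
    dh = np.log(gt_height / height)
    dw = np.log(gt_width / width)
    return np.stack([dy, dx, dh, dw], axis=1)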
Example #4
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Same logic as the function above, but mask resizing goes through utils.resize
    instead of the removed scipy.misc.imresize."""
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]

    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)

    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0

    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]

    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini mask in the placeholder
            class_mask = placeholder

        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = utils.resize(m, config.MASK_SHAPE)
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
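
# The variant above swaps scipy.misc.imresize for a utils.resize() wrapper. A minimal
# sketch of such a wrapper, assuming it simply defers to skimage.transform.resize with
# the value range preserved (the actual project helper may add more options), is:
import numpy as np
import skimage.transform

def resize(image, output_shape, order=1):
    # Cast to float so boolean mini-masks can be interpolated, and keep the original
    # value range so np.round(...).astype(bool) above behaves as expected.
    return skimage.transform.resize(image.astype(np.float32), output_shape, order=order,
                                    mode="constant", preserve_range=True,
                                    anti_aliasing=False)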
Example #5
def Test_Dataset(TaskID,
                 savePath,
                 classList,
                 modelConfig,
                 modelPath,
                 testData,
                 saveFig=False,
                 saveMask=False,
                 IoU_thr=0):
    """Run Mask R-CNN inference on every image in `testData`, optionally save figures and
    masks, and print per-image TP / FP / FN counts against the ground-truth masks."""

    MODEL_DIR = ""
    if os.path.isfile(modelPath):
        MODEL_DIR = os.path.dirname(modelPath)
    elif os.path.isdir(modelPath):
        MODEL_DIR = modelPath
    else:
        print("No exist path : ", modelPath)

    SAVE_DIR = savePath + "/Test_" + TaskID
    if (not os.path.isdir(SAVE_DIR)):
        os.makedirs(SAVE_DIR)

    Mask_DIR = None
    if (saveMask):
        Mask_DIR = SAVE_DIR + "/Mask"
        if (not os.path.isdir(Mask_DIR)):
            os.makedirs(Mask_DIR)
    Figure_DIR = None
    if (saveFig):
        Figure_DIR = SAVE_DIR + "/Fig"  # Fig: overlay,contour,bbox,score, ...
        if (not os.path.isdir(Figure_DIR)):
            os.makedirs(Figure_DIR)

    print("prepare Data...")
    testData.prepare()
    print("prepare Data Done !")

    print("load Model...")
    model = modellib.MaskRCNN(mode="inference",
                              model_dir=MODEL_DIR,
                              config=modelConfig)
    if os.path.isdir(modelPath):
        modelPath = model.find_last()
    model.load_weights(modelPath, by_name=True)
    print("load Model Done !")

    Classes = copy.deepcopy(classList)
    Classes.insert(0, "BG")
    print("Classes : ", Classes)

    ##############################################################
    # Run detection
    print("Test Run...")

    for i, image_id in enumerate(testData._image_ids):

        testImage, testFileName = testData.load_image_custom(image_id)
        gt_masks, gt_class_ids = testData.load_mask_custom(
            image_id, testImage.shape)

        start_T = time.time()
        ##########################################################
        # Detection
        result = model.detect([testImage], verbose=0)[0]  # batch=1

        spend_T = time.time() - start_T
        ######################################################
        # Display & Save
        if (saveFig):
            saveFileName = os.path.splitext(testFileName)[0] + ".png"
            if (testImage.shape[2] > 1):
                testImage = testImage[:, :, 0]  # artery phase
            visualize.save_result_figures(testImage, result, Classes, saveFileName, \
                                        truemasks = gt_masks, truemasks_class_id = gt_class_ids,\
                                        maskDir = Mask_DIR, figDir = Figure_DIR)

        if (gt_masks is None):
            continue

        ##########################################################
        # Evaluation
        bboxes = result['rois']
        class_ids = result['class_ids']
        masks = result['masks']
        scores = result['scores']

        if (len(bboxes) == 0):
            continue

        FP_count = 0
        Detected = [False] * gt_masks.shape[2]  # for TP & FN
        IoUs = []
        for i_pred in range(len(bboxes)):
            # y1, x1, y2, x2
            bbox = bboxes[i_pred]
            class_id = class_ids[i_pred]
            mask = masks[:, :, i_pred]
            score = scores[i_pred]

            if class_id == 0:
                continue

            gt_idx = np.where(gt_class_ids == class_id)[0]
            truemasks = gt_masks[:, :, gt_idx]
            FP = True
            for i_true in range(truemasks.shape[2]):
                truemask = truemasks[:, :, i_true]
                truemask = np.asarray(truemask, dtype="uint8")
                _, truemask_binary = cv2.threshold(truemask, 0, 1,
                                                   cv2.THRESH_BINARY)

                true_bbox = utils.extract_bboxes(
                    np.expand_dims(truemask_binary, -1))[0]  # 1 truemask = 1 instance
                if not np.any(true_bbox):
                    # no ground truth
                    continue

                mask = np.asarray(mask)
                iou = utils.compute_iou(
                    true_bbox, np.asarray([bbox]),
                    (true_bbox[2] - true_bbox[0]) *
                    (true_bbox[3] - true_bbox[1]),
                    np.array([(bbox[2] - bbox[0]) * (bbox[3] - bbox[1])]))[0]

                if iou > IoU_thr:
                    # TP: this GT instance is matched with IoU over the threshold
                    Detected[gt_idx[i_true]] = True
                    IoUs.append(iou)
                    FP = False

            if FP:
                FP_count += 1

        TP_count = sum(Detected)
        FN_count = len(Detected) - TP_count

        print("{0}  :  TP({1}), IoU({2}) / FP({3}) / FN({4}) ".format(
            testFileName, TP_count, IoUs, FP_count, FN_count))

    del model
    gc.collect()
    K.clear_session()

    print("Test Done ! : ", TaskID)