import cv2
import numpy as np

import utils


def run_edge_boxes(image_file, num_proposals, draw_only_positives):
    img = cv2.imread('./JPEGImages/' + image_file + '.jpg')

    parameters_combinations = [(0.25, 0.85), (0.45, 0.45), (0.65, 0.75),
                               (0.85, 0.35), (0.85, 0.85)]
    recall_list = []

    # The edge detector and the edge/orientation maps do not depend on the
    # EdgeBoxes parameters, so compute them once outside the loop.
    edge_detection = cv2.ximgproc.createStructuredEdgeDetection('model.yml.gz')
    rgb_im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    edges = edge_detection.detectEdges(np.float32(rgb_im / 255.0))
    orientation = edge_detection.computeOrientation(edges)
    edges = edge_detection.edgesNms(edges, orientation)

    for alpha, beta in parameters_combinations:
        edge_boxes = cv2.ximgproc.createEdgeBoxes(alpha=alpha, beta=beta)
        proposal_boxes, scores = edge_boxes.getBoundingBoxes(
            edges, orientation)

        image_output = img.copy()

        # Draw up to num_proposals boxes, optionally keeping only those
        # that overlap a ground-truth box.
        for box, score in zip(proposal_boxes[:num_proposals],
                              scores[:num_proposals]):
            x, y, w, h = box
            if draw_only_positives and not utils.compare_gt(
                    (x, y, x + w, y + h)):
                continue
            cv2.rectangle(image_output, (x, y), (x + w, y + h),
                          (0, 255, 0), 1, cv2.LINE_AA)
            cv2.putText(image_output, "{:.2f}".format(score[0]), (x, y),
                        cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 255),
                        1, cv2.LINE_AA)

        if draw_only_positives:
            utils.draw_groundtruth(image_output)

        recall_list.append(utils.compute_recall(proposal_boxes))
        # cv2.imshow("Edgeboxes output " + str(parameter_tuple), image_output)
        cv2.imwrite(
            "./eb_output_" + image_file + "_" + str(parameter_tuple) + ".jpg",
            image_output)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

    return recall_list
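
Both this example and the selective-search one below lean on a `utils` module that is not shown. A minimal sketch of what `utils.compare_gt` and `utils.compute_recall` might look like, assuming IoU-based matching against a module-level list of ground-truth (x1, y1, x2, y2) boxes; the names, the global list, and the 0.5 threshold are all assumptions, not the original code:

GT_BOXES = []  # ground-truth boxes as (x1, y1, x2, y2); filled elsewhere

def iou(box_a, box_b):
    """Intersection over union of two (x1, y1, x2, y2) boxes."""
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / float(area_a + area_b - inter)

def compare_gt(box, threshold=0.5):
    """True if box overlaps any ground-truth box at IoU >= threshold."""
    return any(iou(box, gt) >= threshold for gt in GT_BOXES)

def compute_recall(proposals, threshold=0.5):
    """Fraction of ground-truth boxes covered by at least one proposal."""
    if not GT_BOXES:
        return 0.0
    covered = sum(
        any(iou((x, y, x + w, y + h), gt) >= threshold
            for (x, y, w, h) in proposals)
        for gt in GT_BOXES)
    return covered / float(len(GT_BOXES))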
Example #2
import numpy as np
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support


def generate_results(gt, pred):
    cm = confusion_matrix(gt, pred, labels=[0, 1])
    my_f1 = compute_f1(cm)
    my_precision = compute_precision(cm)
    my_recall = compute_recall(cm)

    prec, rec, f1, _ = precision_recall_fscore_support(gt,
                                                       pred,
                                                       labels=[0, 1],
                                                       average=None)
    # Compare against scikit-learn using absolute differences, so a large
    # negative deviation cannot slip through the check.
    assert (np.abs(my_precision - prec) < 1e-3).all()
    assert (np.abs(my_recall - rec) < 1e-3).all()
    assert (np.abs(my_f1 - f1) < 1e-3).all()

    return cm, prec, rec, f1
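
The helpers `compute_precision`, `compute_recall`, and `compute_f1` are not defined in this snippet. A minimal sketch consistent with the assertions above, assuming scikit-learn's convention that confusion-matrix rows are true labels and columns are predictions (an empty class and the resulting division by zero are not handled):

import numpy as np

def compute_precision(cm):
    """Per-class precision: diagonal over column sums (predicted counts)."""
    return np.diag(cm) / cm.sum(axis=0)

def compute_recall(cm):
    """Per-class recall: diagonal over row sums (true counts)."""
    return np.diag(cm) / cm.sum(axis=1)

def compute_f1(cm):
    """Per-class F1: harmonic mean of precision and recall."""
    p, r = compute_precision(cm), compute_recall(cm)
    return 2 * p * r / (p + r)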
Example #3
def run_selective_search(image_file, num_proposals, draw_only_positives):
    img = cv2.imread('./JPEGImages/' + image_file + '.jpg')

    # create Selective Search Segmentation Object using default parameters
    ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()

    ss.setBaseImage(img)

    strategy_color = cv2.ximgproc.segmentation.createSelectiveSearchSegmentationStrategyColor(
    )
    strategy_texture = cv2.ximgproc.segmentation.createSelectiveSearchSegmentationStrategyTexture(
    )
    strategy_size = cv2.ximgproc.segmentation.createSelectiveSearchSegmentationStrategySize(
    )
    strategy_fill = cv2.ximgproc.segmentation.createSelectiveSearchSegmentationStrategyFill(
    )
    strategy_combined = cv2.ximgproc.segmentation.createSelectiveSearchSegmentationStrategyMultiple(
        strategy_color, strategy_texture, strategy_size, strategy_fill)

    # Pair each strategy with a short name so output filenames stay readable.
    strategies = [('texture', strategy_texture), ('color', strategy_color),
                  ('size', strategy_size), ('fill', strategy_fill),
                  ('combined', strategy_combined)]

    recall_list = []

    for strategy_name, strategy in strategies:

        ss.switchToSelectiveSearchFast()
        # ss.switchToSelectiveSearchQuality()

        # switchTo* installs its own default strategies, so clear them and
        # only then add the single strategy under test; adding it before the
        # switch would get it wiped.
        ss.clearStrategies()
        ss.addStrategy(strategy)

        proposal_boxes = ss.process()

        image_output = img.copy()

        # Draw up to num_proposals region proposals, optionally keeping only
        # those that overlap a ground-truth box.
        for x, y, w, h in proposal_boxes[:num_proposals]:
            if draw_only_positives and not utils.compare_gt(
                    (x, y, x + w, y + h)):
                continue
            cv2.rectangle(image_output, (x, y), (x + w, y + h),
                          (0, 255, 0), 1, cv2.LINE_AA)

        if draw_only_positives:
            utils.draw_groundtruth(image_output)

        recall_list.append(utils.compute_recall(proposal_boxes))
        # cv2.imshow("Selective search output " + str(strategy), image_output)
        cv2.imwrite(
            "./ss_output_" + "_" + image_file + "_" + str(strategy) + ".jpg",
            image_output)
        # cv2.waitKey(0)
        ss.clearStrategies()
        # cv2.destroyAllWindows()

    return recall_list
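
A hypothetical driver tying the two proposal generators together on one VOC-style image; the image id '000012' and the proposal budget are placeholders:

if __name__ == '__main__':
    eb_recalls = run_edge_boxes('000012', num_proposals=200,
                                draw_only_positives=True)
    ss_recalls = run_selective_search('000012', num_proposals=200,
                                      draw_only_positives=True)
    print('EdgeBoxes recall per (alpha, beta):', eb_recalls)
    print('Selective Search recall per strategy:', ss_recalls)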
Example #4
image_to_gt = utils.load_evaluation_set(hp, files['iota10k'], files['gt_fn'],
                                        args.min_rater_count)

# Arrange metrics for the gt labels in df.
image_metrics = utils.compute_image_metrics(image_to_gt,
                                            label_metrics,
                                            files,
                                            method=hp['eval_method'],
                                            do_verify=hp['do_verify'],
                                            gt_in_voc=hp['gt_vocab'],
                                            y_force=hp['y_force'])

images = list(set(image_metrics.ImageID.values.tolist()))
raters_ub = utils.raters_performance(images, files['gt_fn'])
print('Raters agreement: %s' % raters_ub)

# Compute precision & recall over all metrics.
precision, sem_p, precision_mat = utils.compute_precision(
    image_metrics, hp['k'])
recall, sem_r, recall_mat = utils.compute_recall(image_metrics, hp['k'])
vis.print_top_pr(precision, recall)
utils.save_results(hp, files['results_dir'], precision, sem_p, recall, sem_r)

# Plot precision, recall and correlation. Save specific examples to HTML.
if args.plot_figures:
    vis.plot_precision(hp, files, precision, sem_p, raters_ub)
    vis.plot_recall(hp, files, recall, sem_r)
    vis.plot_precision_vs_recall(hp, files, precision, recall, raters_ub)
    # vis.plot_correlation(hp, files, image_metrics)
    vis.write_models_to_html(image_metrics, hp, files)
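
The sem_p and sem_r values returned alongside each score suggest that utils.compute_precision and utils.compute_recall aggregate per-image scores into a mean plus its standard error. A generic sketch of that aggregation (a hypothetical helper, not this project's actual implementation):

import numpy as np

def mean_and_sem(per_image_scores):
    """Mean and standard error of the mean over per-image scores."""
    scores = np.asarray(per_image_scores, dtype=float)
    return scores.mean(), scores.std(ddof=1) / np.sqrt(len(scores))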
Example #5
# These are the same as the previous step (refined anchors
# after NMS) but with coordinates normalized to [0, 1] range.
limit = 50
# Convert back to image coordinates for display
h, w = config.IMAGE_SHAPE[:2]
proposals = rpn['proposals'][0, :limit] * np.array([h, w, h, w])
visualize.draw_boxes(image, refined_boxes=proposals, ax=get_ax())

# Measure the RPN recall (percent of objects covered by anchors)
# Here we measure recall for 3 different methods:
# - All anchors
# - All refined anchors
# - Refined anchors after NMS
iou_threshold = 0.7

recall, positive_anchor_ids = utils.compute_recall(model.anchors, gt_bbox,
                                                   iou_threshold)
print("All Anchors ({:5})       Recall: {:.3f}  Positive anchors: {}".format(
    model.anchors.shape[0], recall, len(positive_anchor_ids)))

recall, positive_anchor_ids = utils.compute_recall(rpn['refined_anchors'][0],
                                                   gt_bbox, iou_threshold)
print("Refined Anchors ({:5})   Recall: {:.3f}  Positive anchors: {}".format(
    rpn['refined_anchors'].shape[1], recall, len(positive_anchor_ids)))

recall, positive_anchor_ids = utils.compute_recall(proposals, gt_bbox,
                                                   iou_threshold)
print("Post NMS Anchors ({:5})  Recall: {:.3f}  Positive anchors: {}".format(
    proposals.shape[0], recall, len(positive_anchor_ids)))
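
utils.compute_recall in the Mask R-CNN utilities reports the fraction of ground-truth boxes matched by at least one of the given boxes at the IoU threshold, together with the indices of the positive boxes. A self-contained sketch of that computation, assuming (y1, x1, y2, x2) box arrays; this is an approximation, not the library's exact code:

import numpy as np

def boxes_iou_matrix(boxes_a, boxes_b):
    """Pairwise IoU for box arrays in (y1, x1, y2, x2) format."""
    y1 = np.maximum(boxes_a[:, None, 0], boxes_b[None, :, 0])
    x1 = np.maximum(boxes_a[:, None, 1], boxes_b[None, :, 1])
    y2 = np.minimum(boxes_a[:, None, 2], boxes_b[None, :, 2])
    x2 = np.minimum(boxes_a[:, None, 3], boxes_b[None, :, 3])
    inter = np.clip(y2 - y1, 0, None) * np.clip(x2 - x1, 0, None)
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)

def recall_at_iou(pred_boxes, gt_boxes, iou_threshold):
    """Fraction of ground-truth boxes matched by at least one prediction."""
    overlaps = boxes_iou_matrix(pred_boxes, gt_boxes)
    positive_ids = np.where(overlaps.max(axis=1) >= iou_threshold)[0]
    matched_gt = overlaps.argmax(axis=1)[positive_ids]
    recall = len(set(matched_gt)) / gt_boxes.shape[0]
    return recall, positive_ids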

# Get input and output to classifier and mask heads.
mrcnn = model.run_graph([image], [
Example #6
# image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
recalls_list = []
precisions_list = []
for image_id in range(80):
    # Load image and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask = \
        modellib.load_image_gt(dataset_val, inference_config,
                               image_id, use_mini_mask=False)
    # Run object detection
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP (the standard Mask R-CNN compute_ap also takes the
    # ground-truth and predicted masks)
    AP, precisions, recalls, overlaps = \
        utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                         r["rois"], r["class_ids"], r["scores"], r["masks"])
    APs.append(AP)
    # compute_recall returns (recall, indices of matched predictions),
    # not a precision value
    recall, positive_ids = utils.compute_recall(r['rois'], gt_bbox, iou=0.5)
    recalls_list.append(recall)
    # summarize this image's per-detection precisions from compute_ap
    precisions_list.append(np.mean(precisions))

print("mAP: ", np.mean(APs))
print("recalls_list: ", np.mean(recalls))
print("precisions_list: ", np.mean(precisions))