def compute_batch_ap(image_ids):
    """Compute the Average Precision (AP) for each image in a batch.

    For every image id, loads the ground truth, runs the model's detector,
    and scores the detections with ``utils.compute_ap``.

    Args:
        image_ids: iterable of dataset image ids to evaluate.

    Returns:
        list of per-image AP values, in the same order as ``image_ids``.
    """
    average_precisions = []
    for img_id in image_ids:
        # Load the image and its ground-truth boxes/classes.
        image, image_meta, gt_class_id, gt_bbox = \
            data.load_image_gt(dataset, config, img_id)
        # Run detection; detect() takes a batch, we submit one image.
        detection = model.detect([image], verbose=0)[0]
        # Score detections against ground truth; only AP is kept.
        ap, _precisions, _recalls, _overlaps = utils.compute_ap(
            gt_bbox, gt_class_id,
            detection['rois'], detection['class_ids'], detection['scores'])
        average_precisions.append(ap)
    return average_precisions
# Visualize the detections for the most recently processed image.
# NOTE(review): relies on `results`, `image`, `gt_class_id`, and `gt_bbox`
# being defined earlier in the file (outside this view) — confirm they come
# from a prior detection cell.
ax = get_ax(1)
r = results[0]  # detection dict for the one submitted image: rois/class_ids/scores
visualize.display_instances(image,
                            r['rois'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")
# Log the ground-truth arrays for side-by-side comparison with the predictions.
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)

#%% Precision-Recall
# Draw precision-recall curve for this image's detections vs. ground truth.
AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id,
                                                     r['rois'], r['class_ids'],
                                                     r['scores'])
visualize.plot_precision_recall(AP, precisions, recalls)

#%%
# Grid of ground truth objects and their predictions
# (uses `overlaps` computed by compute_ap above).
visualize.plot_overlaps(gt_class_id, r['class_ids'], r['scores'], overlaps,
                        dataset.class_names)


#%% Compute mAP @ IoU=50 on Batch of Images
# Compute VOC-style Average Precision
def compute_batch_ap(image_ids):
    APs = []
    for image_id in image_ids:
        # Load image
# NOTE(review): extraction artifact — the page text "Esempio n. 3" and a
# stray "0" were captured here. The preceding duplicate `compute_batch_ap`
# def (L42-45 region) is truncated, and the argument list below belongs to a
# `visualize.display_instances(...)` call whose opening line is missing.
                            gt_bbox,
                            gt_class_id,
                            dataset_train.class_names,
                            ax=get_ax())

# Run detection on a single validation image and visualize the result.
# NOTE(review): `original_image`, `gt_bbox`, and `gt_class_id` must be
# defined earlier in the file (outside this view) — verify.
results = model.detect([original_image], verbose=1)

r = results[0]  # detection dict for the one submitted image
visualize.display_instances(original_image,
                            r['rois'],
                            r['class_ids'],
                            dataset_val.class_names,
                            r['scores'],
                            ax=get_ax())
# Average Precision for this single image.
# NOTE(review): presumably compute_ap defaults to IoU=0.5 — confirm in utils.
AP, precisions, recalls, overlaps =\
    utils.compute_ap(gt_bbox, gt_class_id, r["rois"], r["class_ids"], r["scores"])
print(AP)

#%% Evaluation
# Compute VOC-Style mAP @ IoU=0.5
# Evaluates `limit` images from the validation set. Increase/decrease the
# limit to trade accuracy for runtime.
# NOTE(review): the original comment claimed "Running on 10 images" but the
# code uses limit = 500 — the code value is kept; confirm the intended count.

limit = 500
coco = dataset_val.load_coco(dataset_dir,
                             val_type,
                             config.IMAGE_MAX_DIM,
                             year=year,
                             return_coco=True,
                             auto_download=download)
dataset_val.prepare()
evaluate_coco(model, dataset_val, coco, "bbox", limit=limit)