Example #1
def compute_mean_AP(model, config, dataset, n_images):
    """ Compute VOC-Style mAP @ IoU=0.5        
    """
    image_ids = np.random.choice(dataset.image_ids, n_images)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        result = modellib.load_image_gt(dataset,
                                        config,
                                        image_id,
                                        use_mini_mask=False)
        if len(result) == 5:
            image, image_meta, class_ids, gt_bbox, gt_mask = result
        else:
            image, image_meta, gt_bbox, gt_mask = result
            class_ids = gt_bbox[:, 4]
            gt_bbox = gt_bbox[:, :4]
        # Not used below: model.detect() molds its inputs internally
        molded_images = np.expand_dims(modellib.mold_image(image, config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps = utils.compute_ap(
            gt_bbox, class_ids, r["rois"], r["class_ids"], r["scores"])
        APs.append(AP)
    return np.mean(APs)
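
A minimal usage sketch for the function above, assuming model is a Mask R-CNN model already loaded in inference mode and that inference_config and dataset_val are the project's inference config and prepared validation dataset (these names are placeholders, not taken from the example):

# Hypothetical objects: model, inference_config and dataset_val come from your own setup
mAP = compute_mean_AP(model, inference_config, dataset_val, n_images=10)
print("mAP @ IoU=0.5: %.3f" % mAP)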
Example #2
def evaluate_model(dataset, model, cfg):
    APs = list()
    for image_id in dataset.image_ids:
        # load image, bounding boxes and masks for the image id
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, cfg, image_id, use_mini_mask=False)
        # scale pixel values (mold_image subtracts the configured mean pixel)
        scaled_image = modellib.mold_image(image, cfg)
        # convert image into one sample
        sample = expand_dims(scaled_image, 0)
        # make prediction
        yhat = model.detect(sample, verbose=0)
        # extract results for first sample
        r = yhat[0]
        try:
            # calculate statistics, including AP
            AP, _, _, _ = utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                           r["rois"], r["class_ids"],
                                           r["scores"], r['masks'])
            # store
            APs.append(AP)
            print("good image id : %d" % image_id)
            print("AP : %.3f" % AP)
        except Exception:
            # skip images where AP cannot be computed
            # (e.g. no ground-truth instances or no detections)
            print("bad image id : %d" % image_id)
    # calculate the mean AP across all images
    mAP = mean(APs)
    return mAP
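
Example #2 calls expand_dims and mean without a np. prefix and relies on modellib and utils, so it assumes imports roughly like the following; the exact package path depends on how the Mask R-CNN code is laid out in your project (mrcnn.model / mrcnn.utils is the usual matterport layout):

from numpy import expand_dims, mean
import mrcnn.model as modellib
from mrcnn import utils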
Example #3
def evaluate_apc(model, config, dataset, eval_type="bbox", limit=0):
    """
    Evaluation on APC dataset, using VOC-Style mAP # IoU=0.5 for bbox
    @TODO: add segment evaluation
    :param model:
    :param config:
    :param dataset:
    :param eval_type:
    :param limit:
    :return:
    """
    image_ids = dataset.image_ids
    if limit:
        image_ids = np.random.choice(dataset.image_ids, limit)

    t_prediction = 0
    t_start = time.time()
    APs = []

    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, config), 0)

        # Run object detection
        t = time.time()
        results = model.detect([image], verbose=0)
        r = results[0]
        t_prediction += (time.time() - t)

        # Compute AP
        AP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id,
                             r["rois"], r["class_ids"], r["scores"])
        APs.append(AP)

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
    print("mAP: ", np.mean(APs))
Example #4
def evaluate_wpif(model, config, dataset, eval_type="bbox", limit=0):
    """
    Evaluation on WPIF dataset, using VOC-Style mAP # IoU=0.5 for bbox
    @TODO: add segment evaluation
    :param model:
    :param config:
    :param dataset:
    :param eval_type:
    :param limit:
    :return:
    """
    image_ids = dataset.image_ids
    if limit:
        image_ids = np.random.choice(dataset.image_ids, limit)

    t_prediction = 0
    t_start = time.time()
    APs = []

    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, config), 0)

        # Run object detection
        t = time.time()
        results = model.detect([image], verbose=0)
        r = results[0]
        t_prediction += (time.time() - t)

        # Compute AP
        AP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id,
                             r["rois"], r["class_ids"], r["scores"])
        APs.append(AP)

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
    print("mAP: ", np.mean(APs))
Example #5
def evaluate_maskrcnn(dataset_val, inference_config=InferenceConfig()):
    # NOTE: relies on a global `model` (Mask R-CNN in inference mode)
    image_ids = np.random.choice(dataset_val.image_ids, 10)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset_val, inference_config,
                                   image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps =\
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)


    print("mAP: ", np.mean(APs))
    return APs
Example #6
def evaluate_engine(model,
                    dataset,
                    inference_config,
                    eval_type="bbox",
                    limit=0,
                    image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with valiadtion data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Pick images from the dataset unless the caller supplied image_ids
    if image_ids is None:
        image_ids = np.random.choice(dataset.image_ids, 100)

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    t_prediction = 0
    t_start = time.time()

    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, inference_config,
                                   image_id, use_mini_mask=False)
        molded_images = np.expand_dims(
            modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps =\
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)

    print("mAP: ", np.mean(APs))
    print("Total time: ", time.time() - t_start)
Example #7
def plot_actual_vs_predicted(dataset, model, cfg, n_images=1):
    # plot ground truth (left) and predictions (right) for the first n_images
    for i in range(n_images):
        # load the image and mask
        image = dataset.load_image(i)
        mask, _ = dataset.load_mask(i)
        # scale pixel values (mold_image subtracts the configured mean pixel)
        scaled_image = modellib.mold_image(image, cfg)
        # convert image into one sample
        sample = expand_dims(scaled_image, 0)
        # make prediction
        yhat = model.detect(sample, verbose=0)[0]
        # define subplot
        pyplot.subplot(n_images, 2, i * 2 + 1)
        # plot raw pixel data
        pyplot.imshow(image)
        pyplot.title('Actual')
        # plot masks
        for j in range(mask.shape[2]):
            pyplot.imshow(mask[:, :, j], cmap='gray', alpha=0.3)
        # get the context for drawing boxes
        pyplot.subplot(n_images, 2, i * 2 + 2)
        # plot raw pixel data
        pyplot.imshow(image)
        pyplot.title('Predicted')
        ax = pyplot.gca()
        # plot each box
        for box in yhat['rois']:
            # get coordinates
            y1, x1, y2, x2 = box
            # calculate width and height of the box
            width, height = x2 - x1, y2 - y1
            # create the shape
            rect = Rectangle((x1, y1), width, height, fill=False, color='red')
            # draw the box
            ax.add_patch(rect)
    # show the figure
    pyplot.show()
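
A usage sketch for the plotting helper, assuming a prepared test dataset, a model in inference mode and a matching config (test_set, model and cfg are placeholder names); enlarging the figure first helps when n_images > 1:

from matplotlib import pyplot
from matplotlib.patches import Rectangle

pyplot.figure(figsize=(10, 15))
plot_actual_vs_predicted(test_set, model, cfg, n_images=3)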
Example #8
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                            dataset_val.class_names,
                            r['scores'],
                            ax=get_ax())

# ## Evaluation


# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
for image_id in image_ids:
    # Load image and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
        dataset_val, inference_config, image_id, use_mini_mask=False)
    molded_images = np.expand_dims(
        modellib.mold_image(image, inference_config), 0)
    # Run object detection
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP
    AP, precisions, recalls, overlaps = utils.compute_ap(
        gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"],
        r['masks'])
    APs.append(AP)

print("mAP: ", np.mean(APs))

Example #9
    results = model.detect([original_image], verbose=1)

    # original_image = dataset_val.load_image(image_id)


    r = results[0]
    visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                                dataset_val.class_names, r['scores'], ax=get_ax())

    # Compute VOC-Style mAP @ IoU=0.5
    # Running on 10 images. Increase for better accuracy.
    image_ids = np.random.choice(dataset_val.image_ids, 10)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, inference_config,
                                   image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)

    print("mAP: ", np.mean(APs))
Example #10
        print("Loading weights From ", args.model)
        model.load_weights(args.model, by_name=True)

        VALSET_DIR = os.path.join(args.dataset, "stage1_val")
        dataset_val = NucleiDataset()
        dataset_val.load_image_info(VALSET_DIR)
        dataset_val.prepare()
        print("Evaluate {} images".format(len(dataset_val.image_ids)))

        APs = []

        for image_id in tqdm(dataset_val.image_ids):
            # Load image and ground truth data (use the same config as mold_image below)
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
                dataset_val, config, image_id, use_mini_mask=False)
            molded_images = np.expand_dims(modellib.mold_image(image, config),
                                           0)

            # Run object detection
            results = model.detect([image], verbose=0)
            r = results[0]

            # Compute AP
            AP, precisions, recalls, overlaps = utils.compute_ap(
                gt_bbox,
                gt_class_id,
                r["rois"],
                r["class_ids"],
                r["scores"],
                iou_threshold=0.5)
            APs.append(AP)
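
If a COCO-style score averaged over several IoU thresholds is wanted instead of a single mAP at IoU=0.5, the same utils.compute_ap call can simply be repeated inside the per-image loop above; a sketch using the same argument layout as Example #10 (whether your copy of compute_ap also expects mask arguments depends on the Mask R-CNN version in use):

# Inside the per-image loop: average AP over IoU thresholds 0.50, 0.55, ..., 0.95
ap_per_threshold = []
for iou in np.arange(0.5, 1.0, 0.05):
    ap, _, _, _ = utils.compute_ap(gt_bbox, gt_class_id,
                                   r["rois"], r["class_ids"], r["scores"],
                                   iou_threshold=iou)
    ap_per_threshold.append(ap)
APs.append(np.mean(ap_per_threshold))

Some versions of the matterport library also provide a utils.compute_ap_range helper that performs this threshold sweep directly.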