Example #1
def compute_batch_ap(dataset, image_ids, verbose=1):
    APs = []
    for image_id in image_ids:
        # Load image and ground truth
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)

        # Run object detection
        # (model.detect_molded gave only one mask here, hence model.detect)
        results = model.detect([image], verbose=0)
        # Compute AP over the IoU range 0.5 to 0.95
        r = results[0]
        ap = utils.compute_ap_range(gt_bbox,
                                    gt_class_id,
                                    gt_mask,
                                    r['rois'],
                                    r['class_ids'],
                                    r['scores'],
                                    r['masks'],
                                    verbose=0)
        APs.append(ap)

        if verbose:
            meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
            print("{:3} {}   AP: {:.2f}".format(
                meta["image_id"][0], meta["original_image_shape"][0], ap))
    return APs  # one AP per image; may be useful to keep it like that
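A minimal usage sketch, assuming `dataset_val`, `model` and `config` are set up as in the snippet; the dataset-level mAP is just the mean of the per-image APs:

APs = compute_batch_ap(dataset_val, dataset_val.image_ids, verbose=0)
print("mAP @ IoU=0.5-0.95: {:.3f}".format(np.mean(APs)))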
Example #2
def compute_batch_ap(dataset, image_ids, verbose=1):
    APs = []
    for image_id in image_ids:
        # Load image
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, config,
                                   image_id, use_mini_mask=False)
        # Run object detection
        results = model.detect_molded(image[np.newaxis],
                                      image_meta[np.newaxis],
                                      verbose=0)
        # Compute AP over range 0.5 to 0.95
        r = results[0]
        ap = utils.compute_ap_range(gt_bbox,
                                    gt_class_id,
                                    gt_mask,
                                    r['rois'],
                                    r['class_ids'],
                                    r['scores'],
                                    r['masks'],
                                    verbose=0)
        APs.append(ap)
        if verbose:
            info = dataset.image_info[image_id]
            meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
            print("{:3} {}   AP: {:.2f}".format(
                meta["image_id"][0], meta["original_image_shape"][0], ap))
    return APs
Example #3
def compute_batch_ap(model, dataset, image_ids, config, verbose=1):
    assert isinstance(model, model_lib.MaskRCNN)
    APs = []
    # Split the image ids into batches of BATCH_SIZE
    batches = [image_ids[i:i + config.BATCH_SIZE] for i in range(0, len(image_ids), config.BATCH_SIZE)]
    for batch_ids in batches:
        # Skip the last batch if it is incomplete
        if len(batch_ids) != config.BATCH_SIZE:
            continue
        images = []
        images_meta = []
        gts = []
        for image_id in batch_ids:
            # Load image and ground truth
            log.debug('loading image %s' % image_id)
            image, image_meta, gt_class_id, gt_bbox, gt_mask = model_lib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
            images.append(image)
            images_meta.append(image_meta)
            # Keep the ground truth per image; reusing only the last image's
            # ground truth for the whole batch would skew the AP
            gts.append((gt_class_id, gt_bbox, gt_mask))

        # Run object detection on the whole batch
        results = model.detect_molded(np.stack(images, axis=0), np.stack(images_meta, axis=0), verbose=0)
        assert config.BATCH_SIZE == len(results)

        # Compute AP over the IoU range 0.5 to 0.95
        for r, image_meta, (gt_class_id, gt_bbox, gt_mask) in zip(results, images_meta, gts):
            ap = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask, r['rois'], r['class_ids'], r['scores'], r['masks'], verbose=0)
            APs.append(ap)
            if verbose:
                meta = model_lib.parse_image_meta(image_meta[np.newaxis, ...])
                log.debug("{:3} {}   AP: {:.2f}".format(meta["image_id"][0], meta["original_image_shape"][0], ap))

    return APs
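Because incomplete batches are skipped, up to BATCH_SIZE - 1 images at the end of the list are never evaluated. One possible workaround, as a sketch (the helper is hypothetical, not part of the snippet): pad the last batch with repeats of its final id and ignore the duplicated results.

def pad_batch(batch_ids, batch_size):
    # Repeat the last id until the batch is full; n_real marks how many
    # entries are genuine so the caller can slice results[:n_real]
    n_real = len(batch_ids)
    padded = list(batch_ids) + [batch_ids[-1]] * (batch_size - n_real)
    return padded, n_real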
Example #4
def evaluate_raspberry(model, dataset, annotations, eval_type="segm", limit=100, image_ids=None):

    # Compute COCO-style mAP @ IoU=0.5-0.95.
    # Runs on `limit` randomly chosen images. Increase for better accuracy.
    inference_config = RaspberryConfig()
    if image_ids is None:
        image_ids = np.random.choice(dataset.image_ids, limit)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, inference_config,
                                   image_id, use_mini_mask=False)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP over the IoU range 0.5 to 0.95
        AP = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)

    mAP = np.mean(APs)
    print("mAP: ", mAP)
    average.append(mAP)         # `average` is assumed to be a module-level list
    f.write("%s, " % str(mAP))  # `f` is assumed to be a module-level results file
    print("evaluation completed and recorded")
Example #5
def perform_eval(model, dataset):
    APs = []
    for image_id in dataset.image_ids:
        # Load image
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, config,
                                   image_id, use_mini_mask=False)
        # Run object detection
        results = model.detect_molded(image[np.newaxis],
                                      image_meta[np.newaxis],
                                      verbose=0)
        # Compute AP over range 0.5 to 0.95
        r = results[0]
        if r['masks'].shape[-1] > 0:  # skip images where nothing was detected
            ap = utils.compute_ap_range(gt_bbox,
                                        gt_class_id,
                                        gt_mask,
                                        r['rois'],
                                        r['class_ids'],
                                        r['scores'],
                                        r['masks'],
                                        verbose=0)
            APs.append(ap)
            info = dataset.image_info[image_id]
            meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
            print("{:3} {}   AP: {:.2f}".format(
                meta["image_id"][0], meta["original_image_shape"][0], ap))
    print("Mean AP overa {} images: {:.4f}".format(len(APs), np.mean(APs)))
Example #6
def compute_aps(detections,
                dataset,
                config,
                preds_cache=None,
                shape=256,
                thresh=0.95,
                just_stats=True):
    APs = []
    n_rois = []
    image_ids = []
    n_bbox = []
    for image_id, r in tqdm_notebook(detections.items()):
        image_ids.append(image_id)
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, config, image_id, use_mini_mask=False)
        # Keep only detections whose score exceeds the threshold
        keep = np.where(r['scores'] > thresh)
        rois = r['rois'][keep].astype(int)
        if (len(rois) + len(gt_bbox)) == 0:
            # Neither predictions nor ground truth: AP is undefined
            ap = np.nan
        elif len(rois) == 0 or len(gt_bbox) == 0:
            # Ground truth without predictions (or vice versa): AP is 0
            ap = 0.0
        else:
            masks = r['masks'][:, :, keep[0]]
            scores = r['scores'][keep]
            class_ids = np.ones(rois.shape[0])
            ap = utils.compute_ap_range(gt_bbox,
                                        gt_class_id,
                                        gt_mask,
                                        rois,
                                        class_ids,
                                        scores,
                                        masks,
                                        iou_thresholds=list(
                                            np.arange(0.4, .8, 0.05)),
                                        verbose=0)

        APs.append(ap)
        n_rois.append(rois.shape[0])
        n_bbox.append(gt_bbox.shape[0])
    print("mAP {:.4f} ".format(np.nanmean(APs)))
    print("mean n rois {:.2f} ".format(np.mean(n_rois)))
    return pd.DataFrame({
        'ap': APs,
        'n_roi': n_rois,
        'n_bbox': n_bbox
    }, index=image_ids)
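Since the function returns a per-image DataFrame, the weakest images are easy to inspect; a small usage sketch (the `detections` dict of image_id to detection result is an assumption carried over from the snippet above):

df = compute_aps(detections, dataset, config, thresh=0.95)
# Lowest-AP images first; NaN rows had neither predictions nor ground truth
print(df.sort_values('ap').head(10))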
Example #7
def compute_metrics(images, gt_boxes, gt_class_ids, gt_masks,
                    results_list) -> List[Tuple]:
    mAPs = []
    APs_75 = []
    APs_5 = []
    recalls_75 = []
    recalls_5 = []
    roundness_list = []

    for image, gt_bbox, gt_class_id, gt_mask, results in zip(
            images, gt_boxes, gt_class_ids, gt_masks, results_list):
        # Compute metrics
        r = results

        AP_75, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r['rois'], r['class_ids'], r['scores'], r['masks'], iou_threshold=0.75)

        AP_5, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r['rois'], r['class_ids'], r['scores'], r['masks'], iou_threshold=0.5)

        mAP = utils.compute_ap_range(gt_bbox,
                                     gt_class_id,
                                     gt_mask,
                                     r['rois'],
                                     r['class_ids'],
                                     r['scores'],
                                     r['masks'],
                                     verbose=False)

        recall_75, _ = utils.compute_recall(r['rois'], gt_bbox, iou=0.75)
        recall_5, _ = utils.compute_recall(r['rois'], gt_bbox, iou=0.5)
        # Roundness
        contours = get_contours(r)
        img_roundness = avg_roundness(contours)

        mAPs.append(mAP)
        APs_75.append(AP_75)
        APs_5.append(AP_5)
        recalls_75.append(recall_75)
        recalls_5.append(recall_5)
        roundness_list.append(img_roundness)

    names = [
        '1. mAP@IoU Rng', '2. mAP@IoU=75', '3. mAP@IoU=50',
        '4. Recall @ IoU=75', '5. Recall @ IoU=50', '6. Roundness'
    ]
    values_list = [mAPs, APs_75, APs_5, recalls_75, recalls_5, roundness_list]
    avg_values = [np.mean(values) for values in values_list]

    return list(zip(names, avg_values))
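A usage sketch for the returned (name, value) pairs, assuming the parallel input lists from the signature:

for name, value in compute_metrics(images, gt_boxes, gt_class_ids,
                                   gt_masks, results_list):
    print("{}: {:.4f}".format(name, value))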
Example #8
def compute_batch_ap(model, model_config, dataset, limit, verbose=1):
    """
    Validates the model on the dataset in the provided directory and
    computes the validation metric (mAP).
    """
    # Validation dataset
    dataset_val = AlliumDataset()
    dataset_val.load_allium(dataset, "val")
    dataset_val.prepare()
    if limit:
        image_ids = dataset_val.image_ids[:limit]
    else:
        image_ids = dataset_val.image_ids
    print("Images: {}\nClasses: {}".format(len(dataset_val.image_ids),
                                           dataset_val.class_names))

    # Compute mAP
    APs = []
    for image_id in image_ids:
        # Load image
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset_val, model_config,
                                   image_id, use_mini_mask=False)
        # Run object detection
        results = model.detect_molded(image[np.newaxis],
                                      image_meta[np.newaxis],
                                      verbose=0)
        # Compute AP over range 0.5 to 0.95
        r = results[0]
        ap = utils.compute_ap_range(gt_bbox,
                                    gt_class_id,
                                    gt_mask,
                                    r['rois'],
                                    r['class_ids'],
                                    r['scores'],
                                    r['masks'],
                                    verbose=0)
        APs.append(ap)
        if verbose:
            # info = dataset.image_info[image_id]
            meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
            print("{:3} {}   AP: {:.2f}".format(
                meta["image_id"][0], meta["original_image_shape"][0], ap))

    # Print the results
    mAP = np.mean(APs)
    print("Average precisions are: {}".format(APs))
    print("Mean average precision is: {}".format(mAP))

    return mAP
Example #9
def run_eval(dataset, show_image=False):
    APs = []
    for image_id in dataset.image_ids:
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, inference_config,
                                   image_id, use_mini_mask=False)
        results = model.detect([image], verbose=1)
        r = results[0]

        _, ax = plt.subplots(1, figsize=(16, 16))

        # Compute AP over the IoU range 0.5 to 0.95
        AP = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask, r["rois"],
                                    r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)

        visualize.display_instances(image,
                                    r['rois'],
                                    r['masks'],
                                    r['class_ids'],
                                    dataset.class_names,
                                    r['scores'],
                                    figsize=(8, 8),
                                    ax=ax)

        # Save the visualization
        filename = dataset.image_reference(image_id)
        plt.savefig(os.path.join(OUTPUT_FOLDER, filename + ".png"))
        if show_image:
            plt.show()
        plt.close()  # one figure is created per image; free it
    return APs
Example #10
def compute_ap_range(dataset, model, inference_config):
    image_ids = dataset.image_ids  #np.random.choice(dataset.image_ids, 10)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image_name = dataset.image_info[image_id]['id']

        print(image_name + " ap range results:")
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, inference_config, image_id, use_mini_mask=False)

        # Run detection
        results = model.detect([image], verbose=0)
        r = results[0]

        # Compute AP
        AP = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask, r["rois"],
                                    r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)

    print("mAP Range: ", np.mean(APs))
Example #11
# Compute mAP
#APs_05: IoU = 0.5
#APs_all: IoU from 0.5-0.95 with increments of 0.05
image_ids = np.random.choice(dataset_val.image_ids, 489)
APs_05 = []
APs_all = []

for image_id in image_ids:
    # Load images and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask = \
        modellib.load_image_gt(dataset_val, inference_config,
                               image_id, use_mini_mask=False)
    # Run object detection
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP
    AP_05, precisions, recalls, overlaps = \
        utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                         r["rois"], r["class_ids"], r["scores"], r['masks'])
    APs_05.append(AP_05)

    AP_all = \
        utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask,
                         r["rois"], r["class_ids"], r["scores"], r['masks'])
    APs_all.append(AP_all)

print("mAP: ", np.mean(APs_05))
print("mAP: ", np.mean(APs_all))
Example #12
APs = []
for image_id in image_ids:
    # Load image and ground truth data
    image, _, class_id, boxes, r_mask, g_mask, b_mask =\
        modellib.load_image_gt(dataset_val, inference_config,
                               image_id, use_mini_mask=False)
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP over range
    ap = utils.compute_ap_range(boxes,
                                class_id,
                                r_mask,
                                g_mask,
                                b_mask,
                                r['rois'],
                                r['class_ids'],
                                r['scores'],
                                r['r_masks'],
                                r['g_masks'],
                                r['b_masks'],
                                verbose=0)

    APs.append(ap)

print("Mean AP over {} images: {:.4f}".format(len(APs), np.mean(APs)))
Example #13
def evaluate_model(model, config):
    """
    Evaluate the loaded model on the target dataset
    :param model: the architecture model, with loaded weights
    :param config: the configuration class for this dataset
    """

    # Automatically choose the dataset according to the config class
    if isinstance(config, configurations.TabletopConfigInference):
        # Load the validation dataset
        dataset_val = datasets.TabletopDataset()
    elif isinstance(config, configurations.YCBVideoConfigInference):
        dataset_val = datasets.YCBVideoDataset()
    else:
        raise ValueError("Unsupported inference config: {}".format(type(config)))

    dataset_val.load_dataset(args.dataset, "val")
    dataset_val.prepare()

    # Compute COCO-Style mAP @ IoU=0.5-0.95 in 0.05 increments
    # Running on all images
    #image_ids = np.random.choice(dataset_val.image_ids, 200)
    image_ids = dataset_val.image_ids
    APs = []
    AP50s = []
    AP75s = []
    image_batch_vector = []
    image_batch_eval_data = []
    img_batch_count = 0

    import time
    t_inference = 0

    class image_eval_data:
        def __init__(self, image_id, gt_class_id, gt_bbox, gt_mask):
            self.IMAGE_ID = image_id
            self.GT_CLASS_ID = gt_class_id
            self.GT_BBOX = gt_bbox
            self.GT_MASK = gt_mask
            self.DETECTION_RESULTS = None

    print("Evaluating model...")
    progbar = Progbar(target=len(image_ids))

    for idx, image_id in enumerate(image_ids):
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, config,
                                   image_id, use_mini_mask=False)

        # Compose a vector of images and data
        image_batch_vector.append(image)
        image_batch_eval_data.append(
            image_eval_data(image_id, gt_class_id, gt_bbox, gt_mask))
        img_batch_count += 1

        # If a batch is ready, go on to detection
        # The last few images in the dataset will not be used if batch size > 1
        if img_batch_count < config.BATCH_SIZE:
            continue

        # Run object detection
        t_start = time.time()
        results = model.detect(image_batch_vector, verbose=0)
        t_inference += (time.time() - t_start)

        assert len(image_batch_eval_data) == len(results)

        for eval_data, detection_results in zip(image_batch_eval_data,
                                                results):
            eval_data.DETECTION_RESULTS = detection_results
            # Compute mAP at different IoU (as msCOCO mAP is computed)
            AP = utils.compute_ap_range(
                eval_data.GT_BBOX,
                eval_data.GT_CLASS_ID,
                eval_data.GT_MASK,
                eval_data.DETECTION_RESULTS["rois"],
                eval_data.DETECTION_RESULTS["class_ids"],
                eval_data.DETECTION_RESULTS["scores"],
                eval_data.DETECTION_RESULTS['masks'],
                verbose=0)
            AP50, _, _, _ = utils.compute_ap(
                eval_data.GT_BBOX,
                eval_data.GT_CLASS_ID,
                eval_data.GT_MASK,
                eval_data.DETECTION_RESULTS["rois"],
                eval_data.DETECTION_RESULTS["class_ids"],
                eval_data.DETECTION_RESULTS["scores"],
                eval_data.DETECTION_RESULTS['masks'],
                iou_threshold=0.5)
            AP75, _, _, _ = utils.compute_ap(
                eval_data.GT_BBOX,
                eval_data.GT_CLASS_ID,
                eval_data.GT_MASK,
                eval_data.DETECTION_RESULTS["rois"],
                eval_data.DETECTION_RESULTS["class_ids"],
                eval_data.DETECTION_RESULTS["scores"],
                eval_data.DETECTION_RESULTS['masks'],
                iou_threshold=0.75)

            APs.append(AP)
            AP50s.append(AP50)
            AP75s.append(AP75)

        # Reset the batch info
        image_batch_vector = []
        image_batch_eval_data = []
        img_batch_count = 0

        progbar.update(idx + 1)

    print("\nmAP[0.5::0.05::0.95]: ", np.mean(APs))
    print("mAP[0.5]: ", np.mean(AP50s))
    print("mAP[0.75]: ", np.mean(AP75s))

    print("Inference time for", len(image_ids), "images: ", t_inference,
          "s \tAverage FPS: ",
          len(image_ids) / t_inference)

    return APs
Example #14
def compute_batch_metrics(dataset):
    APs50 = []
    APs75 = []
    APs_range = []
    ARs50 = []
    ARs75 = []
    ARs_range = []
    APs_range_small = []
    APs_range_medium = []
    APs_range_large = []
    ARs_range_small = []
    ARs_range_medium = []
    ARs_range_large = []

    for image_id in dataset.image_ids:
        image = dataset.load_image(image_id)

        # In this dataset, load_mask returns ground truth boxes and class ids
        gt_bbox, gt_class_id = dataset.load_mask(image_id)

        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.03,
                                             minNeighbors=6,
                                             minSize=(24, 24))

        # Draw a rectangle around the faces
        pred_bbox = []
        pred_scores = []
        pred_class_id = []
        count = 0
        for (x, y, w, h) in faces:
            #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            pred_bbox.append([y, x, y + h, x + w])
            pred_scores.append(1.)
            pred_class_id.append(1)
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            count += 1
        if count == 0:
            # No detections: append a placeholder so pred_bbox becomes an
            # array of shape (1, 0), which is checked for below
            pred_bbox.append([])
            pred_scores.append(1.)
            pred_class_id.append(1)

        pred_bbox = np.array(pred_bbox)
        pred_scores = np.array(pred_scores)
        pred_class_id = np.array(pred_class_id)
        r = {
            'rois': pred_bbox,
            'class_ids': pred_class_id,
            'scores': pred_scores
        }
        '''print("r['rois']:", r['rois'])
        img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', img)
        cv2.waitKey(0)'''
        # Run object detection
        #results = model.detect([image], verbose=0)
        # Compute AP
        #r = results[0]
        if pred_bbox.shape != (1, 0):
            AP50, precisions50, recalls50, overlaps =\
                utils.compute_ap(gt_bbox, gt_class_id,
                                  r['rois'], r['class_ids'], r['scores'], config=config, iou_threshold=0.5)
            #print('AP50:', AP50)
            #print('pred_bbox.shape:', pred_bbox.shape)
            AP75, precisions75, recalls75, overlaps =\
                utils.compute_ap(gt_bbox, gt_class_id,
                                  r['rois'], r['class_ids'], r['scores'], config=config, iou_threshold=0.75)
            AP_range =\
                utils.compute_ap_range(gt_bbox, gt_class_id,
                                  r['rois'], r['class_ids'], r['scores'], config=config)

            AR50, positive_ids50 =\
                utils.compute_recall(r['rois'], gt_bbox, iou=0.5)

            AR75, positive_ids75 =\
                utils.compute_recall(r['rois'], gt_bbox, iou=0.75)

            AR_range =\
                utils.compute_ar_range(r['rois'], gt_bbox)
        else:
            AP50 = 0.
            AP75 = 0.
            AP_range = 0.
            AR50 = 0.
            AR75 = 0.
            AR_range = 0.

        APs50.append(AP50)
        APs75.append(AP75)
        APs_range.append(AP_range)
        ARs50.append(AR50)
        ARs75.append(AR75)
        ARs_range.append(AR_range)

        gt_ind_small, gt_ind_medium, gt_ind_large = find_bbox_indices_by_size(
            gt_bbox)

        gt_bbox_small = gt_bbox[gt_ind_small, :]
        gt_bbox_medium = gt_bbox[gt_ind_medium, :]
        gt_bbox_large = gt_bbox[gt_ind_large, :]

        gt_class_id_small = gt_class_id[gt_ind_small]
        gt_class_id_medium = gt_class_id[gt_ind_medium]
        gt_class_id_large = gt_class_id[gt_ind_large]

        if pred_bbox.shape != (1, 0):
            pred_ind_small, pred_ind_medium, pred_ind_large = find_bbox_indices_by_size(
                r['rois'])

            pred_bbox_small = r['rois'][pred_ind_small, :]
            pred_bbox_medium = r['rois'][pred_ind_medium, :]
            pred_bbox_large = r['rois'][pred_ind_large, :]

            pred_class_id_small = r['class_ids'][pred_ind_small]
            pred_class_id_medium = r['class_ids'][pred_ind_medium]
            pred_class_id_large = r['class_ids'][pred_ind_large]

            pred_score_small = r['scores'][pred_ind_small]
            pred_score_medium = r['scores'][pred_ind_medium]
            pred_score_large = r['scores'][pred_ind_large]

        if gt_bbox_small.size != 0:
            if pred_bbox.shape != (1, 0):
                AP_range_small =\
                    utils.compute_ap_range(gt_bbox_small, gt_class_id_small,
                                      pred_bbox_small, pred_class_id_small, pred_score_small, config=config)

                AR_range_small =\
                    utils.compute_ar_range(pred_bbox_small, gt_bbox_small)
            else:
                AP_range_small = 0.
                AR_range_small = 0.

            APs_range_small.append(AP_range_small)
            ARs_range_small.append(AR_range_small)

        if gt_bbox_medium.size != 0:
            if pred_bbox.shape != (1, 0):
                AP_range_medium =\
                    utils.compute_ap_range(gt_bbox_medium, gt_class_id_medium,
                                      pred_bbox_medium, pred_class_id_medium, pred_score_medium, config=config)

                AR_range_medium =\
                    utils.compute_ar_range(pred_bbox_medium, gt_bbox_medium)
            else:
                AP_range_medium = 0.
                AR_range_medium = 0.

            APs_range_medium.append(AP_range_medium)
            ARs_range_medium.append(AR_range_medium)

        if gt_bbox_large.size != 0:
            if pred_bbox.shape != (1, 0):
                AP_range_large =\
                utils.compute_ap_range(gt_bbox_large, gt_class_id_large,
                                  pred_bbox_large, pred_class_id_large, pred_score_large, config=config)

                AR_range_large =\
                utils.compute_ar_range(pred_bbox_large, gt_bbox_large)
            else:
                AP_range_large = 0.
                AR_range_large = 0.

            APs_range_large.append(AP_range_large)
            ARs_range_large.append(AR_range_large)

    return APs50, APs75, APs_range, ARs50, ARs75, ARs_range, APs_range_small, APs_range_medium,\
        APs_range_large, ARs_range_small, ARs_range_medium, ARs_range_large
Example #15
# Run object detection
results = model.detect_molded(np.expand_dims(image, 0),
                              np.expand_dims(image_meta, 0),
                              verbose=1)

# Display results
r = results[0]
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

# Compute AP over range 0.5 to 0.95 and print it
utils.compute_ap_range(gt_bbox,
                       gt_class_id,
                       gt_mask,
                       r['rois'],
                       r['class_ids'],
                       r['scores'],
                       r['masks'],
                       verbose=1)

visualize.display_differences(image,
                              gt_bbox,
                              gt_class_id,
                              gt_mask,
                              r['rois'],
                              r['class_ids'],
                              r['scores'],
                              r['masks'],
                              dataset.class_names,
                              ax=get_ax(),
                              show_box=False)
Example #16
def compute_batch_ap(dataset, image_ids, verbose=1):
    APs_general = []
    APs_ring = []
    APs_crack = []
    # Loop through the whole validation dataset
    for image_id in image_ids:
        # Load image ground truths
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        print(image_id)
        # Run object detection
        results = model.detect([image], verbose=0)
        # Compute AP over range 0.5 to 0.95
        r = results[0]
        ap = utils.compute_ap_range(gt_bbox,
                                    gt_class_id,
                                    gt_mask,
                                    r['rois'],
                                    r['class_ids'],
                                    r['scores'],
                                    r['masks'],
                                    verbose=0)

        APs_general.append(ap)
        #print('APs_general', APs_general)
        # get AP values for ring and crack separately
        AP_loop = []
        for i in [1, 2]:
            #print("LOOP START", i)
            if gt_mask[:, :, gt_class_id == i].shape[-1] > 0:
                ap = utils.compute_ap_range(
                    gt_bbox[gt_class_id == i],
                    gt_class_id[gt_class_id == i],
                    gt_mask[:, :, gt_class_id == i],
                    r['rois'][r['class_ids'] == i],
                    r['class_ids'][r['class_ids'] == i],
                    r['scores'][r['class_ids'] == i],
                    r['masks'][:, :, r['class_ids'] == i],
                    verbose=0)
                AP_loop.append(ap)
                #print(ap)
            else:
                ap = np.nan
                AP_loop.append(ap)
                #print(ap)

        #print('AP_loop', AP_loop)
        APs_ring.append(AP_loop[0])
        APs_crack.append(AP_loop[1])

        if verbose:
            meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
            print("{:3} {}   AP: {:.2f}".format(
                meta["image_id"][0], meta["original_image_shape"][0],
                APs_general[-1]))

    mAP_general = np.nanmean(APs_general)
    mAP_ring = np.nanmean(APs_ring)
    mAP_crack = np.nanmean(APs_crack)

    return mAP_general, mAP_ring, mAP_crack  # dataset-level means over the per-image APs
Example #17
def compute_batch_ap(dataset, image_ids, verbose=1):
    """
    # Load validation dataset if you need to use this function:
    #   dataset = slum.slumDataset()
    #   dataset.load_slum(folder_path, fol)
    """

    APs = []
    IOUs = []

    for image_id in image_ids:
        # Load image
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, config,
                                   image_id, use_mini_mask=False)

        # Run object detection
        results = model.detect_molded(image[np.newaxis],
                                      image_meta[np.newaxis],
                                      verbose=0)
        # Compute AP over range 0.5 to 0.95
        r = results[0]

        # Merge all instance masks into a single binary mask
        gt_merge_mask = np.zeros((gt_mask.shape[:2]))
        for i in range(gt_mask.shape[2]):
            gt_merge_mask = np.logical_or(gt_merge_mask, gt_mask[:, :, i])

        pred_merge_mask = np.zeros((r['masks'].shape[:2]))
        for i in range(r['masks'].shape[2]):
            pred_merge_mask = np.logical_or(pred_merge_mask,
                                            r['masks'][:, :, i])

        pred_merge_mask = np.expand_dims(pred_merge_mask, 2)
        #print(pred_merge_mask.shape)
        pred_merge_mask, wind, scale, pad, crop = utils.resize_image(
            pred_merge_mask, 1024, 1024)
        #print(pred_merge_mask.shape,gt_merge_mask.shape)

        # Note: sklearn's jaccard_similarity_score is deprecated; for binary
        # inputs it behaves like per-pixel accuracy rather than true IoU
        iou = jaccard_similarity_score(np.squeeze(pred_merge_mask),
                                       gt_merge_mask)

        # AP at IoU=0.5: np.arange(0.5, 1.0) yields the single threshold 0.5
        print("mAP at 50")
        ap = utils.compute_ap_range(gt_bbox,
                                    gt_class_id,
                                    gt_mask,
                                    r['rois'],
                                    r['class_ids'],
                                    r['scores'],
                                    r['masks'],
                                    np.arange(0.5, 1.0),
                                    verbose=0)

        # Make sure AP doesn't go above 1!
        if ap > 1.0:
            ap = 1.0

        APs.append(ap)
        IOUs.append(iou)

        if verbose:
            info = dataset.image_info[image_id]
            meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
            print("{:3} {}   AP: {:.2f} Image_id: {}, IOU: {}".format(
                meta["image_id"][0], meta["original_image_shape"][0], ap,
                image_id, iou))
    return APs, IOUs
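`jaccard_similarity_score` has been removed from recent scikit-learn releases, and for binary inputs it measured per-pixel agreement rather than true IoU. A dependency-free replacement sketch that computes the actual mask IoU (values will differ from the sklearn score):

def mask_iou(pred_mask, gt_mask):
    # Intersection-over-union of two binary masks of the same shape
    pred = pred_mask.astype(bool)
    gt = gt_mask.astype(bool)
    union = np.logical_or(pred, gt).sum()
    if union == 0:
        return 1.0  # both masks empty: treat as perfect agreement
    return np.logical_and(pred, gt).sum() / union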
Example #18
def evaluation(model, dataset_dir, subset):

    # Create directory
    if not os.path.exists(RESULTS_DIR):
        os.makedirs(RESULTS_DIR)
    submit_dir = "submit_{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
    submit_dir = os.path.join(RESULTS_DIR, submit_dir)
    os.makedirs(submit_dir)

    # Read dataset
    dataset_val = vehDetectionDataset()
    dataset_val.load_evaluation(dataset_dir, subset)
    dataset_val.prepare()

    numImages_eval = dataset_val.image_ids.size
    # Prepare for saving as matlab file
    images_eval = []
    mAP_all = []
    mAP_all_range = []
    precisions_all = []
    recalls_all = []
    overlaps_all = []

    for image_id in range(0,numImages_eval):
        print(image_id)
        source_id = dataset_val.image_info[image_id]["id"]
        print(source_id)
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, config,
                                   image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        mAP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])

        # Compute AP over the IoU range (0.5:0.05:0.95)
        mAP_range = \
            utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask,
                                   r["rois"], r["class_ids"], r["scores"], r['masks'],
                                   iou_thresholds=None, verbose=1)


        # Append results from image
        mAP_all.append(mAP)
        mAP_all_range.append(mAP_range)
        images_eval.append(source_id)
        precisions_all.append(precisions)
        recalls_all.append(recalls)
        overlaps_all.append(overlaps)

        # Save image with shape polygon around vehicles. Bbox and mask can be activated, s. below
        visualize.display_instances(
            image, r['rois'], r['masks'], r['class_ids'],
            dataset_val.class_names,  scores=None,# r['scores'],
            show_bbox=False, show_mask=False,
            colors=None,
            figsize=(19.20,10.80))   # can also add title="Predictions"
        plt.box(on=None) # plt.box(False)
        plt.savefig(os.path.join(submit_dir, dataset_val.image_info[image_id]["id"]))
        plt.close() # plt.clf()

    print("Evaluation Process with Mask-RCNN Done. Files saved to ", submit_dir)

    print("mAP: ", np.mean(mAP_all))

    # FK: save to a MATLAB file (os.path.join avoids the non-portable "\" path)
    saveMatFileName = os.path.join(submit_dir, "evaluation_maskrcnn_output.mat")

    savemat(saveMatFileName,
            {"mAP_all": mAP_all, "mAP_all_range": mAP_all_range, "images_eval": images_eval, "precisions_all": precisions_all, "recalls_all": recalls_all, "overlaps_all": overlaps_all})
Example #19
for image_id in dataset.image_ids:
    image, image_meta, gt_class_id, gt_bbox, gt_mask =\
       modellib.load_image_gt(dataset, config,
                              image_id, use_mini_mask=False)
    results = model.detect([image], verbose=0)

    r = results[0]
    index = [x for x in range(len(r['class_ids'])) if r['class_ids'][x] <= 4]

    AP_pascal, precision, recall, overlap = utils.compute_ap(
        gt_bbox, gt_class_id, gt_mask, r["rois"][index], r["class_ids"][index],
        r["scores"][index], r['masks'][..., index])
    AP_coco = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask,
                                     r["rois"][index], r["class_ids"][index],
                                     r["scores"][index], r['masks'][...,
                                                                    index])

    # if r["rois"].shape[0]:
    #     # Precision-Recall curve
    #     visualize.plot_precision_recall(AP, precision, recall)
    #     # Grid of ground truth objects and their predictions
    #     visualize.plot_overlaps(gt_class_id, r['class_ids'], r['scores'], overlap, dataset.class_names)

    mAP_pascal.append(AP_pascal)
    mAP_coco.append(AP_coco)
    mprecision.append(precision)
    mrecall.append(recall)
    moverlap.append(overlap)

print("mAP PASCAL", np.mean(mAP_pascal) * 100, "%")
Example #20
                flow_step += 1
                P2, P3, P4, P5, P6 = warp.predict([key_P2, key_P3, key_P4, key_P5, key_P6, flow])

        inputs = [image_metas, P2, P3, P4, P5, P6]
        result = maskrcnn.detect_molded(inputs)
        
        # Compute AP
        if (image_id+1) % args.num_frames == 0 or args.fix:
            if np.sum(result["scores"]) == 0:
                print("{} Fasle".format(image_id))
                continue
                
            AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                            result["rois"], result["class_ids"], result["scores"], result['masks'])
            AP50s.append(AP)
            AP = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask,
                           result["rois"], result["class_ids"], result["scores"], result['masks'], verbose=0)
            APs.append(AP)
            print("step: {:4d}, AP50: {:.3f}, mAP: {:.3f}".format(image_id, np.mean(AP50s), np.mean(APs)))
        
        # Save
        if args.is_save:
            save_name = args.save_dir + 'mask' + str(image_id) + '.png'
            colors = np.array(label_colors)/255.0
            pred_img = visualize.display_instances(image[:,:,3:], result['rois'], result['masks'], result['class_ids'], 
                            dataset.class_names, result['scores'], colors = colors, save_name=save_name)
    print("step: {:4d}, AP50: {:.3f}, mAP: {:.3f}".format(image_id, np.mean(AP50s), np.mean(APs)))
    print("segmentation steps:", seg_step, "flow steps:", flow_step)

if __name__ == '__main__':
    main()
Example #21
    def process( self, args ):
        """
        Run inference with trained model
        """

        # load validation / test dataset
        test_ds = ShipDataset( (768,768) )
        test_ds.load_info( os.path.join( args.data_path, 'test' ) )
        test_ds.prepare()

        # Load weights
        print("Loading weights ", args.model_pathname)
        self._model.load_weights(args.model_pathname, by_name=True)

        for sample in range ( 10 ):

            # pick random image
            image_id = random.choice( test_ds.image_ids )
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(  test_ds, 
                                                                                        self._config, 
                                                                                        image_id, 
                                                                                        use_mini_mask=False )
            # get image info
            info = test_ds.image_info[ image_id ]
            print("image ID: {}.{} ({}) {}".format( info["source"], 
                                                    info["id"], 
                                                    image_id, 
                                                    test_ds.image_reference(image_id) ) )

            # run object detection
            results = self._model.detect([image], verbose=1)
            r = results[0]
    
            # compute AP over range 0.5 to 0.95 and print it
            utils.compute_ap_range( gt_bbox, 
                                    gt_class_id, 
                                    gt_mask,
                                    r['rois'], 
                                    r['class_ids'], 
                                    r['scores'], 
                                    r['masks'],
                                    verbose=1)
    
            # display results
            visualize.display_instances(    image, 
                                            r['rois'], 
                                            r['masks'], 
                                            r['class_ids'], 
                                            test_ds.class_names, 
                                            r['scores'], 
                                            title="Predictions",
                                            show_bbox=False)

            # display actual vs predicted differences
            visualize.display_differences(  image, 
                                            gt_bbox, 
                                            gt_class_id, 
                                            gt_mask,
                                            r['rois'], 
                                            r['class_ids'], 
                                            r['scores'], 
                                            r['masks'], 
                                            test_ds.class_names, 
                                            title="Actual vs Predict Difference" )

        return
Example #22
                      10))  # this has to be the number of ground truth
        TPs_bbox.append(np.zeros(10))
        FPs_bbox.append(np.zeros(10))
        FNs_bbox.append(
            np.repeat(gt_mask.shape[-1],
                      10))  # this has to be the number of ground truth
        APlist.append(np.zeros(10))
        mask_normal = np.zeros(shape=(imgheight, imgheight))
        mask_normal = np.reshape(mask_normal, (imgheight, imgheight, 1))
    else:
        ###calculate all the stuff
        mask_normal = r['masks']  # for the combined mask at the end
        ap = utils.compute_ap_range(gt_bbox,
                                    gt_class_id,
                                    gt_mask,
                                    r['rois'],
                                    r['class_ids'],
                                    r['scores'],
                                    r['masks'],
                                    verbose=0)
        mAP.append(ap)

        #compute mask IoU
        IoU_m = utils.compute_overlaps_masks(gt_mask, r['masks'])
        IoU_m = np.nan_to_num(np.mean(IoU_m))  #change nans to 0
        mask_IoU.append(IoU_m)

        #compute bbox IoU
        IoU_bbox = utils.compute_overlaps(gt_bbox, r['rois'])
        IoU_bbox = np.nan_to_num(np.mean(IoU_bbox))