from typing import List, Tuple

import numpy as np

# Assumes the Mask R-CNN `utils` module and the project helpers
# `get_contours` / `avg_roundness` are available in this scope.
def compute_metrics(images, gt_boxes, gt_class_ids, gt_masks,
                    results_list) -> List[Tuple]:
    mAPs = []
    APs_75 = []
    APs_5 = []
    recalls_75 = []
    recalls_5 = []
    roundness_list = []

    for image, gt_bbox, gt_class_id, gt_mask, results in zip(
            images, gt_boxes, gt_class_ids, gt_masks, results_list):
        # Compute metrics
        r = results

        AP_75, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r['rois'], r['class_ids'], r['scores'], r['masks'], iou_threshold=0.75)

        AP_5, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r['rois'], r['class_ids'], r['scores'], r['masks'], iou_threshold=0.5)

        mAP = utils.compute_ap_range(gt_bbox,
                                     gt_class_id,
                                     gt_mask,
                                     r['rois'],
                                     r['class_ids'],
                                     r['scores'],
                                     r['masks'],
                                     verbose=False)

        recall_75, _ = utils.compute_recall(r['rois'], gt_bbox, iou=0.75)
        recall_5, _ = utils.compute_recall(r['rois'], gt_bbox, iou=0.5)
        # Roundness
        contours = get_contours(r)
        img_roundness = avg_roundness(contours)

        mAPs.append(mAP)
        APs_75.append(AP_75)
        APs_5.append(AP_5)
        recalls_75.append(recall_75)
        recalls_5.append(recall_5)
        roundness_list.append(img_roundness)

    names = [
        '1. mAP@IoU Rng', '2. mAP@IoU=75', '3. mAP@IoU=50',
        '4. Recall @ IoU=75', '5. Recall @ IoU=50', '6. Roundness'
    ]
    values_list = [mAPs, APs_75, APs_5, recalls_75, recalls_5, roundness_list]
    avg_values = [np.mean(values) for values in values_list]

    return list(zip(names, avg_values))
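
A minimal usage sketch (hypothetical driver; `model`, `dataset`, `config`, and `modellib.load_image_gt` are assumed to be set up as in the examples below):

# Hypothetical: collect GT and detections per image, then print averaged metrics
images, boxes, class_ids, masks, results_list = [], [], [], [], []
for image_id in dataset.image_ids:
    image, _, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
        dataset, config, image_id, use_mini_mask=False)
    images.append(image)
    boxes.append(gt_bbox)
    class_ids.append(gt_class_id)
    masks.append(gt_mask)
    results_list.append(model.detect([image], verbose=0)[0])
for name, value in compute_metrics(images, boxes, class_ids, masks, results_list):
    print("{}: {:.4f}".format(name, value))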
Example #2
    def evaluate(self, n=10):
        # evaluate mAP for crop.
        # version can be '' or banana
        image_ids = self.test_dataset.image_ids  # np.random.choice(dataset.image_ids, 10)

        APs = []
        for image_id in image_ids:
            # Load image and ground truth data
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
                self.test_dataset,
                inference_config,
                image_id,
                use_mini_mask=False)
            # molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
            # Run object detection
            results = self.model.detect([image], verbose=0)
            r = results[0]
            #visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],  class_names, r['scores'], ax=get_ax())
            # Compute AP
            try:
                if 'banana' in self.task:
                    AP = banana_compute_ap(gt_bbox, gt_class_id, gt_mask,
                                           r["rois"], r["class_ids"],
                                           r["scores"], r['masks'])
                else:
                    AP, precisions, recalls, overlaps = utils.compute_ap(
                        gt_bbox, gt_class_id, gt_mask, r["rois"],
                        r["class_ids"], r["scores"], r['masks'])
            except Exception:
                AP = 0
                print('error in compute')
            APs.append(AP)

        print("mAP: ", np.mean(APs))
        return AP, APs
Example #3
def evaluate_model(type='RGB'):

    if (type == 'RGB'):
        modlib = modellib
    else:
        modlib = modellibDepth

    model = modlib.MaskRCNN(mode="inference",
                            config=config,
                            model_dir=DEFAULT_LOGS_DIR)

    if args.model:
        print("Loading weights ", DEFAULT_LOGS_DIR + '/' + args.model)
        model.load_weights(DEFAULT_LOGS_DIR + '/' + args.model, by_name=True)

    dataset_test = NYUDepthDataset(type='test')
    nyu = dataset_test.load_nyu_depth_v2('nyu_depth_v2_labeled.mat')
    dataset_test.prepare()
    mAP = 0
    APs = []
    #image_id = random.choice(dataset_test.image_ids)
    for image_id in dataset_test.image_ids:
        if (type == 'RGB'):
            image, image_meta, gt_class_id, gt_bbox, gt_mask = \
                modlib.load_image_gt(dataset_test, config, image_id, use_mini_mask=False)
        else:
            image, depthimage, image_meta, gt_class_id, gt_bbox, gt_mask = \
                modlib.load_images_gt(dataset_test, config, image_id, use_mini_mask=False)

        info = dataset_test.image_info[image_id]
        print("image ID: {}.{} ({}) {}".format(
            info["source"], info["id"], image_id,
            dataset_test.image_reference(image_id)))
        # Run object detection
        if (type == 'RGB'):
            results = model.detect([image])
        else:
            results = model.detectWdepth([image], [depthimage])

        r = results[0]

        try:
            AP, precisions, recalls, overlaps = utils.compute_ap(
                gt_bbox,
                gt_class_id,
                gt_mask,
                r['rois'],
                r['class_ids'],
                r['scores'],
                r['masks'],
                iou_threshold=IOU_THRESHOLD)
            if (AP > AP_THRESHOLD):
                APs.append(AP)
                print('Image %d AP = : %f' % (image_id, AP))
                dataset_test.nyu_do.load_image(image_id)
        except Exception:
            print('GT classes in the image are not covered')

    mAP = np.mean(np.array(APs))
    print('mAP : %f' % mAP)
Example #4
def compute_map(image_info, r, iou_threshold=0.5):

    gt_bboxes = []
    gt_class_ids = []
    for idx, bbox in enumerate(image_info['bboxes']):
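        # Reorder [x1, y1, x2, y2] to the [y1, x1, y2, x2] format Mask R-CNN expects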
        gt_bboxes.append([bbox[1], bbox[0], bbox[3], bbox[2]])
        gt_class_ids.append(image_info['category_ids'][idx])
    gt_bboxes = np.array(gt_bboxes)
    gt_class_ids = np.array(gt_class_ids)
    mAP = 1
    precision = 1
    recall = 1
    if len(gt_bboxes.shape) == 2:
        mAP, precisions, recalls, overlaps = compute_ap(gt_bboxes,
                                                        gt_class_ids,
                                                        r['rois'],
                                                        r["class_ids"],
                                                        r["scores"],
                                                        iou_threshold=iou_threshold)

        if len(precisions) >= 3:
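            # compute_ap pads precisions/recalls with sentinel endpoints, so [-2] is the last real value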
            precision = precisions[-2]
            recall = recalls[-2]
    else:
        print('gt error: no bboxes')

    return mAP, precision, recall
Example #5
def evaluate(model, dataset_val, num_eval_images=25):
    """
    Compute VOC mAP @ IoU=0.5
    Default on 25 images
    """
    config = InferenceConfig()
    config.display()
    image_ids = np.random.choice(dataset_val.image_ids, num_eval_images)
    APs = []

    for image_id in image_ids:
        # load image and annotations
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset_val, config, image_id, use_mini_mask=False)
        # molded_images = np.expand_dims(modellib.mold_image(image, config), 0)

        # Run model on image (suppress logging)
        results = model.detect([image], verbose=0)
        r = results[0]

        # Compute avg precision
        AP, precision, recall, overlap = utils.compute_ap(
            gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"],
            r["scores"], r["masks"])
        print(f"AP @ 0.5 <{image_id}: {AP}")
        APs.append(AP)
    print(f"mAP: {np.mean(APs)}")
Example #6
def evaluation_fun(dataset_val):
    # Compute VOC-Style mAP @ IoU=0.5
    # Running on 10 images. Increase for better accuracy.
    inference_config = InferenceConfig()
    # Recreate the model in inference mode
    model = modellib.MaskRCNN(mode="inference",
                              config=inference_config,
                              model_dir=MODEL_DIR)
    # model_path = os.path.join(ROOT_DIR, ".h5 file name here")
    model_path = model.find_last()
    # Load trained weights
    print("[Evalution]: Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    image_ids = np.random.choice(dataset_val.image_ids, 10)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, inference_config,
                                   image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)
    print("mAP: ", np.mean(APs))
Example #7
def evaluate(dataset, model, cfg):
    '''
    computes the mean average precision (mAP) of a provided dataset
    using the provided model and configuration

    :param
    - dataset <LicensePlateDataset>: dataset to evaluate mAP on
    - model <mrcnn.model.MaskRCNN>: model used to evaluate mAP
    - cfg <mrcnn.config.Config>: configurations for the mrcnn model

    :return
    - <float>: value for mAP
    '''

    APs = []

    for idx, img_id in enumerate(dataset.image_ids):
        img, img_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(
            dataset, cfg, img_id, use_mini_mask=False)
        sample = np.expand_dims(mold_image(img, cfg), 0)
        pred = model.detect(sample)

        AP, _, _, _ = compute_ap(gt_bbox, gt_class_id, gt_mask,
                                 pred[0]['rois'], pred[0]['class_ids'],
                                 pred[0]['scores'], pred[0]['masks'])

        APs.append(AP)

    return np.mean(APs)
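
A minimal usage sketch (hypothetical names; the dataset loader call is an assumption based on the docstring):

# Hypothetical: evaluate a trained model on a validation split
dataset_val = LicensePlateDataset()
dataset_val.load_dataset('license_plates/', is_train=False)
dataset_val.prepare()
val_mAP = evaluate(dataset_val, model, cfg)
print('validation mAP: %.3f' % val_mAP)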
Example #8
def evaluate(model, limit=0):
    """Runs construction site images dataset evaluation. Compute VOC-Style mAP @ IoU=0.5
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Limit to a subset
    if limit:
        image_ids = np.random.choice(args.dataset.image_ids, limit)
    # With no limit, use all images from args.dataset
    else:
        image_ids = args.dataset.image_ids

    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(args.dataset, inference_config,
                                image_id, use_mini_mask=False)
        molded_images = np.expand_dims(
            modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps =\
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                            r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)

    print("mAP: ", np.mean(APs))
Example #9
def evaluate_ap(model, dataset, inference_config, coco, limit=0, image_ids=None):
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, inference_config, image_id)
        molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps =\
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                            r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)
        print("AP for {}: {}".format(image_id, AP))
    print("mAP for {} images: {}".format(len(image_ids),np.mean(APs)))
Example #10
def calc_mean_average_precision(dataset_val, inference_config, model):
    # Compute VOC-Style mAP @ IoU=0.5
    # Running on 1000 images. Increase for better accuracy.
    image_ids = np.random.choice(dataset_val.image_ids, 1000)
    APs = []
    F1s = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, inference_config,
                                   image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        precision = np.mean(precisions)
        recall = np.mean(recalls)
        # Guard against division by zero when precision and recall are both zero
        denom = precision + recall
        F1_instance = 2 * (precision * recall) / denom if denom > 0 else 0.0
        APs.append(AP)
        F1s.append(F1_instance)
    return np.mean(APs), np.mean(F1s)
Example #11
def evaluate(model, dataset, config):
    # Compute VOC-Style mAP @ IoU=0.5
    APs = []
    cnt = 0
    for image_id in dataset.image_ids:
        try:
            # Load image and ground truth data
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
                dataset, config, image_id, use_mini_mask=False)
            molded_images = np.expand_dims(modellib.mold_image(image, config),
                                           0)
            # Run object detection
            results = model.detect([image], verbose=0)
            r = results[0]
            # Compute AP
            AP, precisions, recalls, overlaps = utils.compute_ap(
                gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"],
                r["scores"], r['masks'])
            APs.append(AP)
        except Exception:
            print("Error while doing inference on image with image_id=" +
                  str(image_id))

        # Printing the progress while evaluating the model:
        cnt = cnt + 1
        print("Progress: {:2.1%}".format(cnt / len(dataset.image_ids)),
              end="\r")

    print("Evaluation completed.")
    print("mAP: ", np.mean(APs))
Example #12
def evaluate(model, dataset, config):
    # Compute VOC-Style mAP @ IoU=0.5
    num_test = 10  # len(dataset.image_ids)
    image_ids = np.random.choice(dataset.image_ids, num_test)
    APs = []

    for image_id in image_ids:
        path = dataset.image_reference(image_id)

        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, config,
                                   image_id, use_mini_mask=False)
        print("path, image_id, gt_class_id, gt_bbox =", path, image_id,
              gt_class_id, gt_bbox)

        if gt_class_id.size != 0:  # skip 'empty' images
            molded_images = np.expand_dims(modellib.mold_image(image, config),
                                           0)
            # Run object detection
            results = model.detect([image], verbose=1)
            r = results[0]
            print("        model predictions = ", r)
            # Compute AP
            AP, precisions, recalls, overlaps =\
                utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                 r["rois"], r["class_ids"], r["scores"], r['masks'])
            print("        AP, precisions, recalls, overlaps =", AP,
                  precisions, recalls, overlaps, "\n")
            APs.append(AP)
        else:
            print("      skipping")

    print("mAP: ", np.mean(APs))
Example #13
def evaluate(model, dataset, config, limit=0):
    image_ids = dataset.image_ids
    if limit:
        image_ids = image_ids[:limit]
    t_prediction = 0
    t_start = time.time()
    APs = []
    for image_id in tqdm(image_ids):
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset, \
            config, image_id, use_mini_mask=False)
        # image = dataset.load_image(image_id)
        # gt_mask, gt_class_id = dataset.load_mask(image_id)
        # gt_bbox = utils.extract_bboxes(gt_mask)
        t = time.time()
        r = model.detect([image])[0]
        t_prediction += (time.time() - t)
        AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, \
            r['rois'], r['class_ids'], r['scores'], r['masks'])
        APs.append(AP)
        if image_id % 2000 == 0:
            print("mean Average Precision @ IoU=50: ", np.mean(APs))
    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
    print("mean Average Precision @ IoU=50: ", np.mean(APs))
Example #14
def evaluate(model, dataset_dir):
    f = open("rgbdi_mAP.txt", "a+")
    # Compute VOC-Style mAP @ IoU=0.5
    # Running on 10 images. Increase for better accuracy.
    image_ids = val_examples
    APs = []
    dataset_val = TrafficDataset()
    dataset_val.load_traffic(dataset_dir, "val")
    dataset_val.prepare()
    for i, image_id in enumerate(image_ids):
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, inference_config, i, use_mini_mask=False)
        molded_images = np.expand_dims(
            modellib.mold_image(image, inference_config), 0)
        print(image)
        print(i)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        print(r)
        f.write(str(r))
        # Compute AP
        AP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        print('AP: %f' % AP)
        APs.append(AP)
        f.write(str(AP))

    print("mAP: ", np.mean(APs))
    f.write("mAP: %f" % np.mean(APs))
Example #15
    def evaluate_mAP(self, model, config, nums):
        """ Randomly choose n images and calculate the overall accuracy based on the given model and configuration
        Args:
            model: The model to calculate the overall accuracy
            config: The model configuration used when doing inference
            nums: The number of images to test
        Returns:
            mAP: the mean of the accuracy after testing n images
        """
        image_ids = np.random.choice(self.image_ids, nums)
        APs = []
        for image_id in image_ids:
            # Load image and ground truth data
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(self, config,
                                                                                      image_id, use_mini_mask=False)
            molded_images = np.expand_dims(modellib.mold_image(image, config), 0)
            # Run object detection
            results = model.detect([image], verbose=0)
            r = results[0]
            # Compute AP
            AP, precisions, recalls, overlaps = \
                utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                 r["rois"], r["class_ids"], r["scores"], r['masks'])
            APs.append(AP)
        # Average once after the loop rather than on every iteration
        mAP = np.mean(APs)
        return mAP
Example #16
def evaluate_model(dataset, augmentation, model, cfg):
    start = time.time()
    aps = list()
    i = 0

    for image_id in dataset.image_ids:
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, cfg, image_id, use_mini_mask=False)

        r = model.detect([image], verbose=0)[0]

        ap, precisions, recalls, overlaps = utils.compute_ap(gt_bbox,
                                                             gt_class_id,
                                                             gt_mask,
                                                             r["rois"],
                                                             r["class_ids"],
                                                             r["scores"],
                                                             r['masks'],
                                                             iou_threshold=0.5)
        aps.append(ap)

        i += 1

    mAP = np.mean(aps)

    end = time.time()
    print("Imagine processed: {}".format(i))
    print("FPS: {}".format(i / (end - start)))

    return mAP
Example #17
    def evaluate_model(dataset, model, cfg):
        APs = list()
        for image_id in dataset.image_ids:
            # load image, bounding boxes and masks for the image id
            image, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(
                dataset, cfg, image_id, use_mini_mask=False)
            # convert pixel values (e.g. center)
            scaled_image = mold_image(image, cfg)
            # convert image into one sample
            sample = np.expand_dims(scaled_image, 0)
            # make prediction
            yhat = model.detect(sample, verbose=1)
            # extract results for first sample
            r = yhat[0]
            # calculate statistics, including AP
            AP, _, _, _ = compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"],
                                     r["class_ids"], r["scores"], r['masks'])
            # store
            APs.append(AP)

        # calculate the mean AP across all images
        mAP = np.mean(APs)

        print('Mean Average Precision: {}'.format(mAP))
        return mAP
Example #18
def compute_ap_range_list(gt_box,
                          gt_class_id,
                          gt_mask,
                          pred_box,
                          pred_class_id,
                          pred_score,
                          pred_mask,
                          iou_thresholds=None,
                          verbose=1):
    """Compute AP over a range or IoU thresholds. Default range is 0.5-0.95."""
    # Default is 0.5 to 0.95 with increments of 0.05
    iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)

    # Compute AP over range of IoU thresholds
    APlist = []
    for iou_threshold in iou_thresholds:
        ap, precisions, recalls, overlaps =\
            utils.compute_ap(gt_box, gt_class_id, gt_mask,
                        pred_box, pred_class_id, pred_score, pred_mask, iou_threshold=iou_threshold)
        APlist.append(ap)

        if verbose:
            print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap))

    #print(APlist)
    return APlist
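
A minimal usage sketch: a COCO-style mAP@[0.5:0.95] is the mean of the returned list (`image_id`, `config`, `model`, and `dataset` are assumed to be set up as in the surrounding examples):

# Average per-threshold APs into a single range mAP, mirroring utils.compute_ap_range
image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
    dataset, config, image_id, use_mini_mask=False)
r = model.detect([image], verbose=0)[0]
ap_list = compute_ap_range_list(gt_bbox, gt_class_id, gt_mask,
                                r['rois'], r['class_ids'], r['scores'], r['masks'],
                                verbose=0)
print("mAP @ IoU 0.5:0.95 =", np.mean(ap_list))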
Example #19
def eval_mAP(model, dataset, config, sample_size=50):

    image_ids = np.random.choice(dataset.image_ids, sample_size)

    #each input is a tuple of form : image, image_meta, gt_class_id, gt_bbox, gt_mask
    inputs = [modellib.load_image_gt(dataset, config, iid, use_mini_mask=False) for iid in image_ids]

    APs = []

    n = config.BATCH_SIZE

    for i in range(0,len(image_ids),n): 

        curr_inputs = inputs[i:i+n]
        
        if (len(curr_inputs)%n) != 0:
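            # model.detect expects exactly config.BATCH_SIZE images, so a trailing partial batch is skipped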
            break
        
        results = model.detect([inp[0] for inp in curr_inputs], verbose=0)
        
        for j in range(len(results)):
            r = results[j]
            # Compute AP
            AP, precisions, recalls, overlaps = utils.compute_ap(curr_inputs[j][3], curr_inputs[j][2], curr_inputs[j][4], 
                                                r["rois"], r["class_ids"], r["scores"], r['masks'])
            APs.append(AP)
        
    return np.mean(APs)
Example #20
def evaluate(model):
    dataset_test = FoodDataset()
    dataset_test.load_food(args.dataset, "val")
    dataset_test.prepare()
    all_APs = []
    # print(dataset_test.image_ids)
    np.random.shuffle(dataset_test.image_ids)
    # iou_t = 0.5
    ious = [0.50,0.55,0.60,0.65,0.70,0.75,0.80,0.85,0.90,0.95]
    for iou_t in ious:
        APs = []
        for image_id in dataset_test.image_ids:
            # Load image and ground truth data
            image, image_meta, gt_class_id, gt_bbox, gt_mask =\
                modellib.load_image_gt(dataset_test, config,
                                       image_id, use_mini_mask=False)
            molded_images = np.expand_dims(modellib.mold_image(image, config), 0)
            # Run object detection
            results = model.detect([image], verbose=0)
            r = results[0]
            if r["class_ids"].size != 0:
                # Compute AP
                AP, precisions, recalls, overlaps =\
                    utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                     r["rois"], r["class_ids"], r["scores"], r['masks'], iou_threshold=iou_t)
                APs.append(AP)
                all_APs.append(AP)
                # print(AP)
            else:
                APs.append(0.0)
                all_APs.append(0.0)
        print("mAP-{}: ".format(iou_t), np.mean(APs) * 100, "%")
    print("mAP: ", np.mean(all_APs) * 100, "%")
Example #21
def compute_overall_mAP(model,
                        image_ids,
                        dataset,
                        inference_config,
                        filepath,
                        iou_thresholds=None):
    warnings.filterwarnings("ignore")

    image_gt_data = []
    overall_mAP = []
    overall_precisions = []
    overall_recalls = []
    overall_overlaps = []
    batch_len = inference_config.IMAGES_PER_GPU

    for i, image_id in tqdm(list(enumerate(image_ids)),
                            ascii=True,
                            desc="Loading GT data..."):
        original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset, inference_config,
                                   image_id, use_mini_mask=False)
        image_gt_data.append(
            (original_image, image_meta, gt_class_id, gt_bbox, gt_mask))

        if (i + 1) % batch_len == 0:
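            # A full batch of IMAGES_PER_GPU images is ready; images left over
            # after the last full batch are never scored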
            original_images, image_metas, gt_class_ids, gt_boxes, gt_masks = zip(
                *image_gt_data)
            batch_results = model.detect(original_images)
            pred_boxes = [result['rois'] for result in batch_results]
            pred_class_ids = [result['class_ids'] for result in batch_results]
            pred_scores = [result['scores'] for result in batch_results]
            pred_masks = [result['masks'] for result in batch_results]
            img_data = zip(gt_boxes, gt_class_ids, gt_masks, pred_boxes,
                           pred_class_ids, pred_scores, pred_masks)

            for data in img_data:
                gt_box, gt_class_id, gt_mask, pred_box, pred_class_id, pred_score, pred_mask = data
                mAP, precisions, recalls, overlaps = compute_ap(
                    gt_box,
                    gt_class_id,
                    gt_mask,
                    pred_box,
                    pred_class_id,
                    pred_score,
                    pred_mask,
                    iou_threshold=0.5)
                overall_mAP.append(mAP)
                overall_precisions.append(precisions)
                overall_recalls.append(recalls)
                overall_overlaps.append(overlaps)
            image_gt_data = []
            save_pickle(
                zip(overall_mAP, overall_precisions, overall_recalls,
                    overall_overlaps), filepath, "acc_tmp.pickle")

    warnings.filterwarnings("default")

    return overall_mAP, overall_precisions, overall_recalls, overall_overlaps
Example #22
    def visualize_segmentations(self, config, model):
        # Validation dataset
        dataset_val = CocoDataset()
        val_type = "train"
        coco = dataset_val.load_coco(val_type, return_coco=True)
        dataset_val.prepare()

        # Test on a random image
        image_id = random.choice(dataset_val.image_ids)

        # Test on a specific image
        image_to_test = '2010_000898.jpg'
        saved_index = -1
        for index, value in enumerate(dataset_val.image_info):
            file = value['path']
            info = file.rsplit('/', 1)
            file_name = info[1]
            if file_name == image_to_test:
                saved_index = index


        image_id = saved_index
        original_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, config,
                                                                                           image_id, use_mini_mask=False)

        log("original_image", original_image)
        log("image_meta", image_meta)
        log("gt_class_id", gt_class_id)
        log("gt_bbox", gt_bbox)
        log("gt_mask", gt_mask)

        visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, dataset_val.class_names,
                                    figsize=(8, 8), name='ground_truth.png')

        results = model.detect([original_image], verbose=1)

        r = results[0]
        visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'], dataset_val.class_names,
                                    r['scores'], ax=get_ax(), name='predicted.png')

        # Compute VOC-Style mAP @ IoU=0.5
        # Running on 10 images. Increase for better accuracy.
        image_ids = np.random.choice(dataset_val.image_ids, 10)
        APs = []
        for image_id in image_ids:
            # Load image and ground truth data
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, config, image_id,
                                                                                      use_mini_mask=False)
            molded_images = np.expand_dims(modellib.mold_image(image, config), 0)
            # Run object detection
            results = model.detect([image], verbose=0)
            r = results[0]
            # Compute AP
            AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"],
                                                                 r["class_ids"], r["scores"], r['masks'])
            APs.append(AP)

        print("mAP: ", np.mean(APs))
Example #23
def compute_batch_detections(model, image_ids):
    # Compute VOC-style Average Precision
    APs = []
    PRs = []
    REs = []
    detections = []

    
    dice_dic = {}

    for image_id in tqdm(image_ids):
        # Load image
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, config, image_id, use_mini_mask=False
        )        
        w, h = image.shape[1], image.shape[0]
        if w < h:
            w = h
        else:
            h = w
        #image = cv2.addWeighted(image, alpha, np.zeros(img.shape, img.dtype), 0, beta)
        #image = cv2.resize(image, (w, h),interpolation=cv2.INTER_CUBIC)    
        
        # Run object detection
        results = model.detect([image], verbose=0)
        # Compute AP
        r = results[0]

        AP, precisions, recalls, overlaps = utils.compute_ap(
            gt_bbox,
            gt_class_id,
            gt_mask,
            r["rois"],
            r["class_ids"],
            r["scores"],
            r["masks"],
            iou_threshold=0.4, 
        )

        APs.append(AP)
        PRs.append(precisions)
        REs.append(recalls)

        # list_overlaps
        detection = Detection(gt_class_id, r, overlaps)

        detections.append(detection)
        dice_dic[image_id] = coeff_per_image('dice', image_id, r, gt_mask, gt_class_id)
    try:
        print("[INFO] Dice Coefficient: ", )
        dice_path = os.getcwd() + "dice_coeff.p"
        pickle.dump(dice_dic, open(dice_path, 'wb'))
        print(dice_dic)
    except Exception:
        pass

    return detections, APs, PRs, REs
Example #24
def evaluate(model, dataset_dir, subset, config):
    """Run evaluation on images in the given directory."""
    print("Running on images in the given directory")

    submit_dir = "submit_{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
    submit_dir = os.path.join('/backup/yuxin/Mask_RCNN/samples/breast/', submit_dir)
    os.makedirs(submit_dir)
    
    dataset = BreastDataset()
    dataset.load_breast(dataset_dir, subset)
    dataset.prepare()

    image_ids = dataset.image_ids
    print(len(image_ids))
    APs = []
    #sub = {}
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
                modellib.load_image_gt(dataset, config,
                        image_id, use_mini_mask=False)
        #molded_images = np.expand_dims(modellib.mold_image(image, config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        print(dataset.image_info[image_id]['id']) 
        try:
            AP, precisions, recalls, overlaps =\
                utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                        r["rois"], r["class_ids"], r["scores"], r['masks'])
        except Exception:
            #print(image_id,dataset.image_info[image_id]["id"])
            #print(gt_mask.shape,r['masks'].shape)
            continue
        APs.append(AP)
        #if AP == 0:
        #    visualize.display_instances(
        #            image, r['rois'], r['masks'], r['class_ids'],
        #            dataset.class_names, r['scores'],
        #            show_bbox=True, show_mask=True,
        #            title="Predictions")
        #    plt.savefig("{}/{}.png".format(submit_dir, dataset.image_info[image_id]["id"]))
            #for i in range(gt_mask.shape[-1]):
        #    gt_mask = np.where(gt_mask, 255, 0) 
        #    cv2.imwrite("{}/{}.png".format(submit_dir, dataset.image_info[image_id]["id"]+'_mask'),
        #            gt_mask[:,:,0])
            #img_ori = cv2.imread("/backup/yuxin/CBIS-MASS-PNG/test/{}/images/000000.png".format(dataset.image_info[image_id]["id"]),
            #        cv2.IMREAD_ANYDEPTH)
            #cv2.imwrite("{}/{}.png".format(submit_dir, dataset.image_info[image_id]["id"]+'_ori'))
            #mask = dataset.load_mask(image_id).astype(np.uint8)
            #plt.savefig("{}/{}.png".format(submit_dir, dataset.image_info[image_id]["id"]+'_mask'))
        #sub.append(dataset.image_info[image_id]["id"]:AP)
        print(AP,dataset.image_info[image_id]["id"])
        #recallss.append(recalls)
        #   overlapss.append(overlaps)
    print("mAP: ", np.mean(APs))
Example #25
def run_map(out_path, num2_1, num2_2):
    dataset_val = ExpressDataset()
    dataset_val.out_path(out_path)
    dataset_val.set_dataset_class('val')
    dataset_val.set_dataset_exit_flag(True)
    dataset_val.load_shapes(num2_1, num2_2, 0)
    dataset_val.prepare()

    class InferenceConfig(ExpressConfig):
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    inference_config = InferenceConfig()

    # Recreate the model in inference mode
    model = modellib.MaskRCNN(mode="inference",
                              config=inference_config,
                              model_dir=MODEL_DIR)

    # Get path to saved weights
    # Either set a specific path or find last trained weights
    # model_path = os.path.join(ROOT_DIR, ".h5 file name here")
    #model_path = '/home/kingqi/proj/Mask_RCNN/log/express20190424T1751/mask_rcnn_express_0006.h5'
    model_path = model.find_last()

    # Load trained weights
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    image_ids = np.random.choice(dataset_val.image_ids, num2_1 + num2_2 * 6)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset_val, inference_config, image_id, use_mini_mask=False)
        molded_images = np.expand_dims(
            modellib.mold_image(image, inference_config), 0)

        image1 = dataset_val.load_image(image_id)
        mask1, class_ids1 = dataset_val.load_mask(image_id)

        bbox = utils.extract_bboxes(mask1)
        # visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
        #visualize.display_instances(image1, bbox, mask1, class_ids1, dataset_val.class_names)

        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
        #                            dataset_val.class_names, r['scores'])
        # Compute AP
        AP, precisions, recalls, overlaps = utils.compute_ap(
            gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"],
            r["scores"], r['masks'])
        APs.append(AP)

    print("mAP: ", np.mean(APs))
Example #26
    def evaluate_sun(self, dataset_name, model_name, class_id=None):
        """Evaluate model on sun data for specific class."""
        image_ids = self.datasets[dataset_name].image_ids
        APs = []
        ious = []

        for iid in image_ids:
            # Load image and ground truth data
            _, class_ids = self.datasets[dataset_name].load_mask(iid)

            image, _, gt_class_id, gt_bbox, gt_mask =\
                modellib.load_image_gt(self.datasets[dataset_name],
                                       self.inference_config,
                                       iid, use_mini_mask=False)

            if (class_id and class_id not in class_ids) or len(gt_class_id) == 0:
                continue

            # Run object detection; detect() molds images internally, so pass the raw image
            results = self.model.detect([image], verbose=0)
            r = results[0]
            # Compute AP
            AP, _, _, overlaps =\
                utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                 r["rois"], r["class_ids"], r["scores"], r['masks'],
                                 class_id=class_id)

            # AP can be nan, check to not include those values
            if (type(AP) is float or type(AP) is np.float64) and not math.isnan(AP):
                APs.append(float(AP))

                if type(overlaps) is np.ndarray and overlaps.shape[0] > 0:
                    iou_per_row = float(0)
                    for row_i in range(overlaps.shape[0]):
                        iou_per_row += max(overlaps[row_i])

                    ious.append(float(iou_per_row / len(overlaps)))

                else:
                    ious.append(float(0))

            # if len(APs) > 2:
            #     return APs, ious

        if len(APs) > 0 and len(ious) > 0:
            print("Num aps analyzed: ", len(APs))
            print("mAP: ", sum(APs) / len(APs))

            return APs, ious
        else:
            print("Num aps analyzed: ", len(APs))
            return [0], [0]
Example #27
def compute_batch_ap(image_ids, model, dataset, config):

    APs = []
    class_APs = {}
    size_APs = {}
    class_overlaps = {}
    size_overlaps = {}
    overlaps_list = []
    for iteration in range(5):
        for image_id in image_ids:
            # Load image
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
                dataset, config, image_id, use_mini_mask=False)
            gt_class_ids_set_to_zero = np.zeros(gt_class_id.shape).astype(str)
            if len(gt_class_id) > 0:
                # Run object detection
                results = model.detect([image], verbose=0)
                # Compute AP
                r = results[0]
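                # Score each detection on its own against class-agnostic GT (all
                # class ids zeroed out), so AP can be bucketed by class and mask size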
                for i, roi in enumerate(r["rois"]):
                    mask_size = np.sum(r["masks"][:, :, i].flatten())
                    singleton_rois = np.array([roi])
                    singleton_class_ids = np.array([r["class_ids"][i]])
                    singleton_scores = np.array([r["scores"][i]])
                    singleton_masks = np.zeros([1024, 1024, 1])
                    singleton_masks[:, :, 0] = np.array(r["masks"][:, :, i])
                    AP, precisions, recalls, overlaps = utils.compute_ap(
                        gt_bbox,
                        gt_class_ids_set_to_zero,
                        gt_mask,
                        singleton_rois,
                        np.array(['0.0']),  # match the zeroed-out GT class ids
                        singleton_scores,
                        singleton_masks,  # [H, W, 1]; wrapping in np.array([...]) broke the expected shape
                    )
                    if r["class_ids"][i] in class_APs.keys():
                        class_APs[r["class_ids"][i]].append(AP)
                        class_overlaps[r["class_ids"][i]].append(overlaps)
                    else:
                        class_APs[r["class_ids"][i]] = [AP]
                        class_overlaps[r["class_ids"][i]] = [overlaps]

                    if mask_size in size_APs.keys():
                        size_APs[mask_size].append(AP)
                        size_overlaps[mask_size].append(overlaps)
                    else:
                        size_APs[mask_size] = [AP]
                        size_overlaps[mask_size] = [overlaps]
                    APs.append(AP)
                    overlaps_list.append(overlaps)

    return class_APs, size_APs, APs, class_overlaps, size_overlaps, overlaps_list
Example #28
def evaluate_model(dataset, model, cfg):
  # Assumes `from numpy import expand_dims, mean` plus Mask R-CNN's
  # load_image_gt, mold_image and compute_ap imported at module level
  APs = list()
  for image_id in dataset.image_ids:
    image, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(dataset, cfg, image_id, use_mini_mask=False)
    scaled_image = mold_image(image, cfg)
    sample = expand_dims(scaled_image, 0)
    yhat = model.detect(sample, verbose=0)
    r = yhat[0]
    AP, _, _, _ = compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks'])
    APs.append(AP)
  mAP = mean(APs)
  return mAP
Example #29
def evaluate(model, inference_config):
    dataset_val = IcgDataset()
    dataset_val.load_icg(args.dataset, "val")
    dataset_val.prepare()

    image_id = random.choice(dataset_val.image_ids)

    original_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
        dataset_val, inference_config, image_id, use_mini_mask=False)
    log("original_image", original_image)
    log("image_meta", image_meta)
    log("gt_class_id", gt_class_id)
    log("gt_bbox", gt_bbox)
    log("gt_mask", gt_mask)
    visualize.display_instances(original_image,
                                gt_bbox,
                                gt_mask,
                                gt_class_id,
                                dataset_val.class_names,
                                show_bbox=False,
                                show_mask=False,
                                title="Predictions")
    results = model.detect([original_image], verbose=1)

    r = results[0]
    visualize.display_instances(original_image,
                                r['rois'],
                                r['masks'],
                                r['class_ids'],
                                dataset_val.class_names,
                                r['scores'],
                                ax=get_ax())

    image_ids = np.random.choice(dataset_val.image_ids, 10)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
         modellib.load_image_gt(dataset_val, inference_config,
                 image_id, use_mini_mask=False)
        molded_images = np.expand_dims(
            modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP
        AP, precisions, recalls, overlaps =\
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        APs.append(AP)

    print("mAP: ", np.mean(APs))
Example #30
        def compute_batch_ap(view_ids):
            max_views = 5
            APs = []
            APs_range = []
            for view_index, view_id in enumerate(view_ids):
                image_ids = dataset.load_view(max_views,
                                              main_image=view_id,
                                              rnd_state=0)
                # skip instance if it has too few views (load_view returns None)
                if not image_ids:
                    continue
                image_ids = image_ids[:config.NUM_VIEWS]
                # Load image
                print("processing image {} of {}".format(
                    view_index, view_ids.size))
                image, image_meta, gt_class_id, gt_bbox, gt_mask =\
                    modellib.load_image_gt(dataset, config,
                                           image_ids[0], use_mini_mask=False)
                im = []
                Rcam = []
                Kmat = dataset.K
                for image_id in image_ids:
                    image = dataset.load_image(image_id)
                    image, _, _, _, _ = utils.resize_image(
                        image,
                        min_dim=config.IMAGE_MIN_DIM,
                        min_scale=config.IMAGE_MIN_SCALE,
                        max_dim=config.IMAGE_MAX_DIM,
                        mode=config.IMAGE_RESIZE_MODE)
                    im.append(image)
                    Rcam.append(dataset.load_R(image_id))

                im = np.stack(im)
                Rcam = np.stack([Rcam])
                Kmat = np.stack([Kmat])
                # Run object detection
                results = model.detect([im], Rcam, Kmat)
                # Compute AP
                r = results[0]
                AP, precisions, recalls, overlaps =\
                    utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                      r['rois'], r['class_ids'], r['scores'], r['masks'])

                #AP_range = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask,
                #                     r['rois'], r['class_ids'], r['scores'], r['masks'], verbose=0)

                APs.append(AP)
                #APs_range.append(AP_range)
                print("meanAP: {}".format(np.mean(APs)))
                #print("AP_range: {}".format(np.mean(APs_range)))
            return APs, APs_range