    def calc_map(self, gt, iou_thresh: float=0.25, ignore_grade: bool=False):

        boundingBoxes = self._extract_bounding_boxes(gt, ignore_grade)

        evaluator = Evaluator()
        metricsPerClass = evaluator.GetPascalVOCMetrics(boundingBoxes, iou_thresh)
        return np.mean([np.nan_to_num(mc['AP']) for mc in metricsPerClass])
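
# The method above reduces the per-class results of GetPascalVOCMetrics to a
# single mAP. A standalone sketch of the same reduction (hypothetical helper,
# not part of the library), assuming metrics_per_class is the list of per-class
# dicts returned by Evaluator.GetPascalVOCMetrics:
import numpy as np

def mean_average_precision(metrics_per_class):
    # treat NaN AP (classes without any detections) as 0
    aps = [np.nan_to_num(mc['AP']) for mc in metrics_per_class]
    return float(np.mean(aps)) if aps else 0.0
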
def detections(cfg, gtFolder, detFolder, savePath, show_process=True):

    # getGTBoxes and getDetBoxes return the ground-truth boxes, ground-truth classes and the detection boxes
    gt_boxes, classes, num_pos = getGTBoxes(cfg, gtFolder)
    det_boxes = getDetBoxes(cfg, detFolder)

    # Create an evaluator object
    evaluator = Evaluator()

    # Return the PASCAL VOC metrics
    return evaluator.GetPascalVOCMetrics(cfg, classes, gt_boxes, num_pos,
                                         det_boxes)
Example #3
def detections(cfg,
               gtFolder,
               detFolder,
               savePath,
               show_process=True):

    gt_boxes, classes, num_pos = getGTBoxes(cfg, gtFolder)
    det_boxes = getDetBoxes(cfg, detFolder)

    evaluator = Evaluator()
    # Pass in the config and the class list

    return evaluator.GetPascalVOCMetrics(cfg, classes, gt_boxes, num_pos, det_boxes)
    def drawAllBoundingBoxes(self, gt, image_folder: Path, iou_thresh: float=0.25):

        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.5
        fontThickness = 1

        evaluator = Evaluator()
        boundingBoxes = self._extract_bounding_boxes(gt)

        images = {}

        for image_id in gt.images:

            # 376x256 canvas: the top 256x256 region holds the image, the bottom 120 rows form a white strip for text
            image = np.zeros((376, 256, 3), np.uint8)
            image[256:376, 0:256, :] = 255
            image[0:256, 0:256, :] = cv2.imread(str(image_folder/image_id))

            bbxesImage = BoundingBoxes()
            bbxes = boundingBoxes.getBoundingBoxesByImageName(image_id)

            for bb in bbxes:
                bbxesImage.addBoundingBox(bb)

                x1, y1, x2, y2 = bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)

                color = self.colors[bb.getClassId()]
                # ground truths are drawn as an X, detections as a rectangle
                if bb.getBBType() == BBType.GroundTruth:
                    cv2.line(image, (x1, y1), (x2, y2), color, 2)
                    cv2.line(image, (x2, y1), (x1, y2), color, 2)
                else:
                    cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)

            metrics_per_class = evaluator.GetPascalVOCMetrics(bbxesImage, iou_thresh)

            for mc in metrics_per_class:
                cv2.putText(image, "Grade: {} mAP: {:01.2f}".format(mc['class'], mc['AP']),
                            (10, 270 + int(20 * mc['class'])),
                            font, fontScale, self.colors[mc['class']], fontThickness, cv2.LINE_AA)

            cv2.putText(image, "mAP: {:01.2f}".format( np.mean([np.nan_to_num(mc['AP']) for mc in metrics_per_class])),
                        (10, 365),
                        font, fontScale, (0,0,0), fontThickness, cv2.LINE_AA)

            images[image_id] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return images
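
# A minimal usage sketch for the dict returned by drawAllBoundingBoxes above
# (hypothetical helper and output folder): the rendered images are RGB, so they
# are converted back to BGR before being handed to cv2.imwrite.
import cv2
from pathlib import Path

def save_rendered_images(images, out_dir="rendered"):
    out_dir = Path(out_dir)
    out_dir.mkdir(exist_ok=True)
    for image_id, rgb in images.items():
        # drawAllBoundingBoxes returns RGB arrays; cv2.imwrite expects BGR
        cv2.imwrite(str(out_dir / image_id), cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
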
def calculate_metrics(IOUThresh=0.5):
    evaluator = Evaluator()

    metricsPerClass = evaluator.GetPascalVOCMetrics(
        getBoundingBoxes()[0],  # Object containing all bounding boxes (ground truths and detections)
        IOUThreshold=IOUThresh,  # IOU threshold
        method=MethodAveragePrecision.EveryPointInterpolation  # As in the official Matlab code
    )
    #print("Average precision values per class:\n")
    # Loop through classes to obtain their metrics

    for mc in metricsPerClass:
        # Get metric values per each class
        c = mc['class']
        precision = mc['precision']
        recall = mc['recall']
        average_precision = mc['AP']
        ipre = mc['interpolated precision']
        irec = mc['interpolated recall']
        print("Precision: " + str(precision[-1]))  #True Precision Value
        print("Recall: " + str(recall[-1]))  #True Recall Value
        return average_precision, precision[-1], recall[-1],
# createImages(dictGroundTruth, dictDetected)
# Create an evaluator object in order to obtain the metrics
evaluator = Evaluator()
##############################################################
# VOC PASCAL Metrics
##############################################################
# Plot Precision x Recall curve
# evaluator.PlotPrecisionRecallCurve(
#     boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
#     IOUThreshold=0.3,  # IOU threshold
#     method=MethodAveragePrecision.EveryPointInterpolation,  # As in the official Matlab code
#     showAP=True,  # Show Average Precision in the title of the plot
#     showInterpolatedPrecision=True)  # Plot the interpolated precision curve
# Get metrics with PASCAL VOC metrics
metricsPerClass = evaluator.GetPascalVOCMetrics(
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3,  # IOU threshold
    method=MethodAveragePrecision.EveryPointInterpolation
)  # As in the official Matlab code
print("Average precision values per class:\n")
# Loop through classes to obtain their metrics
for mc in metricsPerClass:
    # Get metric values per each class
    c = mc['class']
    precision = mc['precision']
    recall = mc['recall']
    average_precision = mc['AP']
    ipre = mc['interpolated precision']
    irec = mc['interpolated recall']
    # Print AP per class
    print('%s: %f' % (c, average_precision))
Example #7
def get_map(model, confidence, iou_threshold, coco_version, subset=1):

    if type(model) is nn.DataParallel:
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    pw_ph = pw_ph.cuda()
    cx_cy = cx_cy.cuda()
    stride = stride.cuda()

    model.eval()

    max_detections = 100
    transformed_dataset = Coco(partition='val',
                               coco_version=coco_version,
                               subset=subset,
                               transform=transforms.Compose(
                                   [ResizeToTensor(inp_dim)]))

    dataset_len = len(transformed_dataset)
    #     print('Length of dataset is '+ str(dataset_len)+'\n')
    batch_size = 8

    dataloader = DataLoader(transformed_dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            collate_fn=helper.my_collate,
                            num_workers=4)

    for images, targets in dataloader:
        inp = images.cuda()
        raw_pred = model(inp, torch.cuda.is_available())
        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)

        sorted_pred = torch.sort(true_pred[:, :, 4] *
                                 (true_pred[:, :, 5:].max(axis=2)[0]),
                                 descending=True)

        pred_mask = sorted_pred[0] > confidence
        indices = [(sorted_pred[1][e, :][pred_mask[e, :]])
                   for e in range(pred_mask.shape[0])]
        pred_final = [true_pred[i, indices[i], :] for i in range(len(indices))]

        pred_final_coord = [
            util.get_abs_coord(pred_final[i].unsqueeze(-2))
            for i in range(len(pred_final))
        ]

        indices = [
            nms_box.nms(pred_final_coord[i][0], pred_final[i][:, 4],
                        iou_threshold) for i in range(len(pred_final))
        ]

        pred_final = [
            pred_final[i][indices[i], :] for i in range(len(pred_final))
        ]

        #     pred_final[:,0:4]=pred_final[:,0:4]/inp_dim
        # img_name is expected to be provided per batch (e.g. by the dataset/collate fn); it is not defined in this excerpt
        helper.write_pred(img_name, pred_final, inp_dim, max_detections,
                          coco_version)

    boundingboxes = helper.getBoundingBoxes(coco_version)

    evaluator = Evaluator()

    metricsPerClass = evaluator.GetPascalVOCMetrics(boundingboxes,
                                                    IOUThreshold=0.75)
    # Loop through classes to obtain their metrics
    mAP = 0
    counter = 0
    for mc in metricsPerClass:
        # Get metric values per each class
        c = mc['class']
        precision = mc['precision']
        recall = mc['recall']
        average_precision = mc['AP']
        ipre = mc['interpolated precision']
        irec = mc['interpolated recall']
        # Print AP per class
        mAP = average_precision + mAP


#         print('%s: %f' % (c, average_precision))

#     print('map is:',mAP/80)
    return mAP / 80  # average over the 80 COCO classes
#createImages(dictGroundTruth, dictDetected)
# Create an evaluator object in order to obtain the metrics
evaluator = Evaluator()
##############################################################
# VOC PASCAL Metrics
##############################################################
# Plot Precision x Recall curve
evaluator.PlotPrecisionRecallCurve(
    'object',  # Class to show
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3,  # IOU threshold
    showAP=True,  # Show Average Precision in the title of the plot
    showInterpolatedPrecision=False
)  # Don't plot the interpolated precision curve
# Get metrics with PASCAL VOC metrics
metricsPerClass = evaluator.GetPascalVOCMetrics(
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3)  # IOU threshold
print("Average precision values per class:\n")
# Loop through classes to obtain their metrics
for mc in metricsPerClass:
    # Get metric values per each class
    c = mc['class']
    precision = mc['precision']
    recall = mc['recall']
    average_precision = mc['AP']
    ipre = mc['interpolated precision']
    irec = mc['interpolated recall']
    # Print AP per class
    print('%s: %f' % (c, average_precision))
allBoundingBoxes, allClasses = getBoundingBoxes(detFolder,
                                                False,
                                                detFormat,
                                                detCoordType,
                                                allBoundingBoxes,
                                                allClasses,
                                                imgSize=imgSize)
allClasses.sort()

evaluator = Evaluator()
acc_AP = 0
validClasses = 0

# Plot Precision x Recall curve
detections = evaluator.GetPascalVOCMetrics(
    allBoundingBoxes, iouThreshold,
    MethodAveragePrecision.EveryPointInterpolation)

# detections = evaluator.PlotPrecisionRecallCurve(
#     allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
#     IOUThreshold=iouThreshold,  # IOU threshold
#     method=MethodAveragePrecision.EveryPointInterpolation,
#     showAP=True,  # Show Average Precision in the title of the plot
#     showInterpolatedPrecision=False,  # Don't plot the interpolated precision curve
#     savePath=savePath,
#     showGraphic=showPlot)

print('Average Precision (AP), Precision and Recall per class:')

# each detection is a class
for metricsPerClass in detections:
Example #10
class PascalVOCMetric(Callback):

    def __init__(self, anchors, size, metric_names: list, detect_thresh: float=0.3, nms_thresh: float=0.3,
                 images_per_batch: int=-1):
        self.ap = 'AP'
        self.anchors = anchors
        self.size = size
        self.detect_thresh = detect_thresh
        self.nms_thresh = nms_thresh

        self.images_per_batch = images_per_batch
        self.metric_names_original = metric_names
        self.metric_names = ["{}-{}".format(self.ap, i) for i in metric_names]

        self.evaluator = Evaluator()
        if (self.anchors.shape[-1]==4):
            self.boundingObjects = BoundingBoxes()
        else:
            self.boundingObjects = BoundingCircles()


    def on_epoch_begin(self, **kwargs):
        self.boundingObjects.removeAllBoundingObjects()
        self.imageCounter = 0


    def on_batch_end(self, last_output, last_target, **kwargs):
#        print('Last target:',last_target)

        bbox_gt_batch, class_gt_batch = last_target[:2]
        class_pred_batch, bbox_pred_batch = last_output[:2]

        self.images_per_batch = self.images_per_batch if self.images_per_batch > 0 else class_pred_batch.shape[0]
        for bbox_gt, class_gt, clas_pred, bbox_pred in \
                list(zip(bbox_gt_batch, class_gt_batch, class_pred_batch, bbox_pred_batch))[: self.images_per_batch]:

            out = process_output(clas_pred, bbox_pred, self.anchors, self.detect_thresh)
            bbox_pred, scores, preds = out['bbox_pred'], out['scores'], out['preds']
            if bbox_pred is None:# or len(preds) > 3 * len(bbox_gt):
                continue

            #image = np.zeros((512, 512, 3), np.uint8)

            # if the number is too high, evaluation becomes very slow
            total_nms_examples = len(class_gt) * 3
            bbox_pred = bbox_pred[:total_nms_examples]
            scores = scores[:total_nms_examples]
            preds = preds[:total_nms_examples]
            to_keep = nms(bbox_pred, scores, self.nms_thresh)
            bbox_pred, preds, scores = bbox_pred[to_keep].cpu(), preds[to_keep].cpu(), scores[to_keep].cpu()

            t_sz = torch.Tensor([(self.size, self.size)])[None].cpu()
            bbox_gt = bbox_gt[np.nonzero(class_gt)].squeeze(dim=1).cpu()
            class_gt = class_gt[class_gt > 0]
            # change gt from x,y,x2,y2 -> x,y,w,h
            if (bbox_gt.shape[-1] == 4):
                bbox_gt[:, 2:] = bbox_gt[:, 2:] - bbox_gt[:, :2]

            bbox_gt = to_np(rescale_boxes(bbox_gt, t_sz))
            bbox_pred = to_np(rescale_boxes(bbox_pred, t_sz))
            # change from center to top left
            if (bbox_gt.shape[-1] == 4):
                bbox_pred[:, :2] = bbox_pred[:, :2] - bbox_pred[:, 2:] / 2

            class_gt = to_np(class_gt) - 1
            preds = to_np(preds)
            scores = to_np(scores)

            for box, cla in zip(bbox_gt, class_gt):
                if (bbox_gt.shape[-1] == 4):
                    temp = BoundingBox(imageName=str(self.imageCounter), classId='Mit', x=box[0], y=box[1],
                                   w=box[2], h=box[3], typeCoordinates=CoordinatesType.Absolute,
                                   bbType=BBType.GroundTruth, format=BBFormat.XYWH, imgSize=(self.size,self.size))

                    self.boundingObjects.addBoundingBox(temp)


                else:
                    temp = BoundingCircle(imageName=str(self.imageCounter), classId='Mit', x=box[0], y=box[1],
                                   r=box[2], typeCoordinates=CoordinatesType.Absolute,
                                   bbType=BBType.GroundTruth, imgSize=(self.size,self.size))

                    self.boundingObjects.addBoundingCircle(temp)

            # to reduce computation, keep at most three times the number of gt boxes
            num_boxes = len(bbox_gt) * 3
            for box, cla, scor in list(zip(bbox_pred, preds, scores))[:num_boxes]:
                if (bbox_gt.shape[-1] == 4):
                    temp = BoundingBox(imageName=str(self.imageCounter), classId='Mit', x=box[0], y=box[1],
                                       w=box[2], h=box[3], typeCoordinates=CoordinatesType.Absolute, classConfidence=scor,
                                       bbType=BBType.Detected, format=BBFormat.XYWH, imgSize=(self.size, self.size))

                    self.boundingObjects.addBoundingBox(temp)
                else:
                    temp = BoundingCircle(imageName=str(self.imageCounter), classId='Mit', x=box[0], y=box[1],
                                   r=box[2], typeCoordinates=CoordinatesType.Absolute, classConfidence=scor,
                                   bbType=BBType.Detected, imgSize=(self.size,self.size))

                    self.boundingObjects.addBoundingCircle(temp)


            #image = self.boundingObjects.drawAllBoundingBoxes(image, str(self.imageCounter))
            self.imageCounter += 1

    def on_epoch_end(self, last_metrics, **kwargs):
        if self.boundingObjects.count() > 0:

            self.metrics = {}
            metricsPerClass = self.evaluator.GetPascalVOCMetrics(self.boundingObjects, IOUThreshold=0.3)
            self.metric = max(sum([mc[self.ap] for mc in metricsPerClass]) / len(metricsPerClass), 0)

            for mc in metricsPerClass:
                self.metrics['{}-{}'.format(self.ap, mc['class'])] = max(mc[self.ap], 0)

            return {'last_metrics': last_metrics + [self.metric]}
        else:
            self.metrics = dict(zip(self.metric_names, [0 for i in range(len(self.metric_names))]))
            return {'last_metrics': last_metrics + [0]}
Example #11
                                                gtCoordType,
                                                imgSize=imgSize)
# Get detected boxes
allBoundingBoxes, allClasses = getBoundingBoxes(detFolder,
                                                False,
                                                detFormat,
                                                detCoordType,
                                                allBoundingBoxes,
                                                allClasses,
                                                imgSize=imgSize)
allClasses.sort()

evaluator = Evaluator()

results_c, results_g = evaluator.GetPascalVOCMetrics(
    allBoundingBoxes, confThreshold, iouThreshold,
    MethodAveragePrecision.EveryPointInterpolation, showPlot)

validClasses = 0
acc_AP = 0

name = args.saveName

map = results_g['map']
best_thr = results_g['conf_thr']
best_rec = results_g['recall']
best_prec = results_g['precision']

# ----------------------------------

# WRITE TO TXT AND CSV
Example #12
            if score > score_thr:
                bbox_lst.append([img_name, category_id, score, bbox])
    return bbox_lst


if __name__ == '__main__':
    root = r'C:\Users\EDZ\Desktop\chongqing1_round1_train1_20191223'
    gt_json = os.path.join(root, r'train_val\val.json')
    det_json = os.path.join(root, r'model_predict\result_concat.json')
    gt_lst = get_bbox(gt_json, is_gt=True)
    det_lst = get_bbox(det_json, is_gt=False, score_thr=0.01)

    evaluator = Evaluator()
    ret, mAP = evaluator.GetPascalVOCMetrics(
        gt_lst,
        det_lst,
        method='EveryPointInterpolation'
    )
    for metricsPerClass in ret:
        # Get metric values per each class
        cl = metricsPerClass['class']
        ap = metricsPerClass['AP']
        precision = metricsPerClass['precision']
        recall = metricsPerClass['recall']
        totalPositives = metricsPerClass['total positives']
        total_TP = metricsPerClass['total TP']
        total_FP = metricsPerClass['total FP']
        if totalPositives > 0:
            ap_str = "{0:.2f}%".format(ap * 100)
            print('AP: %s (%s)' % (ap_str, cl))
    mAP_str = "{0:.2f}%".format(mAP * 100)
def evaluate_agent(experiment_path, n_samples=100, agent_dir='best',
                   visualize_episodes=True):
    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='')
    print_config()

    dataset = load_dataset(CONFIG['dataset'], CONFIG['dataset_path'])

    # Always playout full episodes during testing
    CONFIG['playout_episode'] = True
    CONFIG['premasking'] = False
    env = create_env(dataset, CONFIG, mode='test')

    # Load agent from given path
    agent_path = os.path.join(experiment_path, agent_dir)
    agent = create_agent(env, CONFIG, from_path=agent_path)

    # Plot training summary (if it doesn't exist yet)
    if not os.path.exists(os.path.join(experiment_path, 'training')):
        plot_training_summary(experiment_path)

    # Create new evaluation folder
    eval_dirname = 'evaluation'
    eval_path = os.path.join(experiment_path, eval_dirname)
    ensure_folder(eval_path)

    # Use sampling to speed up evaluation if needed
    sample_size = len(dataset)
    if n_samples is not None and n_samples > -1:
        sample_size = min(sample_size, n_samples)

    collector = DetectionMetrics(eval_path)
    hooks = []
    hooks.append(collector)
    if visualize_episodes:
        gif_path = os.path.join(eval_path, 'episodes')
        hooks.append(EpisodeRenderer(gif_path))

    run_agent(agent, env, sample_size, hooks=hooks)

    print("Write bbox files")
    def _write_bbox_file(name, bbox_map):
        dir_path = os.path.join(eval_path, name)
        ensure_folder(dir_path)
        for image_idx, bboxes in bbox_map.items():
            image_name = dataset.get_image_name(image_idx)
            print(image_name)
            image_txt = ''
            for bbox in bboxes:
                image_txt += 'text ' # object class name
                # Ensure bounding boxes are saved as integers
                image_txt += str(int(bbox[0])) + ' '
                image_txt += str(int(bbox[1])) + ' '
                image_txt += str(int(bbox[2])) + ' '
                image_txt += str(int(bbox[3])) + ' '
                image_txt += '\n'
            print(image_txt)
            txt_fpath = os.path.join(dir_path, f'{image_name}.txt')
            with open(txt_fpath, 'w+') as f:
                f.write(image_txt)
    _write_bbox_file('predictions', collector.image_pred_bboxes)
    _write_bbox_file('groundtruths', collector.image_true_bboxes)

    print("Evaluating predictions against ground truth")

    def _generate_lib_bboxes(bb_type, bbox_map, confidence=None):
        boxes = []
        for image_idx, bboxes in bbox_map.items():
            image_name = dataset.get_image_name(image_idx)
            for bbox in bboxes:
                box = BoundingBox(
                    image_name,
                    'text',  # object class name
                    int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]),
                    typeCoordinates=CoordinatesType.Absolute,
                    classConfidence=confidence,
                    bbType=bb_type,
                    format=BBFormat.XYX2Y2
                )
                boxes.append(box)
        return boxes

    true_boxes = _generate_lib_bboxes(BBType.GroundTruth, collector.image_true_bboxes)
    # Set default confidence as .01 for now (since agent doesn't score regions)
    pred_boxes = _generate_lib_bboxes(
        BBType.Detected, collector.image_pred_bboxes, confidence=.01
    )

    all_boxes = BoundingBoxes()
    for bbox in pred_boxes:
        all_boxes.addBoundingBox(bbox)
    for bbox in true_boxes:
        all_boxes.addBoundingBox(bbox)

    evaluator = Evaluator()
    # Mapping from IoU threshold to the metrics calculated at that threshold
    iou_metrics = {}
    iou_thresholds = [round(x, 2) for x in np.arange(0, 1, .05)]

    all_actions = list(itertools.chain(*collector.image_actions.values()))
    action_counter = Counter(all_actions)
    n_actions = len(action_counter.keys())

    for iou_threshold in iou_thresholds:
        metrics_per_class = evaluator.GetPascalVOCMetrics(
            all_boxes,
            IOUThreshold=iou_threshold,
            method=MethodAveragePrecision.EveryPointInterpolation
        )
        text_metrics = metrics_per_class[0]  # class = 'text'
        metrics = {
            'precision': text_metrics['precision'][-1],
            'recall': text_metrics['recall'][-1],
            'ap': text_metrics['AP'],
            'num_p_total': text_metrics['total positives'],
            'num_tp': text_metrics['total TP'],
            'num_fp': text_metrics['total FP'],
        }
        metrics['f1'] = f1(metrics['precision'], metrics['recall'])
        if len(collector.image_avg_iou) > 0:
            metrics['avg_iou'] = sum(list(collector.image_avg_iou.values())) / len(collector.image_avg_iou)
        else:
            metrics['avg_iou'] = 0

        metrics['total_actions'] = sum(list(collector.image_num_actions.values()))
        if len(collector.image_num_actions) > 0:
            metrics['avg_actions'] = sum(list(collector.image_num_actions.values())) / len(collector.image_num_actions)
        else:
            metrics['avg_actions'] = 0
        print(collector.image_num_actions_per_subepisode)
        avg_actions_subepisode = [sum(x) / len(x) if len(x) else 0 for x in collector.image_num_actions_per_subepisode.values()]
        print(avg_actions_subepisode)
        metrics['mean_avg_actions_subepisode'] = sum(avg_actions_subepisode) / len(avg_actions_subepisode)
        print(metrics['mean_avg_actions_subepisode'])

        for action, count in action_counter.items():
            action_name = str(action)
            metrics[f'total_action_{action_name}'] = count

        iou_metrics[iou_threshold] = metrics

    # Save metrics as CSV
    iou_metrics_df = pd.DataFrame.from_dict(iou_metrics, orient='index')
    iou_metrics_df.index.name = 'iou_threshold'
    iou_metrics_df.to_csv(os.path.join(eval_path, 'metrics.csv'))

    print("Generating plots")

    plots_path = os.path.join(eval_path, 'plots')
    ensure_folder(plots_path)

    # Histogram of agent's actions
    fig, ax = plt.subplots()
    ax.hist(all_actions, bins=n_actions, orientation='horizontal', color='#0504aa')
    ax.set(xlabel='Frequency (Total)', ylabel='Action', title='Agent Actions')
    fig.savefig(os.path.join(plots_path, 'action_hist.png'))

    # Precision-Recall curves at different IoU thresholds
    for iou_threshold in [0.5, 0.75]:
        iou_fname_str = str(iou_threshold).replace('.', '')
        plot_path = os.path.join(plots_path, f'ap_{iou_fname_str}')
        ensure_folder(plot_path)
        evaluator.PlotPrecisionRecallCurve(
            all_boxes,
            IOUThreshold=iou_threshold,
            method=MethodAveragePrecision.EveryPointInterpolation,
            showAP=True,
            showInterpolatedPrecision=True,
            savePath=plot_path,
            showGraphic=False
        )

    # Recall-IoU curve
    x = iou_metrics_df.index.values
    y = iou_metrics_df['recall'].values
    fig, ax = plt.subplots()
    ax.plot(x, y, '-o')
    ax.set(xlabel='Intersection over Union (IoU)', ylabel='Recall', title='Recall-IoU')
    ax.grid()
    fig.savefig(os.path.join(plots_path, 'recall_iou.png'))

    # Precision-IoU curve
    x = iou_metrics_df.index.values
    y = iou_metrics_df['precision'].values
    fig, ax = plt.subplots()
    ax.plot(x, y, '-o')
    ax.set(xlabel='Intersection over Union (IoU)', ylabel='Precision', title='Precision-IoU')
    ax.grid()
    fig.savefig(os.path.join(plots_path, 'precision_iou.png'))

    print("Drawing images with predictions and ground truths")

    images_path = os.path.join(eval_path, 'images')
    ensure_folder(images_path)

    for image_idx in range(sample_size):
        image_path, _ = dataset.get(image_idx, as_image=False)
        image_name = dataset.get_image_name(image_idx)
        image = cv2.imread(image_path)
        image = all_boxes.drawAllBoundingBoxes(image, image_name)
        image_fname = Path(image_path).name
        cv2.imwrite(os.path.join(images_path, image_fname), image)
        print('Image %s created successfully!' % image_name)
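
# The f1 helper used when building metrics['f1'] above is not shown in this
# excerpt; a minimal sketch under the standard harmonic-mean definition
# (an assumption, not necessarily the project's own implementation):
def f1(precision, recall):
    # harmonic mean of precision and recall; 0 when both are 0
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
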
Example #14
class PascalVOCMetricByDistance(Callback):
    def __init__(self,
                 anchors,
                 size,
                 metric_names: list,
                 detect_thresh: float = 0.3,
                 nms_thresh: float = 0.5,
                 radius: float = 25,
                 images_per_batch: int = -1):
        self.ap = 'AP'
        self.anchors = anchors
        self.size = size
        self.detect_thresh = detect_thresh
        self.nms_thresh = nms_thresh
        self.radius = radius

        self.images_per_batch = images_per_batch
        self.metric_names_original = metric_names
        self.metric_names = ["{}-{}".format(self.ap, i) for i in metric_names]

        self.evaluator = Evaluator()
        self.boundingBoxes = BoundingBoxes()

    def on_epoch_begin(self, **kwargs):
        self.boundingBoxes.removeAllBoundingBoxes()
        self.imageCounter = 0

    def on_batch_end(self, last_output, last_target, **kwargs):
        bbox_gt_batch, class_gt_batch = last_target
        class_pred_batch, bbox_pred_batch = last_output[:2]

        self.images_per_batch = self.images_per_batch if self.images_per_batch > 0 else class_pred_batch.shape[0]
        for bbox_gt, class_gt, clas_pred, bbox_pred in \
                list(zip(bbox_gt_batch, class_gt_batch, class_pred_batch, bbox_pred_batch))[: self.images_per_batch]:

            bbox_pred, scores, preds = process_output(clas_pred, bbox_pred,
                                                      self.anchors,
                                                      self.detect_thresh)
            if bbox_pred is None:  # or len(preds) > 3 * len(bbox_gt):
                continue

            #image = np.zeros((512, 512, 3), np.uint8)
            t_sz = torch.Tensor([(self.size, self.size)])[None].cpu()
            bbox_pred = to_np(rescale_boxes(bbox_pred.cpu(), t_sz))
            # change from center to top left
            bbox_pred[:, :2] = bbox_pred[:, :2] - bbox_pred[:, 2:] / 2

            temp_boxes = np.copy(bbox_pred)
            temp_boxes[:, 2] = temp_boxes[:, 0] + temp_boxes[:, 2]
            temp_boxes[:, 3] = temp_boxes[:, 1] + temp_boxes[:, 3]

            to_keep = non_max_suppression_by_distance(temp_boxes,
                                                      to_np(scores),
                                                      self.radius,
                                                      return_ids=True)
            bbox_pred, preds, scores = (bbox_pred[to_keep], preds[to_keep].cpu(),
                                        scores[to_keep].cpu())

            bbox_gt = bbox_gt[np.nonzero(class_gt)].squeeze(dim=1).cpu()
            class_gt = class_gt[class_gt > 0]
            # change gt from x,y,x2,y2 -> x,y,w,h
            bbox_gt[:, 2:] = bbox_gt[:, 2:] - bbox_gt[:, :2]

            bbox_gt = to_np(rescale_boxes(bbox_gt, t_sz))

            class_gt = to_np(class_gt) - 1
            preds = to_np(preds)
            scores = to_np(scores)

            for box, cla in zip(bbox_gt, class_gt):
                temp = BoundingBox(imageName=str(self.imageCounter),
                                   classId=self.metric_names_original[cla],
                                   x=box[0],
                                   y=box[1],
                                   w=box[2],
                                   h=box[3],
                                   typeCoordinates=CoordinatesType.Absolute,
                                   bbType=BBType.GroundTruth,
                                   format=BBFormat.XYWH,
                                   imgSize=(self.size, self.size))

                self.boundingBoxes.addBoundingBox(temp)

            # to reduce computation, keep at most three times the number of gt boxes
            num_boxes = len(bbox_gt) * 3
            for box, cla, scor in list(zip(bbox_pred, preds,
                                           scores))[:num_boxes]:
                temp = BoundingBox(imageName=str(self.imageCounter),
                                   classId=self.metric_names_original[cla],
                                   x=box[0],
                                   y=box[1],
                                   w=box[2],
                                   h=box[3],
                                   typeCoordinates=CoordinatesType.Absolute,
                                   classConfidence=scor,
                                   bbType=BBType.Detected,
                                   format=BBFormat.XYWH,
                                   imgSize=(self.size, self.size))

                self.boundingBoxes.addBoundingBox(temp)

            #image = self.boundingBoxes.drawAllBoundingBoxes(image, str(self.imageCounter))
            self.imageCounter += 1

    def on_epoch_end(self, last_metrics, **kwargs):
        if self.boundingBoxes.count() > 0:
            self.metrics = {}
            metricsPerClass = self.evaluator.GetPascalVOCMetrics(
                self.boundingBoxes, IOUThreshold=self.nms_thresh)
            self.metric = max(
                sum([mc[self.ap]
                     for mc in metricsPerClass]) / len(metricsPerClass), 0)

            for mc in metricsPerClass:
                self.metrics['{}-{}'.format(self.ap, mc['class'])] = max(
                    mc[self.ap], 0)

            return {'last_metrics': last_metrics + [self.metric]}
        else:
            self.metrics = dict(
                zip(self.metric_names,
                    [0 for i in range(len(self.metric_names))]))
            return {'last_metrics': last_metrics + [0]}
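
# non_max_suppression_by_distance is called above but not defined in this
# excerpt. A minimal sketch of center-distance suppression, given here only to
# illustrate the idea (an assumption, not the actual implementation used):
import numpy as np

def nms_by_center_distance(boxes_xyxy, scores, radius, return_ids=True):
    # boxes_xyxy: (N, 4) array of x1, y1, x2, y2; scores: (N,) confidences
    centers = np.stack([(boxes_xyxy[:, 0] + boxes_xyxy[:, 2]) / 2,
                        (boxes_xyxy[:, 1] + boxes_xyxy[:, 3]) / 2], axis=1)
    keep = []
    for idx in np.argsort(-scores):  # highest score first
        # keep a detection only if its center is farther than `radius`
        # from the center of every detection already kept
        if all(np.linalg.norm(centers[idx] - centers[k]) > radius for k in keep):
            keep.append(idx)
    keep = np.array(keep, dtype=int)
    return keep if return_ids else boxes_xyxy[keep]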