Example #1
def generate_predictions(gt_path: str,
                         preds_path: str,
                         images_path: str = None,
                         category_idxs: List[int] = None,
                         **kwargs) -> pd.DataFrame:
    """
    create a list that contains the comparison between the predictions
        and the ground truth to be used to compute all the metrics

    Args:
        gt_path (str): the path of the ground truth annotations
        preds_path (str): the path of the prediction annotations
        images_path (str): the path were are saved the images

    Raises:
        Exception: returns an execption if the image idx of the files are not aligned

    Returns:
        pd.DataFrame: [description]
    """
    if images_path is None:
        images_path = Path(gt_path).parent / 'images'

    gt_ds = CocoDataset(gt_path, images_path)
    pred_ds = CocoDataset(preds_path, images_path)
    return generate_predictions_from_ds(gt_ds, pred_ds, category_idxs)
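A minimal usage sketch, assuming the function is exposed through the cocoeval module (as in Example #3 below) and using hypothetical annotation paths:

import cocoeval  # assumed module name, taken from Example #3

# hypothetical COCO-format annotation files; when images_path is omitted the
# images are looked up in an 'images' folder next to the ground truth file
report = cocoeval.generate_predictions('data/gt.json', 'data/preds.json')

# one row per ground truth/prediction comparison
print(report.head())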
Example #2
def generate_predictions_from_ds(
        gt_ds: CocoDataset,
        pred_ds: CocoDataset,
        category_idxs: List[int] = None) -> pd.DataFrame:

    gt_ds.reindex()
    pred_ds.reindex()

    results = []

    for img_idx, gt_img_meta in tqdm(gt_ds.imgs.items()):

        gt_anns = gt_ds.get_annotations(img_idx, category_idxs)
        pred_img_meta = pred_ds.imgs[img_idx]

        if gt_img_meta['file_name'] != pred_img_meta['file_name']:
            raise Exception("images path compared are different")

        img_path = gt_img_meta['file_name']

        pred_anns = pred_ds.get_annotations(img_idx, category_idxs)
        # collect all the predictions in a dict; entries left unmatched will become false positives
        pred_idx_dict = {ann['id']: ann for ann in pred_anns}

        # no annotation in either dataset: record the image as a true negative
        if (len(gt_anns) == 0) and (len(pred_anns) == 0):
            results.append([
                img_path, -1, -1, -1, -1, 0, 0, 0, 1, sys.float_info.max,
                sys.float_info.max, 'true_negative'
            ])

        # iterate over the ground truth annotations
        for gt_ann in gt_anns:
            gt_mask = maskutils.polygons_to_mask(gt_ann['segmentation'],
                                                 gt_img_meta['height'],
                                                 gt_img_meta['width'])
            gt_ann_id = gt_ann['id']
            gt_class_id = gt_ann['category_id']

            pred_ann_id, row = __best_match(pred_anns, gt_img_meta, gt_ann_id,
                                            gt_mask, img_path, gt_class_id,
                                            gt_ann['area'])
            results.append(row)
            # drop the matched prediction so it cannot be matched twice
            if pred_ann_id in pred_idx_dict:
                del pred_idx_dict[pred_ann_id]
                pred_anns = pred_idx_dict.values()

        # add the false positive
        for pred_ann_id, pred_ann in pred_idx_dict.items():
            # record a false positive with a high score so that it is not removed from the metrics
            results.append([
                img_path, -1, pred_ann_id, -1, pred_ann['category_id'], 0, 0,
                0, pred_ann['score'], sys.float_info.max, pred_ann['area'],
                'false_positive'
            ])

    return pd.DataFrame(results, columns=REPORT_HEADER)
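Once the datasets are loaded, the comparison can be restricted to a subset of categories through category_idxs. A sketch along these lines, assuming CocoDataset and generate_predictions_from_ds can be imported from the same package (the exact module paths are not shown in these examples) and using hypothetical paths and category ids:

from cocoeval import CocoDataset, generate_predictions_from_ds  # assumed import path

# build the datasets once and reuse them across evaluations
gt_ds = CocoDataset('data/gt.json', 'data/images')
pred_ds = CocoDataset('data/preds.json', 'data/images')

# only annotations belonging to these category ids are compared (hypothetical values)
report = generate_predictions_from_ds(gt_ds, pred_ds, category_idxs=[1, 3])
print(report['true_class_id'].value_counts())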
Example #3
def test_mean_average_precision_and_recall_per_class_with_name_min_score():
    ds_path = BASE_PATH / 'hair_drier_toaster_bear.json'

    ds = CocoDataset(ds_path)
    idx_class_name = {
        idx: cat_meta['name'] for idx, cat_meta in ds.cats.items()
    }

    df = cocoeval.generate_predictions(ds_path, ds_path)
    class_idx_metrics = cocoeval.mean_average_precision_and_recall_per_class(
        df, idx_class_dict=idx_class_name, min_score=0.5)
    class_idxs = df['true_class_id'].unique()
    class_idxs = class_idxs[class_idxs > 0]
    print(class_idx_metrics)
    assert len(class_idx_metrics) == len(class_idxs)
Example #4
def generate_predictions(gt_path: str,
                         preds_path: str,
                         images_path: str = None,
                         **kwargs) -> pd.DataFrame:
    """
    create a list that contains the comparison between the predictions
        and the ground truth to be used to compute all the metrics

    Args:
        gt_path (str): the path of the ground truth annotations
        preds_path (str): the path of the prediction annotations
        images_path (str): tthe path were are saved the images

    Raises:
        Exception: returns an execption if the image idx of the files are not aligned

    Returns:
        pd.DataFrame: [description]
    """
    if images_path is None:
        images_path = Path(gt_path).parent / 'images'

    gt_ds = CocoDataset(gt_path, images_path)
    gt_ds.reindex()
    pred_ds = CocoDataset(preds_path, images_path)
    pred_ds.reindex()

    results = []

    for img_idx, gt_img_meta in tqdm(gt_ds.imgs.items()):
        gt_anns = gt_ds.get_annotations(img_idx)
        pred_img_meta = pred_ds.imgs[img_idx]

        if gt_img_meta['file_name'] != pred_img_meta['file_name']:
            raise Exception("images path compared are different")

        img_path = gt_img_meta['file_name']

        pred_anns = pred_ds.get_annotations(img_idx)
        # collect all the predictions in a dict; entries left unmatched will become false positives
        pred_idx_dict = {ann['id']: ann for ann in pred_anns}
        # iterate over the ground truth annotations
        for gt_ann in gt_anns:
            gt_mask = maskutils.polygons_to_mask(gt_ann['segmentation'],
                                                 gt_img_meta['height'],
                                                 gt_img_meta['width'])
            gt_ann_id = gt_ann['id']
            gt_class_id = gt_ann['category_id']

            pred_ann_id, row = __best_match(pred_anns, gt_img_meta, gt_ann_id,
                                            gt_mask, img_path, gt_class_id)
            results.append(row)
            # drop the matched prediction so it cannot be matched twice
            if pred_ann_id in pred_idx_dict:
                del pred_idx_dict[pred_ann_id]
                pred_anns = pred_idx_dict.values()

        # add the false positive
        for pred_ann_id, pred_ann in pred_idx_dict.items():
            # record a false positive with a high score so that it is not removed from the metrics
            results.append([
                img_path, -1, pred_ann_id, 0, pred_ann['category_id'], 0, 0, 0,
                1
            ])

    return pd.DataFrame(results, columns=REPORT_HEADER)
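Both variants delegate the per-annotation matching to a private __best_match helper that is not shown in these examples. Purely as an illustration of the idea (not the library's actual implementation), a mask-based matcher could pick the prediction whose mask has the highest IoU with the ground truth mask; the -1 fallback below mirrors the placeholder id used in the report rows above:

import numpy as np

def best_match_sketch(pred_masks, gt_mask):
    """Illustrative only: given a dict {pred_ann_id: binary mask}, return the
    id of the prediction whose mask overlaps gt_mask the most (by IoU) and the
    IoU itself. The real __best_match also builds the full report row."""
    best_id, best_iou = -1, 0.0
    for ann_id, pred_mask in pred_masks.items():
        intersection = np.logical_and(gt_mask, pred_mask).sum()
        union = np.logical_or(gt_mask, pred_mask).sum()
        iou = intersection / union if union else 0.0
        if iou > best_iou:
            best_id, best_iou = ann_id, iou
    return best_id, best_iou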