import numpy as np


def eval_instance_segmentation_voc(pred_masks,
                                   pred_labels,
                                   pred_scores,
                                   gt_masks,
                                   gt_labels,
                                   iou_thresh=0.5,
                                   use_07_metric=False):
    """Calculate average precisions based on evaluation code of PASCAL VOC.

    This function evaluates predicted masks obtained from a dataset
    that has :math:`N` images, using the average precision for each class.
    The code is based on the evaluation code used in `FCIS`_.

    .. _`FCIS`: https://arxiv.org/abs/1611.07709

    Args:
        pred_masks (iterable of numpy.ndarray): See the table below.
        pred_labels (iterable of numpy.ndarray): See the table below.
        pred_scores (iterable of numpy.ndarray): See the table below.
        gt_masks (iterable of numpy.ndarray): See the table below.
        gt_labels (iterable of numpy.ndarray): See the table below.
        iou_thresh (float): A prediction is correct if its Intersection over
            Union with the ground truth is above this value.
        use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
            for calculating average precision. The default value is
            :obj:`False`.

    .. csv-table::
        :header: name, shape, dtype, format

        :obj:`pred_masks`, ":math:`[(R, H, W)]`", :obj:`bool`, --
        :obj:`pred_labels`, ":math:`[(R,)]`", :obj:`int32`, \
        ":math:`[0, \#fg\_class - 1]`"
        :obj:`pred_scores`, ":math:`[(R,)]`", :obj:`float32`, \
        --
        :obj:`gt_masks`, ":math:`[(R, H, W)]`", :obj:`bool`, --
        :obj:`gt_labels`, ":math:`[(R,)]`", :obj:`int32`, \
        ":math:`[0, \#fg\_class - 1]`"

    Returns:
        dict:

        The keys, value-types and the description of the values are listed
        below.

        * **ap** (*numpy.ndarray*): An array of average precisions. \
            The :math:`l`-th value corresponds to the average precision \
            for class :math:`l`. If class :math:`l` does not exist in \
            either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
            value is set to :obj:`numpy.nan`.
        * **map** (*float*): The average of Average Precisions over classes.

    """

    prec, rec = calc_instance_segmentation_voc_prec_rec(
        pred_masks, pred_labels, pred_scores, gt_masks, gt_labels, iou_thresh)

    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)

    return {'ap': ap, 'map': np.nanmean(ap)}
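# Minimal usage sketch for the function above (illustrative, not from the
# original source). It assumes calc_instance_segmentation_voc_prec_rec and
# calc_detection_voc_ap are defined elsewhere (they are not shown in this
# snippet). Formats follow the docstring table: bool masks of shape
# (R, H, W), int32 labels, float32 scores, one entry per image.
def _toy_eval_example():
    pred_mask = np.zeros((1, 8, 8), dtype=bool)
    pred_mask[0, 2:6, 2:6] = True              # one predicted instance
    gt_mask = pred_mask.copy()                 # perfectly matching ground truth

    return eval_instance_segmentation_voc(
        pred_masks=[pred_mask],
        pred_labels=[np.array([0], dtype=np.int32)],
        pred_scores=[np.array([0.9], dtype=np.float32)],
        gt_masks=[gt_mask],
        gt_labels=[np.array([0], dtype=np.int32)],
        iou_thresh=0.5)                        # returns {'ap': ..., 'map': ...}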
def eval_instance_segmentation_voc(pred_masks,
                                   pred_labels,
                                   pred_scores,
                                   gt_masks,
                                   gt_labels,
                                   iou_thresh=0.5,
                                   use_07_metric=False,
                                   n_pos=None,
                                   score=None,
                                   match=None):

    n_pos, score, match = cal_running_instance(pred_masks, pred_labels,
                                               pred_scores, gt_masks,
                                               gt_labels, iou_thresh, n_pos,
                                               score, match)

    prec, rec = calc_instance_segmentation_voc_prec_rec(n_pos, score, match)

    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)

    return {
        'ap': ap,
        'map': np.nanmean(ap),
        "n_pos": n_pos,
        "score": score,
        "match": match
    }
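# Sketch of how the running variant above might be driven over mini-batches
# (illustrative only; cal_running_instance and the accumulator-based
# calc_instance_segmentation_voc_prec_rec are assumed to be defined
# elsewhere). The n_pos/score/match accumulators returned by one call are
# fed into the next, so AP is computed over everything seen so far.
def _streaming_eval_example(batches):
    result = {'n_pos': None, 'score': None, 'match': None}
    for pred_masks, pred_labels, pred_scores, gt_masks, gt_labels in batches:
        result = eval_instance_segmentation_voc(
            pred_masks, pred_labels, pred_scores, gt_masks, gt_labels,
            n_pos=result['n_pos'],
            score=result['score'],
            match=result['match'])
    return result                              # final 'ap'/'map' plus accumulators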
def eval_instseg_voc(pred_masks,
                     pred_labels,
                     pred_scores,
                     gt_masks,
                     gt_labels,
                     gt_difficults=None,
                     iou_thresh=0.5,
                     use_07_metric=False):
    assert not use_07_metric

    prec, rec, sq, dq, pq = calc_instseg_voc_prec_rec(pred_masks,
                                                      pred_labels,
                                                      pred_scores,
                                                      gt_masks,
                                                      gt_labels,
                                                      gt_difficults,
                                                      iou_thresh=iou_thresh)

    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)

    return {
        'map': np.nanmean(ap),
        'msq': np.nanmean(np.mean(sq, axis=1)),
        'mdq': np.nanmean(dq),
        'mpq': np.nanmean(pq),
        'msq/vis': np.nanmean(sq[:, 0]),
        'msq/occ': np.nanmean(sq[:, 1]),
    }
def eval_instance_segmentation_voc(
        generator, iou_threshs=(0.5, 0.7), use_07_metric=False):

    prec, rec = calc_instseg_voc_prec_rec(generator, iou_threshs)

    ap = {}
    for iou_thresh in iou_threshs:
        ap_thresh = calc_detection_voc_ap(
            prec[iou_thresh], rec[iou_thresh], use_07_metric=use_07_metric)
        ap['ap{}'.format(iou_thresh)] = ap_thresh
        ap['map{}'.format(iou_thresh)] = np.nanmean(ap_thresh)

    return ap
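# Sketch of consuming the result of the generator-based variant above
# (illustrative only; the item format expected from `generator` by
# calc_instseg_voc_prec_rec is not visible in this snippet, so the call is
# left abstract):
#
#     result = eval_instance_segmentation_voc(my_prediction_generator)
#     result['ap0.5'], result['map0.5']    # per-class AP and mAP at IoU 0.5
#     result['ap0.7'], result['map0.7']    # per-class AP and mAP at IoU 0.7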
def eval_instseg_voc(
        pred_masks, pred_labels, pred_scores, gt_masks, gt_labels,
        gt_difficults=None,
        iou_thresh=0.5, use_07_metric=False):

    prec, rec = calc_instseg_voc_prec_rec(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_difficults,
        iou_thresh=iou_thresh)

    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)

    return {'ap': ap, 'map': np.nanmean(ap)}
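    # The following test is a method lifted out of its test-case class;
    # self.prec, self.rec, self.use_07_metric and self.ap are fixtures set up
    # elsewhere (not shown in this snippet).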
    def test_calc_detection_voc_ap(self):
        ap = calc_detection_voc_ap(
            self.prec, self.rec, use_07_metric=self.use_07_metric)

        np.testing.assert_almost_equal(ap, self.ap)
def eval_multi_label_classification(pred_labels, pred_scores, gt_labels):
    prec, rec = calc_multi_label_classification_prec_rec(
        pred_labels, pred_scores, gt_labels)
    ap = calc_detection_voc_ap(prec, rec)
    return {'ap': ap, 'map': np.nanmean(ap)}
def eval_instance_segmentation_voc(pred_masks,
                                   pred_labels,
                                   pred_scores,
                                   gt_masks,
                                   gt_labels,
                                   iou_thresh=0.5,
                                   use_07_metric=False):
    """Calculate average precisions based on evaluation code of PASCAL VOC.

    This function evaluates predicted masks obtained from a dataset
    that has :math:`N` images, using the average precision for each class.
    The code is based on the evaluation code used in `FCIS`_.

    .. _`FCIS`: https://arxiv.org/abs/1611.07709

    Args:
        pred_masks (iterable of numpy.ndarray): An iterable of :math:`N`
            sets of masks. Its index corresponds to an index for the base
            dataset. Each element of :obj:`pred_masks` is an object mask
            and is an array whose shape is :math:`(R, H, W)`,
            where :math:`R` corresponds
            to the number of masks, which may vary among images.
        pred_labels (iterable of numpy.ndarray): An iterable of labels.
            Similar to :obj:`pred_masks`, its index corresponds to an
            index for the base dataset. Its length is :math:`N`.
        pred_scores (iterable of numpy.ndarray): An iterable of confidence
            scores for predicted masks. Similar to :obj:`pred_masks`,
            its index corresponds to an index for the base dataset.
            Its length is :math:`N`.
        gt_masks (iterable of numpy.ndarray): An iterable of ground truth
            masks whose length is :math:`N`. An element of :obj:`gt_masks` is
            an object mask whose shape is :math:`(R, H, W)`. Note that the
            number of masks :math:`R` in each image does not need to be
            the same as the number of corresponding predicted masks.
        gt_labels (iterable of numpy.ndarray): An iterable of ground truth
            labels which are organized similarly to :obj:`gt_masks`. Its
            length is :math:`N`.
        iou_thresh (float): A prediction is correct if its Intersection over
            Union with the ground truth is above this value.
        use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
            for calculating average precision. The default value is
            :obj:`False`.

    Returns:
        dict:

        The keys, value-types and the description of the values are listed
        below.

        * **ap** (*numpy.ndarray*): An array of average precisions. \
            The :math:`l`-th value corresponds to the average precision \
            for class :math:`l`. If class :math:`l` does not exist in \
            either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
            value is set to :obj:`numpy.nan`.
        * **map** (*float*): The average of Average Precisions over classes.

    """

    prec, rec = calc_instance_segmentation_voc_prec_rec(
        pred_masks, pred_labels, pred_scores, gt_masks, gt_labels, iou_thresh)

    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)

    return {'ap': ap, 'map': np.nanmean(ap)}
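# Note on the nan handling documented above (illustrative sketch, not from the
# original source): if, say, only class 2 ever appears in pred_labels and
# gt_labels, the returned 'ap' array holds numpy.nan for the unused classes
# and np.nanmean skips them when computing 'map'. Roughly:
#
#     result['ap']   # e.g. array([nan, nan, 1.0]) for a perfect class-2 match
#     result['map']  # 1.0, since np.nanmean ignores the nan entries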