Example #1
    def evaluate(self,
                 results,
                 metric='hmean-iou',
                 score_thr=None,
                 min_score_thr=0.3,
                 max_score_thr=0.9,
                 step=0.1,
                 rank_list=None,
                 logger=None,
                 **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'hmean-iou' and 'hmean-ic13'. Default: 'hmean-iou'.
            score_thr (float): Deprecated. Please use min_score_thr instead.
            min_score_thr (float): Minimum score threshold of the prediction map.
            max_score_thr (float): Maximum score threshold of the prediction map.
            step (float): The spacing between score thresholds.
            rank_list (str): Json file used to save the evaluation result of
                each image after ranking.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
        Returns:
            dict[str: float]: The evaluation results.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['hmean-iou', 'hmean-ic13']
        metrics = set(metrics) & set(allowed_metrics)

        img_infos = []
        ann_infos = []
        for i in range(len(self)):
            img_ann_info = self.data_infos[i]
            img_info = {'filename': img_ann_info['file_name']}
            ann_info = self._parse_anno_info(img_ann_info['annotations'])
            img_infos.append(img_info)
            ann_infos.append(ann_info)

        eval_results = eval_hmean(results,
                                  img_infos,
                                  ann_infos,
                                  metrics=metrics,
                                  score_thr=score_thr,
                                  min_score_thr=min_score_thr,
                                  max_score_thr=max_score_thr,
                                  step=step,
                                  logger=logger,
                                  rank_list=rank_list)

        return eval_results
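A minimal usage sketch for this threshold-sweeping variant. It is not taken from the source: `dataset` is assumed to be an already-built instance of a text-detection dataset class that defines the evaluate() method above, and the prediction below is fake; only the 'boundary_result' layout and the 'hmean-iou:hmean' result key come from the examples in this file.

# Usage sketch (assumption: `dataset` exposes the evaluate() shown above).
# Each result dict holds polygons as [x1, y1, ..., xn, yn, score].
results = [{'boundary_result': [[50, 70, 80, 70, 80, 100, 50, 100, 0.95]]}]

# Sweep the prediction-map score threshold from 0.3 to 0.9 in steps of 0.1.
eval_res = dataset.evaluate(results,
                            metric='hmean-iou',
                            min_score_thr=0.3,
                            max_score_thr=0.9,
                            step=0.1)
print(eval_res['hmean-iou:hmean'])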
Example #2
import pytest

# Assumed import path: in MMOCR, eval_hmean is exposed via mmocr.core.evaluation.
from mmocr.core.evaluation import eval_hmean


def test_eval_hmean():
    metrics = {'hmean-iou', 'hmean-ic13'}
    results = [{
        'boundary_result': [[50, 70, 80, 70, 80, 100, 50, 100, 1],
                            [120, 140, 200, 140, 200, 200, 120, 200, 1]]
    }]

    img_infos = [{'file_name': 'sample1.jpg'}]
    # Ground-truth helper defined elsewhere in this test module
    # (see the sketch after this test).
    ann_infos = _create_dummy_ann_infos()

    # test invalid arguments
    with pytest.raises(AssertionError):
        eval_hmean(results, [[]], ann_infos, metrics=metrics)
    with pytest.raises(AssertionError):
        eval_hmean(results, img_infos, [[]], metrics=metrics)
    with pytest.raises(AssertionError):
        eval_hmean([[]], img_infos, ann_infos, metrics=metrics)
    with pytest.raises(AssertionError):
        eval_hmean(results, img_infos, ann_infos, metrics='hmean-iou')

    eval_results = eval_hmean(results, img_infos, ann_infos, metrics=metrics)

    assert eval_results['hmean-iou:hmean'] == 1
    assert eval_results['hmean-ic13:hmean'] == 1
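For context, a plausible shape for the ground truth that _create_dummy_ann_infos() builds; the real helper is defined elsewhere in the test module and is not shown here. The 'masks'/'masks_ignore' keys are an assumption about the ann_info fields eval_hmean consumes; the polygons simply mirror the two predicted boundaries above, which is what lets both metrics reach an hmean of 1.

# Hypothetical sketch of the missing helper -- field names are assumptions.
import numpy as np


def _create_dummy_ann_infos():
    ann_info = {
        'bboxes': np.array([[50, 70, 80, 100]], dtype=np.float32),
        'labels': np.array([0], dtype=np.int64),
        'bboxes_ignore': np.array([[120, 140, 200, 200]], dtype=np.float32),
        # Ground-truth polygon matching the first predicted boundary.
        'masks': [[[50, 70, 80, 70, 80, 100, 50, 100]]],
        # "Don't care" region matching the second predicted boundary.
        'masks_ignore': [[[120, 140, 200, 140, 200, 200, 120, 200]]]
    }
    return [ann_info]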
Example #3
    def evaluate(self,
                 results,
                 metric='hmean-iou',
                 logger=None,
                 score_thr=0.3,
                 rank_list=None,
                 **kwargs):
        """Evaluate the hmean metric.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'hmean-iou' and 'hmean-ic13'. Default: 'hmean-iou'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            score_thr (float): Score threshold of the prediction map.
                Default: 0.3.
            rank_list (str): Json file used to save the evaluation result of
                each image after ranking.
        Returns:
            dict[str: float]: The evaluation results.
        """
        assert utils.is_type_list(results, dict)

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['hmean-iou', 'hmean-ic13']
        metrics = set(metrics) & set(allowed_metrics)

        img_infos = []
        ann_infos = []
        for i in range(len(self)):
            img_info = {'filename': self.data_infos[i]['file_name']}
            img_infos.append(img_info)
            ann_infos.append(self.get_ann_info(i))

        eval_results = eval_hmean(
            results,
            img_infos,
            ann_infos,
            metrics=metrics,
            score_thr=score_thr,
            logger=logger,
            rank_list=rank_list)

        return eval_results
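And a matching sketch for this single-threshold variant. As before, `dataset`, the prediction dict, and the rank-list file name are assumptions for illustration, not taken from the snippet.

# Usage sketch (assumed names): evaluate at one score threshold instead of
# sweeping a range as in Example #1, and save per-image rankings to a file.
results = [{'boundary_result': [[50, 70, 80, 70, 80, 100, 50, 100, 0.95]]}]

eval_res = dataset.evaluate(results,
                            metric=['hmean-iou', 'hmean-ic13'],
                            score_thr=0.3,
                            rank_list='rank_results.json')  # hypothetical path
for key, val in eval_res.items():
    print(f'{key}: {val:.3f}')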