Example #1
    def test_compute_sample(self, input_data, expected_value):
        params = input_data.copy()
        vals = {}
        # split the prediction/label tensors from the constructor parameters
        vals["y_pred"] = params.pop("y_pred")
        vals["y"] = params.pop("y")
        metric = ConfusionMatrixMetric(**params)
        metric(**vals)
        result, _ = metric.aggregate()[0]
        np.testing.assert_allclose(result, expected_value, atol=1e-4, rtol=1e-4)
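To make the parameterized fixture concrete, here is a hypothetical stand-alone version of the same flow; the tensor values and parameter choices are made up for illustration and are not taken from the real test cases.

import torch
from monai.metrics import ConfusionMatrixMetric

input_data = {
    "y_pred": torch.tensor([[[[0.0, 1.0], [1.0, 0.0]]]]),  # binarized prediction, shape [1, 1, 2, 2]
    "y": torch.tensor([[[[0.0, 1.0], [0.0, 1.0]]]]),       # one-hot ground truth, same shape
    "metric_name": "tpr",
    "compute_sample": True,
}

params = input_data.copy()
vals = {"y_pred": params.pop("y_pred"), "y": params.pop("y")}
metric = ConfusionMatrixMetric(**params)
metric(**vals)
result, not_nans = metric.aggregate()[0]  # e.g. tensor(0.5) for the values above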
Example #2
    def test_clf_with_nan(self, input_data, expected_value):
        params = input_data.copy()
        vals = {}
        vals["y_pred"] = params.pop("y_pred")
        vals["y"] = params.pop("y")
        metric = ConfusionMatrixMetric(**params)
        result = metric(**vals)
        np.testing.assert_allclose(result, expected_value, atol=1e-4, rtol=1e-4)
        # the expected aggregate is derived the same way: reduce over channels, then compute "tpr"
        result, _ = metric.aggregate(reduction="mean_channel")[0]
        expected_value, _ = do_metric_reduction(expected_value, "mean_channel")
        expected_value = compute_confusion_matrix_metric("tpr", expected_value)
        np.testing.assert_allclose(result, expected_value, atol=1e-4, rtol=1e-4)
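The expected value in the second half of this test is derived in two steps: reduce the confusion-matrix buffer over channels, then turn the reduced counts into a "tpr" score. A small hypothetical sketch of that derivation (all numbers made up):

import torch
from monai.metrics import compute_confusion_matrix_metric, do_metric_reduction

# confusion-matrix buffer of shape [batch, channel, 4], storing (TP, FP, TN, FN) per channel
conf = torch.tensor([[[2.0, 1.0, 5.0, 0.0], [1.0, 0.0, 6.0, 1.0]]])
reduced, not_nans = do_metric_reduction(conf, "mean_channel")  # average over channels -> [batch, 4]
tpr = compute_confusion_matrix_metric("tpr", reduced)          # TPR = TP / (TP + FN)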
Example #3
    def test_compute_sample_multiple_metrics(self, input_data, expected_values):
        params = input_data.copy()
        vals = {}
        vals["y_pred"] = params.pop("y_pred")
        vals["y"] = params.pop("y")
        metric = ConfusionMatrixMetric(**params)
        metric(**vals)
        results = metric.aggregate()
        for (result, _), expected_value in zip(results, expected_values):
            np.testing.assert_allclose(result, expected_value, atol=1e-4, rtol=1e-4)
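For context, `metric_name` can also be a sequence, in which case `aggregate()` returns one `(value, not_nans)` entry per requested metric, which is the situation the loop above iterates over. A minimal hypothetical sketch (data values are made up):

import torch
from monai.metrics import ConfusionMatrixMetric

y_pred = torch.tensor([[[[1.0, 0.0], [0.0, 1.0]]]])  # binarized prediction, shape [1, 1, 2, 2]
y = torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])       # one-hot ground truth, same shape

metric = ConfusionMatrixMetric(metric_name=["sensitivity", "specificity"], compute_sample=True)
metric(y_pred=y_pred, y=y)
for name, (value, not_nans) in zip(["sensitivity", "specificity"], metric.aggregate()):
    print(name, value)  # sensitivity ~= 0.6667, specificity = 1.0 for the values above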
Example #4
    def __init__(
        self,
        include_background: bool = True,
        metric_name: str = "hit_rate",
        output_transform: Callable = lambda x: x,
        device: Optional[torch.device] = None,
    ) -> None:
        """

        Args:
            include_background: whether to skip metric computation on the first channel of
                the predicted output. Defaults to True.
            metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
                ``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
                ``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
                ``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
                ``"informedness"``, ``"markedness"``]
                Some of the metrics have multiple aliases (as listed on the Wikipedia confusion-matrix page),
                and those aliases can be passed as the metric name instead.
            output_transform: callable that transforms `ignite.engine.state.output` into a `(y_pred, y)` pair.
            device: device specification, used when running distributed computation.

        See also:
            :py:meth:`monai.metrics.confusion_matrix`
        """
        metric_fn = ConfusionMatrixMetric(
            include_background=include_background,
            metric_name=metric_name,
            compute_sample=False,
            reduction=MetricReduction.NONE,
        )
        self.metric_name = metric_name
        super().__init__(metric_fn=metric_fn,
                         output_transform=output_transform,
                         device=device)
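A possible usage sketch for this handler, assuming the enclosing class is the ignite-style ConfusionMatrix handler and inherits ignite's usual `attach()`; the class name, engine wiring, and dict keys below are assumptions for illustration only.

from ignite.engine import Engine

def eval_step(engine, batch):
    # assume the batch already holds binarized, batch-first prediction and one-hot label tensors
    return {"pred": batch["pred"], "label": batch["label"]}

evaluator = Engine(eval_step)
handler = ConfusionMatrix(  # the class whose __init__ is shown above (name assumed)
    include_background=False,
    metric_name="f1 score",
    output_transform=lambda out: (out["pred"], out["label"]),
)
handler.attach(evaluator, "f1_score")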
Example #5
    def test_clf_with_nan(self, input_data, expected_value, expected_not_nans):
        params = input_data.copy()
        vals = {}
        vals["y_pred"] = params.pop("y_pred")
        vals["y"] = params.pop("y")
        metric = ConfusionMatrixMetric(**params)
        result, not_nans = metric(**vals)
        np.testing.assert_allclose(result, expected_value, atol=1e-4)
        np.testing.assert_allclose(not_nans, expected_not_nans, atol=1e-4)
Example #6
    def __init__(
        self,
        include_background: bool = True,
        metric_name: str = "hit_rate",
        compute_sample: bool = False,
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """

        Args:
            include_background: whether to skip metric computation on the first channel of
                the predicted output. Defaults to True.
            metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
                ``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
                ``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
                ``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
                ``"informedness"``, ``"markedness"``]
                Some of the metrics have multiple aliases (as listed on the Wikipedia confusion-matrix page),
                and those aliases can be passed as the metric name instead.
            compute_sample: when reducing, if ``True``, each sample's metric is computed from its own
                confusion matrix before the reduction; if ``False``, the confusion matrices are reduced
                first and the metric is computed on the result. Defaults to ``False``.
            reduction: mode used to reduce the metrics; the reduction is applied to `not-nan` values only.
                Available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}. Defaults to ``"mean"``; ``"none"`` skips the reduction.
            output_transform: callable that extracts `y_pred` and `y` from `ignite.engine.state.output` and
                constructs the `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                lists of `channel-first` Tensors; the `(y_pred, y)` form is what `update()` expects.
                `engine.state` and `output_transform` follow the ignite concepts described at
                https://pytorch.org/ignite/concepts.html#state; an explanation and usage example are in the tutorial:
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: whether to save per-image metric computation details, for example the TP/TN/FP/FN
                of every image. Defaults to True; details are saved to the `engine.state.metric_details`
                dict under the metric name.

        See also:
            :py:meth:`monai.metrics.confusion_matrix`
        """
        metric_fn = ConfusionMatrixMetric(
            include_background=include_background,
            metric_name=metric_name,
            compute_sample=compute_sample,
            reduction=reduction,
        )
        self.metric_name = metric_name
        super().__init__(metric_fn=metric_fn,
                         output_transform=output_transform,
                         save_details=save_details)
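To illustrate the `save_details` behaviour described in the docstring, here is a hedged end-to-end sketch; the handler class name, the trivial engine, and the input tensors are assumptions, and the per-image details are read back from `engine.state.metric_details` as the docstring states.

import torch
from ignite.engine import Engine

data = [{"pred": torch.tensor([[[1.0, 0.0]]]), "label": torch.tensor([[[1.0, 1.0]]])}]

evaluator = Engine(lambda engine, batch: batch)  # engine output is already a {"pred", "label"} dict
handler = ConfusionMatrix(  # the class whose __init__ is shown above (name assumed)
    metric_name="sensitivity",
    output_transform=lambda out: (out["pred"], out["label"]),
    save_details=True,
)
handler.attach(evaluator, "sensitivity")
evaluator.run(data)
per_image = evaluator.state.metric_details["sensitivity"]  # per-image TP/TN/FP/FN details as described above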
Example #7
    def __init__(
        self,
        include_background: bool = True,
        metric_name: str = "hit_rate",
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """

        Args:
            include_background: whether to skip metric computation on the first channel of
                the predicted output. Defaults to True.
            metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
                ``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
                ``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
                ``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
                ``"informedness"``, ``"markedness"``]
                Some of the metrics have multiple aliases (as listed on the Wikipedia confusion-matrix page),
                and those aliases can be passed as the metric name instead.
            output_transform: callable that extracts `y_pred` and `y` from `ignite.engine.state.output` and
                constructs the `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                lists of `channel-first` Tensors; the `(y_pred, y)` form is what `update()` expects.
                For example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
                output_transform can be `lambda x: (x["pred"], x["label"])`.
            save_details: whether to save per-image metric computation details, for example the TP/TN/FP/FN
                of every image. Defaults to True; details are saved to the `engine.state.metric_details`
                dict under the metric name.

        See also:
            :py:meth:`monai.metrics.confusion_matrix`
        """
        metric_fn = ConfusionMatrixMetric(
            include_background=include_background,
            metric_name=metric_name,
            compute_sample=False,
            reduction=MetricReduction.MEAN,
        )
        self.metric_name = metric_name
        super().__init__(
            metric_fn=metric_fn,
            output_transform=output_transform,
            save_details=save_details,
        )
Example #8
    def __init__(
        self,
        include_background: bool = True,
        metric_name: str = "hit_rate",
        compute_sample: bool = True,
        output_class: bool = False,
        output_transform: Callable = lambda x: x,
        device: Optional[torch.device] = None,
    ) -> None:
        """

        Args:
            include_background: whether to skip metric computation on the first channel of
                the predicted output. Defaults to True.
            metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
                ``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
                ``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
                ``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
                ``"informedness"``, ``"markedness"``]
                Some of the metrics have multiple aliases (as listed on the Wikipedia confusion-matrix page),
                and those aliases can be passed as the metric name instead.
            compute_sample: if ``True``, each sample's metric is computed first. Defaults to ``True``.
            output_class: if ``True``, a score is returned for each class; averaging these per-class scores
                yields the macro average. If ``False``, a single micro-average score computed over all
                classes is returned. Defaults to ``False``.
            output_transform: callable that transforms `ignite.engine.state.output` into a `(y_pred, y)` pair.
            device: device specification, used when running distributed computation.

        See also:
            :py:meth:`monai.metrics.confusion_matrix`
        """
        super().__init__(output_transform, device=device)
        self.confusion_matrix = ConfusionMatrixMetric(
            include_background=include_background,
            metric_name=metric_name,
            compute_sample=compute_sample,
            output_class=output_class,
        )
        self._sum = 0.0
        self._num_examples = 0
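To make the macro/micro distinction in the `output_class` docstring concrete, here is a small, purely illustrative computation with made-up counts; it is not tied to this class's API.

import torch

tp = torch.tensor([30.0, 5.0])   # true positives per class (made-up counts)
fn = torch.tensor([10.0, 15.0])  # false negatives per class

per_class_recall = tp / (tp + fn)         # per-class scores: [0.75, 0.25]
macro = per_class_recall.mean()           # macro average: mean of per-class scores -> 0.50
micro = tp.sum() / (tp.sum() + fn.sum())  # micro average: pooled counts -> 0.5833...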