def test_metrics_dict_with_default_hue() -> None:
    """
    Passing the default hue key explicitly in the hue list must not duplicate it
    in the reported hue names.
    """
    hue = "foo"
    metrics = MetricsDict(hues=[hue, MetricsDict.DEFAULT_HUE_KEY])
    expected_with_default = [hue, MetricsDict.DEFAULT_HUE_KEY]
    assert metrics.get_hue_names(include_default=True) == expected_with_default
    assert metrics.get_hue_names(include_default=False) == [hue]
def test_metrics_dict_get_hues() -> None:
    """
    Test to make sure metrics dict is configured properly with/without hues
    """
    # A dictionary created without hues only has the default hue.
    empty = MetricsDict()
    assert empty.get_hue_names() == [MetricsDict.DEFAULT_HUE_KEY]
    assert empty.get_hue_names(include_default=False) == []
    # Explicit hues are listed first, followed by the default hue.
    hues = ["A", "B", "C"]
    with_hues = MetricsDict(hues=hues)
    assert with_hues.get_hue_names() == hues + [MetricsDict.DEFAULT_HUE_KEY]
    assert with_hues.get_hue_names(include_default=False) == hues
def test_metrics_dict1() -> None:
    """
    Test insertion of scalar values into a MetricsDict.
    """
    metrics = MetricsDict()
    assert metrics.get_hue_names() == [MetricsDict.DEFAULT_HUE_KEY]
    name = "foo"
    value1, value2 = 2.7, 3.14
    metrics.add_metric(name, value1)
    metrics.add_metric(name, value2)
    assert metrics.values()[name] == [value1, value2]
    # Non-scalar values must be rejected.
    with pytest.raises(ValueError) as ex:
        # noinspection PyTypeChecker
        metrics.add_metric(name, [1.0])  # type: ignore
    assert "Expected the metric to be a scalar" in str(ex)
    assert metrics.skip_nan_when_averaging[name] is False
    name2 = "bar"
    value3 = 3.0
    metrics.add_metric(name2, value3, skip_nan_when_averaging=True)
    assert metrics.skip_nan_when_averaging[name2] is True
    # Expected average: Metric "foo" averages over two values v1 and v2. For "bar", we only inserted one value anyhow
    average = metrics.average()
    assert average.values() == {name: [mean([value1, value2])], name2: [value3]}
    assert metrics.num_entries() == {name: 2, name2: 1}
def test_delete_hue() -> None:
    """
    Deleting a hue must remove both the hue name and all metrics stored under it.
    """
    drop, keep = "a", "b"
    metrics = MetricsDict(hues=[drop, keep])
    metrics.add_metric("foo", 1.0, hue=drop)
    metrics.add_metric("bar", 2.0, hue=keep)
    metrics.delete_hue(drop)
    assert metrics.get_hue_names(include_default=False) == [keep]
    assert list(metrics.enumerate_single_values()) == [(keep, "bar", 2.0)]
def add_average_foreground_dice(metrics: MetricsDict) -> None:
    """
    If the given metrics dictionary contains an entry for Dice score, and only one value for the
    Dice score per class, then add an average Dice score for all foreground classes to the metrics
    dictionary (modified in place).

    :param metrics: The object that holds metrics. The average Dice score will be written back into this object.
    """
    # Collect one Dice value per hue, skipping the background class.
    foreground_dice = [
        metrics.get_single_metric(MetricType.DICE, hue=hue)
        for hue in metrics.get_hue_names(include_default=False)
        if hue != BACKGROUND_CLASS_NAME
    ]
    metrics.add_metric(MetricType.DICE, np.nanmean(foreground_dice).item())