Example #1
def test_multilabel_accuracy_mean(
    outputs_list: Iterable[torch.Tensor],
    targets_list: Iterable[torch.Tensor],
    thresholds: Union[float, torch.Tensor],
    true_values_list: Iterable[float],
) -> None:
    """
    This test checks that all the intermediate metrics values are correct during accumulation.

    Args:
        outputs_list: list of output tensors
        targets_list: list of true answer tensors
        thresholds: threshold
        true_values_list: true intermediate metric results
    """
    metric = MultilabelAccuracyMetric(threshold=thresholds)
    for outputs, targets, true_value in zip(outputs_list, targets_list,
                                            true_values_list):
        metric.update(outputs=outputs, targets=targets)
        mean, _ = metric.compute()
        assert np.isclose(mean, true_value)
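
The outputs_list, targets_list, thresholds and true_values_list arguments are supplied by pytest parametrization that is not shown in this excerpt. Below is a minimal, self-contained sketch of the same accumulation check with illustrative values; it assumes the metric comes from the Catalyst library, a threshold of 0.5, and element-wise (per-label) accuracy, so the concrete tensors and expected values are assumptions rather than the original fixtures.

import numpy as np
import torch
from catalyst.metrics import MultilabelAccuracyMetric  # assumed source of the metric

metric = MultilabelAccuracyMetric(threshold=0.5)

# batch 1: every thresholded prediction matches its target -> batch accuracy 1.0
metric.update(
    outputs=torch.tensor([[0.9, 0.1], [0.2, 0.8]]),
    targets=torch.tensor([[1.0, 0.0], [0.0, 1.0]]),
)
mean, _ = metric.compute()
assert np.isclose(mean, 1.0)

# batch 2: one sample fully wrong, one fully correct -> batch accuracy 0.5;
# the running mean over two equal-sized batches is (1.0 + 0.5) / 2 = 0.75
metric.update(
    outputs=torch.tensor([[0.7, 0.6], [0.3, 0.4]]),
    targets=torch.tensor([[0.0, 0.0], [0.0, 0.0]]),
)
mean, _ = metric.compute()
assert np.isclose(mean, 0.75)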
Example #2
def test_multilabel_accuracy_std(
    outputs_list: Iterable[torch.Tensor],
    targets_list: Iterable[torch.Tensor],
    thresholds: Union[float, torch.Tensor],
    true_values_list: Iterable[float],
) -> None:
    """
    This test checks that all the intermediate metrics values are correct during accumulation.
    Note that now `accuracy/std` is not std exactly so it can fail if you fix it.

    Args:
        outputs_list: list of output tensors
        targets_list: list of true answer tensors
        thresholds: threshold
        true_values_list: true intermediate metric results
    """
    metric = MultilabelAccuracyMetric(threshold=thresholds)
    for outputs, targets, true_value in zip(outputs_list, targets_list,
                                            true_values_list):
        metric.update(outputs=outputs, targets=targets)
        _, std = metric.compute()
        assert np.isclose(std, true_value)
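
Because the docstring warns that `accuracy/std` is not exactly a standard deviation, hard-coding an independent expectation would be fragile. A hypothetical sanity check, reusing the illustrative tensors and assumptions from the sketch above, is to recompute a naive sample std of the per-batch accuracies and print both values side by side instead of asserting that they match.

import numpy as np
import torch
from catalyst.metrics import MultilabelAccuracyMetric  # assumed source of the metric

metric = MultilabelAccuracyMetric(threshold=0.5)
batches = [
    (torch.tensor([[0.9, 0.1], [0.2, 0.8]]), torch.tensor([[1.0, 0.0], [0.0, 1.0]])),
    (torch.tensor([[0.7, 0.6], [0.3, 0.4]]), torch.tensor([[0.0, 0.0], [0.0, 0.0]])),
]

batch_accuracies = []
for outputs, targets in batches:
    metric.update(outputs=outputs, targets=targets)
    # per-batch accuracy recomputed by hand (element-wise match after thresholding)
    batch_accuracies.append(((outputs > 0.5).float() == targets).float().mean().item())

_, std = metric.compute()
print("metric accuracy/std:", std)
print("naive sample std of per-batch accuracies:", np.std(batch_accuracies, ddof=1))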