def test_metrics_dict_roc() -> None:
    """
    Test that adding prediction entries to a MetricsDict instance in chunks works and returns the correct ROC and PR AUC.
    """
    # Prepare a vector of predictions and labels, and compute the reference AUC from them directly.
    # MetricsDict will get the same data supplied in 3 chunks, and should return the same AUC value.
    predictions = np.array([0.5, 0.6, 0.1, 0.8, 0.2, 0.9])
    labels = np.array([0, 1.0, 0, 0, 1, 1], dtype=float)
    split_length = [3, 2, 1]
    assert sum(split_length) == len(predictions)
    summed = np.cumsum(split_length)
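    # summed holds the exclusive end index of each chunk: [3, 5, 6]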
    m = MetricsDict()
    for i, end in enumerate(summed):
        start = 0 if i == 0 else summed[i - 1]
        pred = predictions[start:end]
        label = labels[start:end]
        # Use subject IDs that are unique across chunks, rather than restarting at 0 for each chunk.
        subject_ids = list(map(str, range(start, end)))
        m.add_predictions(subject_ids, pred, label)
    assert m.has_prediction_entries
    actual_auc = m.get_roc_auc()
    expected_auc = roc_auc_score(labels, predictions)
    assert actual_auc == pytest.approx(expected_auc, 1e-6)
    actual_pr_auc = m.get_pr_auc()
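    # 0.7111111 is the trapezoidal area under the precision-recall curve for the data above,
    # i.e. sklearn's auc(recall, precision) applied to the output of precision_recall_curve.
    # Note that this differs from average_precision_score, which would give roughly 0.7556.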
    expected_pr_auc = 0.7111111
    assert actual_pr_auc == pytest.approx(expected_pr_auc, 1e-6)


def test_metrics_dict_roc_degenerate() -> None:
    """
    Test that AUC computation in a MetricsDict instance degrades gracefully when only 1 class is present.
    """
    # All labels belong to the positive class, so sklearn's roc_auc_score is undefined for this data.
    # MetricsDict is expected to fall back to an AUC of 1.0 for both the ROC and PR curves.
    predictions = np.array([0.5, 0.6, 0.1, 0.8, 0.2, 0.9])
    m = MetricsDict()
    subject_ids = list(map(str, range(len(predictions))))
    m.add_predictions(subject_ids, predictions, np.ones_like(predictions))
    assert m.has_prediction_entries
    assert m.get_roc_auc() == 1.0
    assert m.get_pr_auc() == 1.0
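

# The two tests above rely on three behaviors of MetricsDict: predictions can be added in chunks,
# the AUC values are computed over the pooled data, and a single-class input yields an AUC of 1.0.
# The class below is a minimal illustrative sketch of an accumulator with that interface, written
# only to document the expected behavior; it is NOT the actual MetricsDict implementation, and the
# real class may compute these values differently.
class _SketchMetricsDict:
    def __init__(self) -> None:
        self._subject_ids: list = []
        self._predictions: list = []
        self._labels: list = []

    def add_predictions(self, subject_ids, predictions, labels) -> None:
        # Append one chunk. Chunks are simply pooled, so the number and order of
        # add_predictions calls does not affect the final AUC values.
        self._subject_ids.extend(subject_ids)
        self._predictions.extend(np.asarray(predictions).tolist())
        self._labels.extend(np.asarray(labels).tolist())

    @property
    def has_prediction_entries(self) -> bool:
        return len(self._predictions) > 0

    def get_roc_auc(self) -> float:
        labels = np.asarray(self._labels)
        if len(np.unique(labels)) < 2:
            # Degenerate case: with a single class, roc_auc_score would raise, so return 1.0.
            return 1.0
        return roc_auc_score(labels, np.asarray(self._predictions))

    def get_pr_auc(self) -> float:
        # Local import to keep the sketch self-contained.
        from sklearn.metrics import auc, precision_recall_curve
        labels = np.asarray(self._labels)
        if len(np.unique(labels)) < 2:
            return 1.0
        precision, recall, _ = precision_recall_curve(labels, np.asarray(self._predictions))
        # Trapezoidal area under the precision-recall curve, matching the 0.7111111 value above.
        return auc(recall, precision)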