Example #1
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.preprocessing._label import _encode  # private helper (scikit-learn ~0.22/0.23)

# The original scikit-learn test parametrizes several dtypes; one int case is shown here.
@pytest.mark.parametrize("values, expected",
                         [(np.array([2, 1, 3, 1, 3]), np.array([1, 2, 3]))])
def test_encode_util(values, expected):
    uniques = _encode(values)
    assert_array_equal(uniques, expected)
    uniques, encoded = _encode(values, encode=True)
    assert_array_equal(uniques, expected)
    assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))
    _, encoded = _encode(values, uniques, encode=True)
    assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))
Example #2
import numpy as np
import pytest
from sklearn.preprocessing._label import _encode  # private helper (scikit-learn ~0.22/0.23)


def test_encode_check_unknown():
    # test for the check_unknown parameter of _encode()
    uniques = np.array([1, 2, 3])
    values = np.array([1, 2, 3, 4])

    # Default is True, raise error
    with pytest.raises(ValueError,
                       match='y contains previously unseen labels'):
        _encode(values, uniques, encode=True, check_unknown=True)

    # don't raise an error if False
    _encode(values, uniques, encode=True, check_unknown=False)

    # check_unknown is ignored for object dtype: unknown values always raise
    uniques = np.array(['a', 'b', 'c'], dtype=object)
    values = np.array(['a', 'b', 'c', 'd'], dtype=object)
    with pytest.raises(ValueError,
                       match='y contains previously unseen labels'):
        _encode(values, uniques, encode=True, check_unknown=False)
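
For reference, a minimal sketch of the behaviour these tests exercise (assuming scikit-learn ~0.22/0.23, where _encode is a private helper in sklearn.preprocessing._label):

import numpy as np
from sklearn.preprocessing._label import _encode

uniques = np.array([1, 2, 3])
values = np.array([1, 2, 3, 4])

# With check_unknown=False the unseen value 4 is not detected up front; the
# returned codes come from np.searchsorted(uniques, values), so the unknown
# value silently maps to an out-of-range index instead of raising.
_, encoded = _encode(values, uniques, encode=True, check_unknown=False)
print(encoded)  # -> [0 1 2 3]; note that 3 is not a valid index into uniques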
Example #3
import logging

import numpy as np

logger = logging.getLogger(__name__)


def multiclass_roc_auc_score(
    y_true,
    y_score,
    labels,
    multi_class,
    average,
    sample_weight=None,
    invalid_proba_tolerance: float = 1e-6,
):
    """Multiclass roc auc score (copied from sklearn)

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True multiclass labels.

    y_score : array-like of shape (n_samples, n_classes)
        Target scores corresponding to probability estimates of a sample
        belonging to a particular class.

    labels : array-like of shape (n_classes,) or None
        List of labels to index ``y_score`` used for multiclass. If ``None``,
        the lexical order of ``y_true`` is used to index ``y_score``.

    multi_class : string, 'ovr' or 'ovo'
        Determines the type of multiclass configuration to use.
        ``'ovr'``:
            Calculate metrics for the multiclass case using the one-vs-rest
            approach.
        ``'ovo'``:
            Calculate metrics for the multiclass case using the one-vs-one
            approach.

    average : {'macro', 'weighted'}
        Determines the type of averaging performed on the pairwise binary
        metric scores.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account. Classes
            are assumed to be uniformly distributed.
        ``'weighted'``:
            Calculate metrics for each label, taking into account the
            prevalence of the classes.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    invalid_proba_tolerance : float in [0, 1], default=1e-6
        Maximum fraction of samples whose class scores do not sum to 1 that
        may be dropped instead of raising an error. For example, with the
        default of 1e-6, at most one sample in a million is allowed to have
        scores that do not sum to 1; such samples are removed before the
        score is computed.
    """
    # validation of the input y_score
    are_close = np.isclose(1, y_score.sum(axis=1))

    # This try/except tolerates a small fraction of samples whose class
    # probabilities do not sum exactly to 1, which can happen when the scores
    # are stored as float16 instead of float64.
    try:
        if not np.all(are_close):
            raise ValueError(
                "Target scores need to be probabilities for multiclass "
                "roc_auc, i.e. they should sum up to 1.0 over classes")

    except ValueError as ex:

        logger.exception(ex)

        assert 0 <= invalid_proba_tolerance <= 1, f"{invalid_proba_tolerance=}"

        nsamples_not_close = int((~are_close).sum())
        percentage_samples_not_close = nsamples_not_close / are_close.size

        logger.warning(
            f"{nsamples_not_close=} ({percentage_samples_not_close=:.7%})")

        if percentage_samples_not_close > invalid_proba_tolerance:
            raise ValueError(
                f"Too many samples have scores that do not sum to 1: "
                f"{nsamples_not_close=} {percentage_samples_not_close=:.7%} "
                f"{invalid_proba_tolerance=:.7%}."
            )

        else:
            logger.warning(
                f"The fraction of samples whose probabilities do not sum to 1 "
                f"is within tolerance ({percentage_samples_not_close=:.7%} <= "
                f"{invalid_proba_tolerance=:.7%}); these samples will be ignored.")

            # Drop the offending samples, keeping sample_weight aligned with them.
            y_true = y_true[are_close]
            y_score = y_score[are_close, :]
            if sample_weight is not None:
                sample_weight = np.asarray(sample_weight)[are_close]

    # validation for multiclass parameter specifications
    average_options = ("macro", "weighted")
    if average not in average_options:
        raise ValueError("average must be one of {0} for "
                         "multiclass problems".format(average_options))

    multiclass_options = ("ovo", "ovr")
    if multi_class not in multiclass_options:
        raise ValueError("multi_class='{0}' is not supported "
                         "for multiclass ROC AUC, multi_class must be "
                         "in {1}".format(multi_class, multiclass_options))

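    # These private scikit-learn helpers are imported lazily; the import paths
    # below are valid for scikit-learn ~0.22/0.23 and may move in later releases.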
    from sklearn.utils import column_or_1d
    from sklearn.preprocessing._label import _encode
    from sklearn.metrics._base import _average_multiclass_ovo_score
    from sklearn.preprocessing import label_binarize
    from sklearn.metrics._ranking import _binary_roc_auc_score
    from sklearn.metrics._base import _average_binary_score

    if labels is not None:
        labels = column_or_1d(labels)
        classes = _encode(labels)
        if len(classes) != len(labels):
            raise ValueError("Parameter 'labels' must be unique")
        if not np.array_equal(classes, labels):
            raise ValueError("Parameter 'labels' must be ordered")
        if len(classes) != y_score.shape[1]:
            raise ValueError(
                "Number of given labels, {0}, not equal to the number "
                "of columns in 'y_score', {1}".format(len(classes),
                                                      y_score.shape[1]))
        if len(np.setdiff1d(y_true, classes)):
            raise ValueError(
                "'y_true' contains labels not in parameter 'labels'")
    else:
        classes = _encode(y_true)
        if len(classes) != y_score.shape[1]:
            raise ValueError(
                "Number of classes in y_true not equal to the number of "
                "columns in 'y_score'")

    if multi_class == "ovo":
        if sample_weight is not None:
            raise ValueError("sample_weight is not supported "
                             "for multiclass one-vs-one ROC AUC, "
                             "'sample_weight' must be None in this case.")
        _, y_true_encoded = _encode(y_true, uniques=classes, encode=True)
        # Hand & Till (2001) implementation (ovo)
        return _average_multiclass_ovo_score(_binary_roc_auc_score,
                                             y_true_encoded,
                                             y_score,
                                             average=average)
    else:
        # ovr is same as multi-label
        y_true_multilabel = label_binarize(y_true, classes=classes)
        return _average_binary_score(_binary_roc_auc_score,
                                     y_true_multilabel,
                                     y_score,
                                     average,
                                     sample_weight=sample_weight)
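
A minimal usage sketch for the function above; the class scores are made-up illustrative values, and the private scikit-learn imports inside the function assume version ~0.22/0.23:

import numpy as np

y_true = np.array([0, 1, 2, 2, 1, 0])
y_score = np.array([
    [0.8, 0.1, 0.1],
    [0.2, 0.7, 0.1],
    [0.1, 0.2, 0.7],
    [0.2, 0.2, 0.6],
    [0.3, 0.6, 0.1],
    [0.6, 0.3, 0.1],
])  # each row sums to 1

auc_ovr = multiclass_roc_auc_score(y_true, y_score, labels=None,
                                   multi_class="ovr", average="macro")
auc_ovo = multiclass_roc_auc_score(y_true, y_score, labels=None,
                                   multi_class="ovo", average="macro")
print(auc_ovr, auc_ovo)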