def default_metrics(self):
    return [
        metrics.Accuracy(),
        metrics.CrossEntropy(),
        metrics.MacroPrecision(),
        metrics.MacroRecall(),
        metrics.MacroF1(),
        metrics.MicroPrecision(),
        metrics.MicroRecall(),
        metrics.MicroF1()
    ]
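
# A minimal usage sketch (not part of the original file): every metric returned
# by default_metrics() follows creme's incremental API, i.e. update(y_true, y_pred)
# followed by get(). The toy labels below are illustrative only; note that
# metrics.CrossEntropy() expects class-probability dicts as y_pred rather than
# hard labels.
acc = metrics.Accuracy()
f1 = metrics.MacroF1()
for y_true, y_pred in [(0, 0), (1, 2), (2, 2)]:
    acc.update(y_true, y_pred)
    f1.update(y_true, y_pred)
print(acc.get(), f1.get())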
import functools
import math

import pytest
from creme import metrics
from sklearn import metrics as sk_metrics


@pytest.mark.parametrize(
    'metric, sk_metric, y_true, y_pred',
    [
        (metrics.Precision(), sk_metrics.precision_score,
         [True, False, True, True, True], [True, True, False, True, True]),
        (metrics.MacroPrecision(),
         functools.partial(sk_metrics.precision_score, average='macro'),
         [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
        (metrics.MicroPrecision(),
         functools.partial(sk_metrics.precision_score, average='micro'),
         [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
        (metrics.Recall(), sk_metrics.recall_score,
         [True, False, True, True, True], [True, True, False, True, True]),
        (metrics.MacroRecall(),
         functools.partial(sk_metrics.recall_score, average='macro'),
         [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
        (metrics.MicroRecall(),
         functools.partial(sk_metrics.recall_score, average='micro'),
         [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
        (metrics.FBeta(beta=0.5),
         functools.partial(sk_metrics.fbeta_score, beta=0.5),
         [True, False, True, True, True], [True, True, False, True, True]),
        (metrics.MacroFBeta(beta=0.5),
         functools.partial(sk_metrics.fbeta_score, beta=0.5, average='macro'),
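
# A hedged sketch of the test body that the (truncated) parametrization above
# could drive; the repository's actual implementation may differ. The creme
# metric is updated one sample at a time and, after every step, compared
# against the sklearn score computed on the prefix of the stream seen so far.
def test_metric(metric, sk_metric, y_true, y_pred):
    for i, (yt, yp) in enumerate(zip(y_true, y_pred)):
        metric.update(yt, yp)
        assert math.isclose(metric.get(), sk_metric(y_true[:i + 1], y_pred[:i + 1]))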
        y_pred = [np.random.dirichlet(np.ones(3)).tolist() for _ in range(n)]
        yield y_true, y_pred, sample_weights

    if isinstance(metric, base.RegressionMetric):
        yield (
            [random.random() for _ in range(n)],
            [random.random() for _ in range(n)],
            sample_weights
        )


TEST_CASES = [
    (metrics.Accuracy(), sk_metrics.accuracy_score),
    (metrics.Precision(), sk_metrics.precision_score),
    (metrics.MacroPrecision(), functools.partial(sk_metrics.precision_score, average='macro')),
    (metrics.MicroPrecision(), functools.partial(sk_metrics.precision_score, average='micro')),
    (metrics.WeightedPrecision(), functools.partial(sk_metrics.precision_score, average='weighted')),
    (metrics.Recall(), sk_metrics.recall_score),
    (metrics.MacroRecall(), functools.partial(sk_metrics.recall_score, average='macro')),
    (metrics.MicroRecall(), functools.partial(sk_metrics.recall_score, average='micro')),
    (metrics.WeightedRecall(), functools.partial(sk_metrics.recall_score, average='weighted')),
    (metrics.FBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5)),
    (metrics.MacroFBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5, average='macro')),
    (metrics.MicroFBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5, average='micro')),
    (metrics.WeightedFBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5, average='weighted')),
    (metrics.F1(), sk_metrics.f1_score),
    (metrics.MacroF1(), functools.partial(sk_metrics.f1_score, average='macro')),
    (metrics.MicroF1(), functools.partial(sk_metrics.f1_score, average='micro')),
    (metrics.WeightedF1(), functools.partial(sk_metrics.f1_score, average='weighted')),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
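
# A hypothetical driver for TEST_CASES; the sample_weight keyword on update(),
# the generate_test_cases helper excerpted above and the tolerance are
# assumptions, not necessarily the library's exact code. Each metric is
# deep-copied, fed a generated stream together with its sample weights, and the
# final value is checked against the equivalent batch sklearn score.
import copy

@pytest.mark.parametrize('metric, sk_metric', TEST_CASES)
def test_against_sklearn(metric, sk_metric):
    for y_true, y_pred, sample_weights in generate_test_cases(metric, n=30):
        m = copy.deepcopy(metric)
        for yt, yp, w in zip(y_true, y_pred, sample_weights):
            m.update(yt, yp, sample_weight=w)
        assert math.isclose(
            m.get(),
            sk_metric(y_true, y_pred, sample_weight=sample_weights),
            abs_tol=1e-10,
        )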