import copy
import functools
import importlib
import inspect

import pytest
from sklearn import metrics as sk_metrics

from creme import metrics
def load_metrics():
    """Yields all the metrics."""
    for name, obj in inspect.getmembers(importlib.import_module('creme.metrics'),
                                        inspect.isclass):

        # Wrapper metrics have to be given a metric to wrap
        if issubclass(obj, metrics.PerClass):
            yield obj(metric=metrics.Precision())
            continue
        elif issubclass(obj, metrics.Rolling):
            yield obj(metric=metrics.MSE(), window_size=42)
            continue
        elif name == 'RegressionMultiOutput':
            yield obj(metric=metrics.MSE())
            continue

        try:
            # Instantiate with each parameter's default, falling back to an
            # arbitrary value for parameters that don't have one
            sig = inspect.signature(obj)
            yield obj(**{
                param.name: param.default if param.default != param.empty else 5
                for param in sig.parameters.values()
            })
        except ValueError:
            yield obj()
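
# Illustrative usage sketch, not part of the original suite: each metric
# yielded by `load_metrics` should have a working string representation and
# survive a deep copy. The test name `test_str_and_copy` is hypothetical.
@pytest.mark.parametrize('metric', load_metrics())
def test_str_and_copy(metric):
    assert str(copy.deepcopy(metric)) == str(metric)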
def test_compose():

    # Metrics of the same kind may be composed
    metrics.MAE() + metrics.MSE()
    metrics.Accuracy() + metrics.LogLoss()
    metrics.Accuracy() + metrics.ConfusionMatrix()

    # Mixing regression and classification metrics is not allowed
    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.LogLoss()

    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.MAE() + metrics.LogLoss()
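
# Sketch of composed-metric behaviour (an assumption, not from the original
# file): updating the combination should update each constituent in order.
def test_compose_update():
    combo = metrics.MAE() + metrics.MSE()
    combo.update(y_true=1., y_pred=3.)
    # MAE = |1 - 3| = 2 and MSE = (1 - 3) ** 2 = 4; this assumes `get` returns
    # the underlying metrics' values as a list.
    assert combo.get() == [2., 4.]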
    (metrics.MacroFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='macro')),
    (metrics.MicroFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='micro')),
    (metrics.WeightedFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='weighted')),
    (metrics.F1(), sk_metrics.f1_score),
    (metrics.MacroF1(), functools.partial(sk_metrics.f1_score, average='macro')),
    (metrics.MicroF1(), functools.partial(sk_metrics.f1_score, average='micro')),
    (metrics.WeightedF1(), functools.partial(sk_metrics.f1_score, average='weighted')),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
    (metrics.MSE(), sk_metrics.mean_squared_error),
]


@pytest.mark.parametrize('metric, sk_metric', TEST_CASES)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings('ignore::sklearn.metrics.classification.UndefinedMetricWarning')
def test_metric(metric, sk_metric):

    # Check str works
    str(metric)

    for y_true, y_pred, sample_weights in generate_test_cases(metric=metric, n=30):

        m = copy.deepcopy(metric)