import pytest

# Assumed import path; adjust to wherever build_uncertainty_evaluator is
# defined in this codebase.
from chemprop.uncertainty.uncertainty_evaluator import build_uncertainty_evaluator


def test_build_unsupported_metrics(metric, dataset_type):
    """
    Tests that build_uncertainty_evaluator raises NotImplementedError
    for unsupported metric strings.
    """
    with pytest.raises(NotImplementedError):
        build_uncertainty_evaluator(metric, None, None, dataset_type, None, None)
def test_build_classification_metric(classification_metric):
    """
    Tests that build_uncertainty_evaluator accepts each of the supported
    classification evaluator metrics.
    """
    assert build_uncertainty_evaluator(classification_metric, None, None, "classification", None, None)


def test_build_multiclass_metric(multiclass_metric):
    """
    Tests that build_uncertainty_evaluator accepts each of the supported
    multiclass evaluator metrics.
    """
    assert build_uncertainty_evaluator(multiclass_metric, None, None, "multiclass", None, None)


def test_build_regression_metric(regression_metric):
    """
    Tests that build_uncertainty_evaluator accepts each of the supported
    regression evaluator metrics.
    """
    assert build_uncertainty_evaluator(regression_metric, None, None, "regression", None, None)
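# The *_metric arguments above are presumably supplied by
# pytest.mark.parametrize decorators (or fixtures) defined elsewhere in the
# suite. Below is a minimal sketch of that pattern using metric names that
# appear in the fixtures further down; the exact supported metric lists are an
# assumption, and test_build_regression_metric_sketch is a hypothetical name.
@pytest.mark.parametrize(
    "example_regression_metric", ["nll", "spearman", "miscalibration_area"]
)
def test_build_regression_metric_sketch(example_regression_metric):
    # Same call shape as test_build_regression_metric above.
    assert build_uncertainty_evaluator(
        example_regression_metric, None, None, "regression", None, None
    )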
# The helpers below look like pytest fixtures consumed by downstream
# evaluation tests; the @pytest.fixture decorators are presumed and were
# likely dropped when this file was flattened.
@pytest.fixture
def spearman_evaluator():
    return build_uncertainty_evaluator("spearman", None, "ensemble", "regression", "mse", None)


@pytest.fixture
def miscal_regression_evaluator():
    return build_uncertainty_evaluator("miscalibration_area", None, "ensemble", "regression", "mse", None)


@pytest.fixture
def nll_classification_evaluator():
    return build_uncertainty_evaluator("nll", None, "classification", "classification", "binary_cross_entropy", None)


@pytest.fixture
def nll_regression_evaluator():
    return build_uncertainty_evaluator("nll", None, "ensemble", "regression", "mse", None)
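# Downstream tests would request these fixtures by name and exercise the
# returned evaluator. A minimal smoke-test sketch; the evaluate(...) call,
# its argument shapes, and its return type are assumptions about the
# evaluator's interface, not a documented signature.
def test_nll_regression_evaluator_smoke(nll_regression_evaluator):
    targets = [[0.0]]
    preds = [[0.1]]
    uncertainties = [[1.0]]
    # Assumed interface: evaluate returns one score per task.
    scores = nll_regression_evaluator.evaluate(targets, preds, uncertainties)
    assert len(scores) == 1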