# generate predictions for the data set using the fitted model
data_set.predictions = predict(data_set, model)

# calculate metrics
from mercury_ml.common import CustomMetrics
from mercury_ml.common.tasks import evaluate_metrics
custom_metrics = ["evaluate_numpy_micro_auc", "evaluate_numpy_macro_auc"]
custom_metrics_dict = {
    custom_metric_name: getattr(CustomMetrics, custom_metric_name)
    for custom_metric_name in custom_metrics
}
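
# The comprehension above just looks each function up by name on CustomMetrics;
# it is equivalent to writing the dictionary out by hand:
#
#     custom_metrics_dict = {
#         "evaluate_numpy_micro_auc": CustomMetrics.evaluate_numpy_micro_auc,
#         "evaluate_numpy_macro_auc": CustomMetrics.evaluate_numpy_macro_auc,
#     }
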
print("Metric functions")
for metric_name, metric_function in custom_metrics_dict.items():
    print(metric_name, metric_function)
print("")

metrics = evaluate_metrics(data_set, custom_metrics_dict)
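
# For intuition, evaluate_metrics is assumed to apply each metric function to
# the data set's targets and predictions and collect the results by name.
# The sketch below is illustrative only; the attribute access and function
# signatures are assumptions, not the actual mercury_ml internals.
def _evaluate_metrics_sketch(data_set, metrics_dict):
    return {
        name: metric_fn(data_set.targets, data_set.predictions)
        for name, metric_fn in metrics_dict.items()
    }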

import json
print("Metrics:")
print(json.dumps(metrics, indent=2), "\n")

# calculate label metrics
from mercury_ml.common import CustomLabelMetrics
from mercury_ml.common.tasks import evaluate_label_metrics
custom_label_metrics = ["evaluate_numpy_auc", "evaluate_numpy_accuracy"]
custom_label_metrics_dict = {
    custom_label_metric_name: getattr(CustomLabelMetrics,
                                      custom_label_metric_name)
    for custom_label_metric_name in custom_label_metrics
}
print("Label Metric functions")
# Example #2

print("Data transformed with following parameters: \n")
print_dict(config["exec"]["evaluate_custom_metrics"].get(
    "pre_execution_transformation"))

print("Transformed data_bunch consists of: \n")
print_data_bunch(data_bunch_custom_metrics)

# ##### Calculate custom metrics

custom_metrics = {}
for data_set_name in config["exec"]["evaluate_custom_metrics"][
        "data_set_names"]:
    data_set = getattr(data_bunch_custom_metrics, data_set_name)
    custom_metrics[data_set_name] = tasks.evaluate_metrics(
        data_set, custom_metrics_dict)

print("Resulting custom metrics: \n")
print_dict(custom_metrics)

# ##### Calculate custom label metrics

custom_label_metrics = {}
for data_set_name in config["exec"]["evaluate_custom_label_metrics"][
        "data_set_names"]:
    data_set = getattr(data_bunch_custom_metrics, data_set_name)
    custom_label_metrics[data_set_name] = tasks.evaluate_label_metrics(
        data_set, custom_label_metrics_dict)

print("Resulting custom label metrics: \n")
print_dict(custom_label_metrics)
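
# For reference, the keys accessed above imply that config has roughly the
# following shape (the data set names and the transformation spec shown here
# are illustrative placeholders):
#
#     config = {
#         "exec": {
#             "evaluate_custom_metrics": {
#                 "data_set_names": ["test"],
#                 "pre_execution_transformation": {...},
#             },
#             "evaluate_custom_label_metrics": {
#                 "data_set_names": ["test"],
#             },
#         }
#     }
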
# Example #3

def test_evaluate_metrics(data_set, custom_metrics_dict, expected_dict):
    # each computed metric must match its expected value
    metrics = evaluate_metrics(data_set, custom_metrics_dict)
    for metric_name, expected_value in expected_dict.items():
        assert metrics[metric_name] == expected_value
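
# A minimal way to exercise this test with stubs (every name below is
# hypothetical, and the (targets, predictions) call convention for metric
# functions is an assumption rather than the documented mercury_ml contract):
#
#     class StubDataSet:
#         targets = [0, 0, 1, 1]
#         predictions = [0, 1, 1, 1]
#
#     def always_one(targets, predictions):
#         return 1.0
#
#     test_evaluate_metrics(StubDataSet(), {"always_one": always_one},
#                           {"always_one": 1.0})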