Example #1
for data_set_name in config["exec"]["evaluate_custom_metrics"][
        "data_set_names"]:
    data_set = getattr(data_bunch_custom_metrics, data_set_name)
    custom_metrics[data_set_name] = tasks.evaluate_metrics(
        data_set, custom_metrics_dict)

print("Resulting custom metrics: \n")
print_dict(custom_metrics)

# ##### Calculate custom label metrics

custom_label_metrics = {}
for data_set_name in config["exec"]["evaluate_custom_label_metrics"][
        "data_set_names"]:
    data_set = getattr(data_bunch_custom_metrics, data_set_name)
    custom_label_metrics[data_set_name] = tasks.evaluate_label_metrics(
        data_set, custom_label_metrics_dict)

print("Resulting custom label metrics: \n")
print_dict(custom_label_metrics)

for data_set_name, params in config["exec"]["save_custom_metrics"][
        "data_sets"].items():
    tasks.store_artifacts(store_artifact_locally, copy_from_local_to_remote,
                          custom_metrics[data_set_name], **params)

print("Custom metrics saved with following parameters: \n")
print_dict(config["exec"]["save_custom_metrics"])

for data_set_name, params in config["exec"]["save_custom_label_metrics"][
        "data_sets"].items():
    tasks.store_artifacts(store_artifact_locally, copy_from_local_to_remote,
                          custom_label_metrics[data_set_name], **params)
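
This snippet only relies on config["exec"] exposing the keys it reads. A minimal sketch of such a configuration is shown below; the data set names, file names and directories are hypothetical placeholders, and the exact keys accepted via **params depend on the storage functions injected into tasks.store_artifacts.

# Hypothetical config sketch; every name and path below is illustrative only.
config = {
    "exec": {
        "evaluate_custom_metrics": {"data_set_names": ["test"]},
        "evaluate_custom_label_metrics": {"data_set_names": ["test"]},
        "save_custom_metrics": {
            "data_sets": {
                # each value is unpacked into tasks.store_artifacts(..., **params)
                "test": {"filename": "custom_metrics__test",
                         "local_dir": "./results",
                         "remote_dir": "s3://my-bucket/results"}
            }
        },
        "save_custom_label_metrics": {
            "data_sets": {
                "test": {"filename": "custom_label_metrics__test",
                         "local_dir": "./results",
                         "remote_dir": "s3://my-bucket/results"}
            }
        }
    }
}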
Example #2
def test_evaluate_label_metrics(data_set, label_specific_custom_metrics_dict,
                                expected_label_dict):
    metrics = evaluate_label_metrics(data_set,
                                     label_specific_custom_metrics_dict)
    for metric_name, expected_value in expected_label_dict.items():
        assert metrics[metric_name] == expected_value
Example #3
}
print("Metric functions")
for metric_name, metric_function in custom_metrics_dict.items():
    print(metric_name, metric_function)
print("")

metrics = evaluate_metrics(data_set, custom_metrics_dict)

import json
print("Metrics:")
print(json.dumps(metrics, indent=2), "\n")

# calculate label metrics
from mercury_ml.common import CustomLabelMetrics
from mercury_ml.common.tasks import evaluate_label_metrics
custom_label_metrics = ["evaluate_numpy_auc", "evaluate_numpy_accuracy"]
custom_label_metrics_dict = {
    custom_label_metric_name: getattr(CustomLabelMetrics,
                                      custom_label_metric_name)
    for custom_label_metric_name in custom_label_metrics
}
print("Label Metric functions")
for metric_name, metric_function in custom_label_metrics_dict.items():
    print(metric_name, metric_function)
print("")

label_metrics = evaluate_label_metrics(data_set, custom_label_metrics_dict)

import json
print("Label Metrics:")
print(json.dumps(label_metrics, indent=2))

# + {"pycharm": {"metadata": false, "name": "#%% md\n"}, "cell_type": "markdown"}
# Next we use the mercury_ml.common.tasks API to produce custom metric evaluations. For these, we will evaluate metrics based on Numpy calculations, and therefore need to first transform our data_bunch to Numpy:
# -

transformation_params = {
    "data_set_names": ["test"],
    "params": {
        "transform_to": "numpy",
        "data_wrapper_params": {
            "predictions": {},
            "index": {},
            "targets": {}
        }
    }
}
data_bunch_metric = data_bunch_fit.transform(**transformation_params)

print(data_bunch_metric)

custom_label_metrics_dict = {
    "evaluate_numpy_accuracy": CustomLabelMetrics.evaluate_numpy_accuracy,
    "evaluate_numpy_auc": CustomLabelMetrics.evaluate_numpy_auc
}

# + {"pycharm": {"metadata": false, "name": "#%%\n"}}
custom_label_metrics = tasks.evaluate_label_metrics(data_bunch_metric.test,
                                                    custom_label_metrics_dict)
import json_tricks
print(json_tricks.dumps(custom_label_metrics, indent=2))
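
As a possible next step (not shown in the original example), the computed label metrics could be written to disk. This is a minimal sketch assuming a local ./results directory and an arbitrary file name; json_tricks is used rather than the standard json module because it can serialize the numpy values that the label metrics may contain.

import os
import json_tricks

# Hypothetical output location; adjust to your project layout.
output_dir = "./results"
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "custom_label_metrics__test.json"), "w") as f:
    f.write(json_tricks.dumps(custom_label_metrics, indent=2))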