from typing import Dict

import pandas as pd
# The evaluators below come from fklearn; the import path is assumed for this
# standalone snippet.
from fklearn.validation.evaluators import (fbeta_score_evaluator,
                                           precision_evaluator,
                                           recall_evaluator)


def print_eval(pred: pd.DataFrame, name: str, log: Dict) -> None:
    """ Prints evaluation metrices to StdOut.

    Most side-effecting output of evaluation metrices (Precision, Recall &
    F1-Score.

    Parameters
    ----------
    pred : pd.DataFrame
        Predicted values
    name : str
        Model name
    log : Dict
        Model training log

    Returns
    -------
    None

    """

    print("Model: {}".format(name))
    print("Parameters",
          log["nlp_logistic_classification_learner"]["parameters"])
    precision = precision_evaluator(pred)
    print("Precision", precision["precision_evaluator__target"])
    recall = recall_evaluator(pred)
    print("Recall", recall["recall_evaluator__target"])
    f1_score = fbeta_score_evaluator(pred)
    print("F1 Score", f1_score["fbeta_evaluator__target"])
    print("Training time",
          log["nlp_logistic_classification_learner"]["running_time"], "\n")
Example 2
import pandas as pd
# recall_evaluator comes from fklearn; import path assumed for this snippet.
from fklearn.validation.evaluators import recall_evaluator


def test_recall_evaluator():
    predictions = pd.DataFrame({
        'target': [0, 1, 0, 1],
        'prediction': [.2, .9, .3, .3]
    })

    eval_fn = recall_evaluator(prediction_column="prediction",
                               threshold=0.5,
                               target_column="target",
                               eval_name="eval_name")

    result = eval_fn(predictions)

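    # Only the 0.9 prediction clears the 0.5 threshold, so one of the two
    # positive targets is recovered and recall comes out to 1/2.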
    assert result["eval_name"] == 0.5