Example #1
def rating_metrics_pyspark(test, predictions):
    """Return rating prediction metrics (RMSE, MAE, R2, explained variance) computed with Spark."""
    rating_eval = SparkRatingEvaluation(test, predictions, **COL_DICT)
    return {
        "RMSE": rating_eval.rmse(),
        "MAE": rating_eval.mae(),
        "R2": rating_eval.rsquared(),
        "Explained Variance": rating_eval.exp_var(),
    }
Example #2
def rating_metrics_pyspark(test, predictions):
    rating_eval = SparkRatingEvaluation(test, predictions, **COL_DICT)
    return {
        "RMSE": rating_eval.rmse(),
        "MAE": rating_eval.mae(),
        "R2": rating_eval.exp_var(),
        "Explained Variance": rating_eval.rsquared(),
    }
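
A minimal usage sketch for the function above. It assumes SparkRatingEvaluation comes from the recommenders package (recommenders.evaluation.spark_evaluation in recent releases), that rating_metrics_pyspark is defined in the same module, and that COL_DICT maps the evaluator's column-name keyword arguments to the column names used in the DataFrames; the column names and toy data below are illustrative only.

# Hedged usage sketch; COL_DICT and the import path are assumptions, not
# taken from the excerpts above.
from pyspark.sql import SparkSession
from recommenders.evaluation.spark_evaluation import SparkRatingEvaluation

COL_DICT = {
    "col_user": "userID",
    "col_item": "itemID",
    "col_rating": "rating",
    "col_prediction": "prediction",
}

spark = SparkSession.builder.master("local[2]").getOrCreate()

test = spark.createDataFrame(
    [(1, 1, 5.0), (1, 2, 4.0), (2, 1, 3.0)],
    ["userID", "itemID", "rating"],
)
predictions = spark.createDataFrame(
    [(1, 1, 4.6), (1, 2, 4.1), (2, 1, 3.3)],
    ["userID", "itemID", "prediction"],
)

# Returns a dict with keys "RMSE", "MAE", "R2", "Explained Variance".
print(rating_metrics_pyspark(test, predictions))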
Example #3
def test_spark_exp_var(spark_data, target_metrics):
    df_true, df_pred = spark_data

    # Evaluating the ground truth against itself should give an explained
    # variance of exactly 1.0 (within tolerance).
    evaluator1 = SparkRatingEvaluation(df_true, df_true, col_prediction="rating")
    assert evaluator1.exp_var() == pytest.approx(1.0, TOL)

    evaluator2 = SparkRatingEvaluation(df_true, df_pred)
    assert evaluator2.exp_var() == target_metrics["exp_var"]
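
The spark_data and target_metrics fixtures used by these tests are not part of the excerpts. A hypothetical reconstruction of spark_data follows, assuming a local SparkSession and the evaluator's default column names (userID, itemID, rating, prediction); TOL is assumed to be a small tolerance passed to pytest.approx, and target_metrics is assumed to map metric names such as "mae" and "exp_var" to pytest.approx values precomputed for the same data.

# Hypothetical fixture sketch; the real conftest.py is not shown here.
import pytest
from pyspark.sql import SparkSession

TOL = 0.0001  # assumed tolerance


@pytest.fixture(scope="module")
def spark_data():
    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df_true = spark.createDataFrame(
        [(1, 1, 5.0), (1, 2, 4.0), (2, 1, 3.0), (2, 3, 4.0)],
        ["userID", "itemID", "rating"],
    )
    df_pred = spark.createDataFrame(
        [(1, 1, 4.5), (1, 2, 4.2), (2, 1, 2.8), (2, 3, 3.9)],
        ["userID", "itemID", "prediction"],
    )
    return df_true, df_pred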
Example #4
def test_spark_mae(spark_data, target_metrics):
    df_true, df_pred = spark_data

    # Perfect predictions (the truth compared with itself) must give an MAE of 0.
    evaluator1 = SparkRatingEvaluation(df_true, df_true, col_prediction="rating")
    assert evaluator1.mae() == 0

    evaluator2 = SparkRatingEvaluation(df_true, df_pred)
    assert evaluator2.mae() == target_metrics["mae"]
Example #5
def test_init_spark_rating_eval(spark_data):
    df_true, df_pred = spark_data
    evaluator = SparkRatingEvaluation(df_true, df_pred)

    assert evaluator is not None
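
The tests above rely on two properties of the underlying metrics: explained variance is 1.0 and MAE is 0 when predictions equal the ground truth. A plain-NumPy sketch of the usual definitions (not the evaluator's actual Spark implementation) shows why:

import numpy as np

def explained_variance(y_true, y_pred):
    # 1 - Var(y_true - y_pred) / Var(y_true); the residual variance vanishes
    # for perfect predictions, so the score is exactly 1.0.
    return 1.0 - np.var(y_true - y_pred) / np.var(y_true)

def mae(y_true, y_pred):
    # Mean absolute error is 0 only when every prediction matches the truth.
    return np.mean(np.abs(y_true - y_pred))

y = np.array([5.0, 4.0, 3.0, 4.0])
assert explained_variance(y, y) == 1.0
assert mae(y, y) == 0.0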