def test_python_errors(python_data):
    """Each metric must raise ValueError when pointed at a column the data lacks."""
    rating_true, rating_pred, _ = python_data(binary_rating=False)

    # (metric, true frame, prediction frame, bad column kwargs) — every entry
    # names at least one column that does not exist in the supplied frames.
    bad_calls = [
        (rmse, rating_true, rating_true, {"col_user": "******"}),
        (mae, rating_pred, rating_pred,
         {"col_rating": PREDICTION_COL, "col_user": "******"}),
        (rsquared, rating_true, rating_pred, {"col_item": "not_item"}),
        (exp_var, rating_pred, rating_pred,
         {"col_rating": PREDICTION_COL, "col_item": "not_item"}),
        (precision_at_k, rating_true, rating_pred, {"col_rating": "not_rating"}),
        (recall_at_k, rating_true, rating_pred,
         {"col_prediction": "not_prediction"}),
        (ndcg_at_k, rating_true, rating_true, {"col_user": "******"}),
        (map_at_k, rating_pred, rating_pred,
         {"col_rating": PREDICTION_COL, "col_user": "******"}),
    ]

    for metric, true_df, pred_df, kwargs in bad_calls:
        with pytest.raises(ValueError):
            metric(true_df, pred_df, **kwargs)
# Example #2 (score: 0)
def test_python_mae(rating_true, rating_pred):
    """MAE of a frame against itself is zero; against predictions it matches 6.375."""
    # Comparing the truth to itself must give a perfect (zero) error.
    self_error = mae(
        rating_true=rating_true,
        rating_pred=rating_true,
        col_prediction=DEFAULT_RATING_COL,
    )
    assert self_error == 0
    assert pytest.approx(6.375, TOL) == mae(rating_true, rating_pred)
# Example #3 (score: 0)
def test_python_errors(rating_true, rating_pred):
    """Verify that every metric rejects column names missing from the data."""

    def _assert_value_error(metric, true_df, pred_df, **kwargs):
        # Each call below passes at least one nonexistent column name.
        with pytest.raises(ValueError):
            metric(true_df, pred_df, **kwargs)

    _assert_value_error(rmse, rating_true, rating_true, col_user="******")
    _assert_value_error(
        mae,
        rating_pred,
        rating_pred,
        col_rating=DEFAULT_PREDICTION_COL,
        col_user="******",
    )
    _assert_value_error(rsquared, rating_true, rating_pred, col_item="not_item")
    _assert_value_error(
        exp_var,
        rating_pred,
        rating_pred,
        col_rating=DEFAULT_PREDICTION_COL,
        col_item="not_item",
    )
    _assert_value_error(
        precision_at_k, rating_true, rating_pred, col_rating="not_rating"
    )
    _assert_value_error(
        recall_at_k, rating_true, rating_pred, col_prediction="not_prediction"
    )
    _assert_value_error(ndcg_at_k, rating_true, rating_true, col_user="******")
    _assert_value_error(
        map_at_k,
        rating_pred,
        rating_pred,
        col_rating=DEFAULT_PREDICTION_COL,
        col_user="******",
    )
def test_python_errors(rating_true, rating_pred):
    """Every metric must raise ValueError for a column absent from the data."""
    # Spec rows: metric, truth frame, prediction frame, offending kwargs.
    cases = (
        (rmse, rating_true, rating_true, dict(col_user="******")),
        (mae, rating_pred, rating_pred,
         dict(col_rating=DEFAULT_PREDICTION_COL, col_user="******")),
        (rsquared, rating_true, rating_pred, dict(col_item="not_item")),
        (exp_var, rating_pred, rating_pred,
         dict(col_rating=DEFAULT_PREDICTION_COL, col_item="not_item")),
        (precision_at_k, rating_true, rating_pred, dict(col_rating="not_rating")),
        (recall_at_k, rating_true, rating_pred,
         dict(col_prediction="not_prediction")),
        (ndcg_at_k, rating_true, rating_true, dict(col_user="******")),
        (map_at_k, rating_pred, rating_pred,
         dict(col_rating=DEFAULT_PREDICTION_COL, col_user="******")),
    )
    for metric, true_df, pred_df, bad_kwargs in cases:
        with pytest.raises(ValueError):
            metric(true_df, pred_df, **bad_kwargs)
# Example #5 (score: 0)
def test_python_mae(python_data, target_metrics):
    """Self-comparison gives zero MAE; true vs. predicted matches the target value."""
    rating_true, rating_pred, _ = python_data
    # Truth against itself: perfect agreement, so the error must be exactly 0.
    zero_error = mae(
        rating_true=rating_true, rating_pred=rating_true, col_prediction="rating"
    )
    assert zero_error == 0
    assert mae(rating_true, rating_pred) == target_metrics["mae"]
def test_python_mae(rating_true, rating_pred):
    """MAE is zero for identical frames and approx 6.375 for the test predictions."""
    perfect_score = mae(
        rating_true=rating_true,
        rating_pred=rating_true,
        col_prediction=DEFAULT_RATING_COL,
    )
    # Identical inputs must yield no error at all.
    assert perfect_score == 0
    observed = mae(rating_true, rating_pred)
    assert observed == pytest.approx(6.375, TOL)
# Example #7 (score: 0)
def rating_metrics_python(test, predictions):
    """Compute the four Python rating metrics, keyed by their display names."""
    metric_fns = {
        "RMSE": rmse,
        "MAE": mae,
        "R2": rsquared,
        "Explained Variance": exp_var,
    }
    # Dict insertion order keeps the result keys in the same order as before.
    return {name: fn(test, predictions, **COL_DICT) for name, fn in metric_fns.items()}
# Example #8 (score: 0)
def rating_metrics_python(test, predictions):
    """Evaluate the regression-style rating metrics on the given predictions."""
    results = {}
    for label, metric in (
        ("RMSE", rmse),
        ("MAE", mae),
        ("R2", rsquared),
        ("Explained Variance", exp_var),
    ):
        results[label] = metric(test, predictions, **COL_DICT)
    return results
# Example #9 (score: 0)
def test_predict_rating(rating_true):
    """Fit a small MF model and check prediction schema, dtypes, and error bounds."""
    train_set = cornac.data.Dataset.from_uir(
        rating_true.itertuples(index=False), seed=42
    )
    model = cornac.models.MF(k=100, max_iter=10000, seed=42).fit(train_set)

    scored = predict_rating(model, rating_true)

    assert {"userID", "itemID", "prediction"} == set(scored.columns)
    # Id columns must keep the dtypes of the input frame.
    for column in ("userID", "itemID"):
        assert scored[column].dtypes == rating_true[column].dtypes
    # Loose upper bounds; observed values in practice are ~0.018 / ~0.021.
    assert mae(rating_true, scored) < .02
    assert rmse(rating_true, scored) < .03
def test_python_mae(python_data, target_metrics):
    """MAE must be zero for self-comparison and match the target otherwise."""
    rating_true, rating_pred, _ = python_data(binary_rating=False)
    self_mae = mae(
        rating_true=rating_true,
        rating_pred=rating_true,
        col_prediction=DEFAULT_RATING_COL,
    )
    assert self_mae == 0
    assert mae(rating_true, rating_pred) == target_metrics["mae"]
# Example #11 (score: 0)
                                col_rating='rating',
                                k=TOP_K)
# All metric calls below share one column mapping; keyword-argument order
# is irrelevant to the calls, so a single dict keeps them in sync.
ranking_cols = dict(col_user='******', col_item='itemID', col_rating='rating')

eval_recall = recall_at_k(test, top_k, k=TOP_K, **ranking_cols)
eval_rmse = rmse(test, top_k, **ranking_cols)
eval_mae = mae(test, top_k, **ranking_cols)
eval_rsquared = rsquared(test, top_k, **ranking_cols)
eval_exp_var = exp_var(test, top_k, **ranking_cols)

# Threshold used to binarize ratings for the classification-style metrics.
positivity_threshold = 2
test_bin = test.copy()
# Example #12 (score: 0)
# Calculate some regression metrics; every call uses the same column mapping,
# so it is factored into one shared kwargs dict.
regression_cols = dict(
    col_user=USER,
    col_item=ITEM,
    col_rating=RATING,
    col_prediction=PREDICTION,
)

eval_r2 = rsquared(test_df, scores, **regression_cols)
eval_rmse = rmse(test_df, scores, **regression_cols)
eval_mae = mae(test_df, scores, **regression_cols)
eval_exp_var = exp_var(test_df, scores, **regression_cols)

# print("Model:\t" + learn.__class__.__name__,
#       "RMSE:\t%f" % eval_rmse,
#       "MAE:\t%f" % eval_mae,
#       "Explained variance:\t%f" % eval_exp_var,
#       "R squared:\t%f" % eval_r2, sep='\n')