def test_python_errors(python_data):
    rating_true, rating_pred, _ = python_data(binary_rating=False)

    with pytest.raises(ValueError):
        rmse(rating_true, rating_true, col_user="not_user")

    with pytest.raises(ValueError):
        mae(rating_pred,
            rating_pred,
            col_rating=PREDICTION_COL,
            col_user="******")

    with pytest.raises(ValueError):
        rsquared(rating_true, rating_pred, col_item="not_item")

    with pytest.raises(ValueError):
        exp_var(rating_pred,
                rating_pred,
                col_rating=PREDICTION_COL,
                col_item="not_item")

    with pytest.raises(ValueError):
        precision_at_k(rating_true, rating_pred, col_rating="not_rating")

    with pytest.raises(ValueError):
        recall_at_k(rating_true, rating_pred, col_prediction="not_prediction")

    with pytest.raises(ValueError):
        ndcg_at_k(rating_true, rating_true, col_user="not_user")

    with pytest.raises(ValueError):
        map_at_k(rating_pred,
                 rating_pred,
                 col_rating=PREDICTION_COL,
                 col_user="******")
Example 2
def test_python_rmse(rating_true, rating_pred):
    assert (rmse(
        rating_true=rating_true,
        rating_pred=rating_true,
        col_prediction=DEFAULT_RATING_COL,
    ) == 0)
    assert rmse(rating_true, rating_pred) == pytest.approx(7.254309, TOL)
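For reference, rmse here is essentially the root-mean-squared error computed after joining the true and predicted frames on the user and item columns, which is why scoring a frame against itself returns exactly 0. A hand-rolled equivalent as a sketch (not the library implementation; the default column names below are assumptions):

import numpy as np
import pandas as pd

def rmse_sketch(true_df, pred_df, col_user="userID", col_item="itemID",
                col_rating="rating", col_prediction="prediction"):
    # Join on the user/item keys, then take the root of the mean squared
    # difference between the rating column and the prediction column.
    merged = pd.merge(true_df, pred_df, on=[col_user, col_item],
                      suffixes=("", "_pred"))
    return np.sqrt(((merged[col_rating] - merged[col_prediction]) ** 2).mean())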
Example 3
def test_python_errors(rating_true, rating_pred):
    with pytest.raises(ValueError):
        rmse(rating_true, rating_true, col_user="not_user")

    with pytest.raises(ValueError):
        mae(rating_pred,
            rating_pred,
            col_rating=DEFAULT_PREDICTION_COL,
            col_user="******")

    with pytest.raises(ValueError):
        rsquared(rating_true, rating_pred, col_item="not_item")

    with pytest.raises(ValueError):
        exp_var(rating_pred,
                rating_pred,
                col_rating=DEFAULT_PREDICTION_COL,
                col_item="not_item")

    with pytest.raises(ValueError):
        precision_at_k(rating_true, rating_pred, col_rating="not_rating")

    with pytest.raises(ValueError):
        recall_at_k(rating_true, rating_pred, col_prediction="not_prediction")

    with pytest.raises(ValueError):
        ndcg_at_k(rating_true, rating_true, col_user="not_user")

    with pytest.raises(ValueError):
        map_at_k(rating_pred,
                 rating_pred,
                 col_rating=DEFAULT_PREDICTION_COL,
                 col_user="******")
def test_python_rmse(python_data, target_metrics):
    rating_true, rating_pred, _ = python_data
    assert (
        rmse(rating_true=rating_true, rating_pred=rating_true, col_prediction="rating")
        == 0
    )
    assert rmse(rating_true, rating_pred) == target_metrics["rmse"]
Example 7
def rating_metrics_python(test, predictions):
    return {
        "RMSE": rmse(test, predictions, **COL_DICT),
        "MAE": mae(test, predictions, **COL_DICT),
        "R2": rsquared(test, predictions, **COL_DICT),
        "Explained Variance": exp_var(test, predictions, **COL_DICT),
    }
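The COL_DICT unpacked by rating_metrics_python is not shown in the snippet; it is just a shared mapping of the metrics' column-name keyword arguments, presumably along these lines (the column names here are illustrative assumptions):

# Hypothetical COL_DICT: one place to point every metric at the dataframe's
# user, item, rating, and prediction columns.
COL_DICT = {
    "col_user": "userID",
    "col_item": "itemID",
    "col_rating": "rating",
    "col_prediction": "prediction",
}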
def test_predict_rating(rating_true):
    train_set = cornac.data.Dataset.from_uir(rating_true.itertuples(index=False), seed=42)
    mf = cornac.models.MF(k=100, max_iter=10000, seed=42).fit(train_set)

    preds = predict_rating(mf, rating_true)

    assert set(preds.columns) == {"userID", "itemID", "prediction"}
    assert preds["userID"].dtypes == rating_true["userID"].dtypes
    assert preds["itemID"].dtypes == rating_true["itemID"].dtypes
    assert mae(rating_true, preds) < 0.02  # ~0.018
    assert rmse(rating_true, preds) < 0.03  # ~0.021
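predict_rating itself is not shown here. As a rough illustration of what such a helper has to do with a fitted cornac model, the sketch below maps raw user/item ids to cornac's internal indices and calls model.score(); it is a simplified sketch under those assumptions, not the recommenders implementation, and it skips the unknown-user/item and dtype handling a real helper would need.

import pandas as pd

def predict_rating_sketch(model, data, usercol="userID", itemcol="itemID",
                          predcol="prediction"):
    # model.train_set.uid_map / iid_map translate raw ids into the internal
    # integer indices that cornac's score() expects.
    uid_map = model.train_set.uid_map
    iid_map = model.train_set.iid_map
    rows = [
        {
            usercol: u,
            itemcol: i,
            predcol: float(model.score(uid_map[u], iid_map[i])),
        }
        for u, i in zip(data[usercol], data[itemcol])
    ]
    return pd.DataFrame(rows, columns=[usercol, itemcol, predcol])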
def test_python_rmse(python_data, target_metrics):
    rating_true, rating_pred, _ = python_data(binary_rating=False)
    assert rmse(rating_true=rating_true,
                rating_pred=rating_true,
                col_prediction=DEFAULT_RATING_COL) == 0
    assert rmse(rating_true, rating_pred) == target_metrics["rmse"]
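The target_metrics fixture is not shown in these snippets; given the expected value asserted in the earlier RMSE test, it presumably looks something like the sketch below (only the RMSE entry is grounded in the snippets above):

@pytest.fixture
def target_metrics():
    # Expected metric values for the shared test data; other metrics would be
    # added to this dict in the same way.
    return {"rmse": pytest.approx(7.254309, TOL)}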
Example 11
eval_precision = precision_at_k(test,
                                top_k,
                                col_user='userID',
                                col_item='itemID',
                                col_rating='rating',
                                k=TOP_K)
eval_recall = recall_at_k(test,
                          top_k,
                          col_user='userID',
                          col_item='itemID',
                          col_rating='rating',
                          k=TOP_K)
eval_rmse = rmse(test,
                 top_k,
                 col_user='userID',
                 col_item='itemID',
                 col_rating='rating')
eval_mae = mae(test,
               top_k,
               col_user='userID',
               col_item='itemID',
               col_rating='rating')
eval_rsquared = rsquared(test,
                         top_k,
                         col_user='userID',
                         col_item='itemID',
                         col_rating='rating')
eval_exp_var = exp_var(test,
                       top_k,
                       col_user='userID',
                       col_item='itemID',
                       col_rating='rating')
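As a usage note, the individual eval_* values computed above are typically gathered into one summary for reporting; a small sketch using the variable names from this snippet (not part of the original notebook):

# Collect the ranking and rating metrics computed above for side-by-side
# reporting.
summary = {
    "Precision@K": eval_precision,
    "Recall@K": eval_recall,
    "RMSE": eval_rmse,
    "MAE": eval_mae,
    "R2": eval_rsquared,
    "Explained Variance": eval_exp_var,
}
for name, value in summary.items():
    print(f"{name}:\t{value:.6f}")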
Example 12
scores = score(learner,
               test_df=test_df.copy(),
               user_col=USER,
               item_col=ITEM,
               prediction_col=PREDICTION)

# calculate some regression metrics
eval_r2 = rsquared(test_df,
                   scores,
                   col_user=USER,
                   col_item=ITEM,
                   col_rating=RATING,
                   col_prediction=PREDICTION)
eval_rmse = rmse(test_df,
                 scores,
                 col_user=USER,
                 col_item=ITEM,
                 col_rating=RATING,
                 col_prediction=PREDICTION)
eval_mae = mae(test_df,
               scores,
               col_user=USER,
               col_item=ITEM,
               col_rating=RATING,
               col_prediction=PREDICTION)
eval_exp_var = exp_var(test_df,
                       scores,
                       col_user=USER,
                       col_item=ITEM,
                       col_rating=RATING,
                       col_prediction=PREDICTION)
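The USER, ITEM, RATING, and PREDICTION names above are column-name constants defined earlier in the notebook this snippet was taken from; they only need to match test_df's schema. A plausible definition (the exact strings are assumptions):

# Assumed column-name constants for the scoring/evaluation snippet above; the
# original notebook may use different strings.
USER, ITEM, RATING, PREDICTION = "userID", "itemID", "rating", "prediction"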