Example #1
def test_predictive_evaluation_10(model, test_interactions_ds):
    """Invalid n_test_predictions value (< 0)."""
    try:
        predictive_evaluation(model,
                              test_interactions_ds,
                              count_none_predictions=False,
                              n_test_predictions=-1,
                              skip_errors=True)
        assert False
    except Exception as e:
        assert str(e) == 'The number of test users (-1) should be > 0.'
Example #2
def test_predictive_evaluation_11(model, test_interactions_ds):
    """Invalid metrics value (not a list)."""
    try:
        predictive_evaluation(model,
                              test_interactions_ds,
                              count_none_predictions=False,
                              n_test_predictions=None,
                              skip_errors=True,
                              metrics={})
        assert False
    except Exception as e:
        assert str(e) == 'Expected "metrics" argument to be a list and found <class \'dict\'>. ' \
                         'Should contain instances of PredictiveMetricABC.'
Example #3
def test_predictive_evaluation_3(model, test_interactions_ds):
    """Evaluation without skip errors."""
    try:
        predictive_evaluation(model,
                              test_interactions_ds,
                              count_none_predictions=True,
                              n_test_predictions=None,
                              skip_errors=False)
        assert False
    except Exception as e:
        assert str(e) == 'Item 6 was not found.'
Example #4
def test_predictive_evaluation_12(model, test_interactions_ds):
    """Invalid metrics value (list with non-PredictiveMetricABC instances)."""
    fun = lambda x: 1
    try:
        predictive_evaluation(model,
                              test_interactions_ds,
                              count_none_predictions=False,
                              n_test_predictions=None,
                              skip_errors=True,
                              metrics=[fun])
        assert False
    except Exception as e:
        assert str(
            e
        ) == f'Expected metric {fun} to be an instance of type PredictiveMetricABC.'
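The try / assert False / except pattern used in the error-handling examples above can also be written with pytest.raises. The sketch below is optional and illustrative: it assumes pytest is the test runner, reuses the model and test_interactions_ds fixtures, and mirrors test_predictive_evaluation_10; the test name is hypothetical.

import re

import pytest

def test_predictive_evaluation_invalid_n_pytest(model, test_interactions_ds):
    """Same check as test_predictive_evaluation_10, expressed with pytest.raises (sketch)."""
    expected = 'The number of test users (-1) should be > 0.'
    # match performs a regex search on the exception message, so escape it.
    with pytest.raises(Exception, match=re.escape(expected)):
        predictive_evaluation(model,
                              test_interactions_ds,
                              count_none_predictions=False,
                              n_test_predictions=-1,
                              skip_errors=True)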
Example #5
def test_predictive_evaluation_8(model):
    """Evaluation on the training set."""
    assert predictive_evaluation(model,
                                 count_none_predictions=False,
                                 n_test_predictions=None,
                                 skip_errors=True) == {
                                     'MSE': 5.2485,
                                     'RMSE': 2.291
                                 }
Example #6
def test_predictive_evaluation_1(model, test_interactions_ds):
    """Evaluation counting None predictions."""
    assert predictive_evaluation(model,
                                 test_interactions_ds,
                                 count_none_predictions=True,
                                 n_test_predictions=None,
                                 skip_errors=True) == {
                                     'MSE': 0.75,
                                     'RMSE': 0.866
                                 }
Example #7
def test_predictive_evaluation_0(model, test_interactions_ds):
    """Evaluation without counting None predictions."""
    assert predictive_evaluation(model,
                                 test_interactions_ds,
                                 count_none_predictions=False,
                                 n_test_predictions=None,
                                 skip_errors=True) == {
                                     'MSE': 0.6667,
                                     'RMSE': 0.8165
                                 }
Example #8
def test_predictive_evaluation_7(model, test_interactions_ds):
    """Evaluation on the first 2 test predictions."""
    assert predictive_evaluation(model,
                                 test_interactions_ds,
                                 count_none_predictions=False,
                                 n_test_predictions=2,
                                 skip_errors=True) == {
                                     'MSE': 0.5,
                                     'RMSE': 0.7071
                                 }
Example #9
def test_predictive_evaluation_6(model, test_interactions_ds):
    """Evaluation on the first test prediction."""
    assert predictive_evaluation(model,
                                 test_interactions_ds,
                                 count_none_predictions=False,
                                 n_test_predictions=1,
                                 skip_errors=True) == {
                                     'MSE': 1.0,
                                     'RMSE': 1.0
                                 }
Example #10
def test_predictive_evaluation_5(model, test_interactions_ds):
    """Evaluation using the MSE metric only."""
    assert predictive_evaluation(model,
                                 test_interactions_ds,
                                 count_none_predictions=False,
                                 n_test_predictions=None,
                                 skip_errors=True,
                                 metrics=[MSE()]) == {
                                     'MSE': 0.6667
                                 }
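Building on Example #10, the sketch below passes more than one metric instance at once. It is a hedged illustration, not taken from the original examples: the import path DRecPy.Evaluation.Metrics, the RMSE class name, and the test name are assumptions, and the asserted result keys are inferred from the single-metric example above.

from DRecPy.Evaluation.Metrics import MSE, RMSE  # import path assumed

def test_predictive_evaluation_multiple_metrics(model, test_interactions_ds):
    """Evaluation with an explicit list of metric instances (illustrative sketch)."""
    results = predictive_evaluation(model,
                                    test_interactions_ds,
                                    count_none_predictions=False,
                                    n_test_predictions=None,
                                    skip_errors=True,
                                    metrics=[MSE(), RMSE()])
    # Each metric instance is expected to contribute one entry keyed by its name,
    # mirroring the single-metric result shown in Example #10.
    assert set(results.keys()) == {'MSE', 'RMSE'}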
Example #11
from DRecPy.Recommender.Baseline import ItemKNN
from DRecPy.Dataset import get_train_dataset
from DRecPy.Dataset import get_test_dataset
from DRecPy.Evaluation.Processes import predictive_evaluation
import time

ds_train = get_train_dataset('ml-100k')
ds_test = get_test_dataset('ml-100k')

start_train = time.time()
item_cf = ItemKNN(k=15,
                  m=1,
                  shrinkage=100,
                  sim_metric='adjusted_cosine',
                  verbose=True)
item_cf.fit(ds_train)
print("Training took", time.time() - start_train)

start_evaluation = time.time()
print(predictive_evaluation(item_cf, ds_test))
print("Evaluation took", time.time() - start_evaluation)