Example #1
def test_LeaveOneOut():

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    loo = LeaveOneOut()
    with pytest.raises(ValueError):
        next(loo.split(data))  # Each user only has 1 item so trainsets fail

    reader = Reader('ml-100k')
    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/u1_ml100k_test')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    # Test random_state parameter
    # If random_state is None, you get a different split each time (conditioned
    # by the rng, of course)
    loo = LeaveOneOut(random_state=None)
    testsets_a = [testset for (_, testset) in loo.split(data)]
    testsets_b = [testset for (_, testset) in loo.split(data)]
    assert testsets_a != testsets_b
    # Repeated calls to split() when random_state is set lead to the same folds
    loo = LeaveOneOut(random_state=1)
    testsets_a = [testset for (_, testset) in loo.split(data)]
    testsets_b = [testset for (_, testset) in loo.split(data)]
    assert testsets_a == testsets_b

    # Make sure only one rating per user is present in the testset
    loo = LeaveOneOut()
    for _, testset in loo.split(data):
        cnt = Counter([uid for (uid, _, _) in testset])
        assert all(val == 1 for val in itervalues(cnt))

    # test the min_n_ratings parameter
    loo = LeaveOneOut(min_n_ratings=5)
    for trainset, _ in loo.split(data):
        assert all(len(ratings) >= 5 for ratings in itervalues(trainset.ur))

    loo = LeaveOneOut(min_n_ratings=10)
    for trainset, _ in loo.split(data):
        assert all(len(ratings) >= 10 for ratings in itervalues(trainset.ur))

    loo = LeaveOneOut(min_n_ratings=10000)  # too high
    with pytest.raises(ValueError):
        next(loo.split(data))
Example #2
def test_gridsearchcv_same_splits():
    """Ensure that all parameter combinations are tested on the same splits (we
    check their RMSE scores are the same once averaged over the splits, which
    should be enough). We use as much parallelism as possible."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, reader=Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)

    # all RMSE should be the same (as param combinations are the same)
    param_grid = {
        'n_epochs': [5],
        'lr_all': [.2, .2],
        'reg_all': [.4, .4],
        'n_factors': [5],
        'random_state': [0]
    }
    gs = GridSearchCV(SVD, param_grid, measures=['RMSE'], cv=kf, n_jobs=1)
    gs.fit(data)

    rmse_scores = [m for m in gs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal

    # Note: actually, even when setting random_state=None in kf, the same folds
    # are used because we use product(param_comb, kf.split(...)). However,
    # setting random_state is needed to get the same folds when calling fit()
    # again:
    gs.fit(data)
    rmse_scores += [m for m in gs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal
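# Side note on the product(param_comb, kf.split(...)) remark above -- a small,
# standalone illustration (hypothetical names, not part of the test suite) of
# why every parameter combination sees the same folds even when the fold
# iterator is a one-shot generator: itertools.product materializes its second
# argument once and then reuses it for each element of the first.
from itertools import product

param_combinations = ['params_a', 'params_b']
folds = (fold for fold in ['fold_1', 'fold_2', 'fold_3'])  # one-shot generator
pairs = list(product(param_combinations, folds))
assert pairs == [('params_a', 'fold_1'), ('params_a', 'fold_2'),
                 ('params_a', 'fold_3'), ('params_b', 'fold_1'),
                 ('params_b', 'fold_2'), ('params_b', 'fold_3')]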
Example #3
def test_unknown_user_or_item():
    """Ensure that all algorithms act gracefully when asked to predict a rating
    of an unknown user, an unknown item, and when both are unknown.
    """

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))

    file_path = os.path.dirname(os.path.realpath(__file__)) + '/custom_dataset'

    data = Dataset.load_from_file(file_path=file_path, reader=reader)
    trainset = data.build_full_trainset()

    klasses = (NormalPredictor, BaselineOnly, KNNBasic, KNNWithMeans,
               KNNBaseline, SVD, SVDpp, NMF, SlopeOne, CoClustering,
               KNNWithZScore)
    for klass in klasses:
        algo = klass()
        algo.fit(trainset)
        algo.predict('user0', 'unknown_item', None)
        algo.predict('unknown_user', 'item0', None)
        algo.predict('unknown_user', 'unknown_item', None)

    # unrelated, but test the fit().test() one-liner:
    trainset, testset = train_test_split(data, test_size=2)
    for klass in klasses:
        algo = klass()
        algo.fit(trainset).test(testset)
        with pytest.warns(UserWarning):
            algo.train(trainset).test(testset)
Example #4
def test_dump():
    """Train an algorithm, compute its predictions then dump them.
    Ensure that the predictions that are loaded back are the correct ones, and
    that the predictions of the dumped algorithm are also equal to the other
    ones."""

    random.seed(0)

    train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))
    pkf = PredefinedKFold()

    trainset, testset = next(pkf.split(data))

    algo = BaselineOnly()
    algo.fit(trainset)
    predictions = algo.test(testset)

    with tempfile.NamedTemporaryFile() as tmp_file:
        dump.dump(tmp_file.name, predictions, algo)
        predictions_dumped, algo_dumped = dump.load(tmp_file.name)

        predictions_algo_dumped = algo_dumped.test(testset)
        assert predictions == predictions_dumped
        assert predictions == predictions_algo_dumped
Example #5
def test_performances():
    """Test the returned dict. Also do dumping."""

    current_dir = os.path.dirname(os.path.realpath(__file__))
    folds_files = [(current_dir + '/custom_train',
                    current_dir + '/custom_test')]

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    data = Dataset.load_from_folds(folds_files=folds_files, reader=reader)

    algo = NormalPredictor()
    tmp_dir = tempfile.mkdtemp()  # create tmp dir
    with pytest.warns(UserWarning):
        performances = evaluate(algo,
                                data,
                                measures=['RmSe', 'Mae'],
                                with_dump=True,
                                dump_dir=tmp_dir,
                                verbose=2)
    shutil.rmtree(tmp_dir)  # remove tmp dir

    assert performances['RMSE'] is performances['rmse']
    assert performances['MaE'] is performances['mae']
Example #6
def test_gridsearchcv_best_estimator():
    """Ensure that the best estimator is the one giving the best score (by
    re-running it)"""

    train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))

    param_grid = {
        'n_epochs': [5],
        'lr_all': [0.002, 0.005],
        'reg_all': [0.4, 0.6],
        'n_factors': [1],
        'init_std_dev': [0]
    }
    gs = GridSearchCV(SVD,
                      param_grid,
                      measures=['mae'],
                      cv=PredefinedKFold(),
                      joblib_verbose=100)
    gs.fit(data)
    best_estimator = gs.best_estimator['mae']

    # recompute MAE of best_estimator
    mae = cross_validate(best_estimator,
                         data,
                         measures=['MAE'],
                         cv=PredefinedKFold())['test_mae']

    assert mae == gs.best_score['mae']
Example #7
def test_cross_validate():

    # First test with a specified CV iterator.
    current_dir = os.path.dirname(os.path.realpath(__file__))
    folds_files = [(current_dir + '/custom_train',
                    current_dir + '/custom_test')]

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    data = Dataset.load_from_folds(folds_files=folds_files, reader=reader)

    algo = NormalPredictor()
    pkf = ms.PredefinedKFold()
    ret = ms.cross_validate(algo,
                            data,
                            measures=['rmse', 'mae'],
                            cv=pkf,
                            verbose=1)
    # Basically just test that keys exist (or don't) as they should
    assert len(ret['test_rmse']) == 1
    assert len(ret['test_mae']) == 1
    assert len(ret['fit_time']) == 1
    assert len(ret['test_time']) == 1
    assert 'test_fcp' not in ret
    assert 'train_rmse' not in ret
    assert 'train_mae' not in ret

    # Test that 5 fold CV is used when cv=None
    # Also check that train_* key exist when return_train_measures is True.
    data = Dataset.load_from_file(current_dir + '/custom_dataset', reader)
    ret = ms.cross_validate(algo,
                            data,
                            measures=['rmse', 'mae'],
                            cv=None,
                            return_train_measures=True,
                            verbose=True)
    assert len(ret['test_rmse']) == 5
    assert len(ret['test_mae']) == 5
    assert len(ret['fit_time']) == 5
    assert len(ret['test_time']) == 5
    assert len(ret['train_rmse']) == 5
    assert len(ret['train_mae']) == 5
Example #8
def test_load_from_df():
    """Ensure that reading a dataset from a pandas DataFrame works."""

    # DF creation.
    ratings_dict = {
        'itemID': [1, 1, 1, 2, 2],
        'userID': [9, 32, 2, 45, '10000'],
        'rating': [3, 2, 4, 3, 1]
    }
    df = pd.DataFrame(ratings_dict)

    reader = Reader(rating_scale=(1, 5))
    data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)

    # Assert split and folds can be used without problems
    with pytest.warns(UserWarning):
        data.split(2)
        assert sum(1 for _ in data.folds()) == 2

    # assert users and items are correctly mapped
    trainset = data.build_full_trainset()
    assert trainset.knows_user(trainset.to_inner_uid(9))
    assert trainset.knows_user(trainset.to_inner_uid('10000'))
    assert trainset.knows_item(trainset.to_inner_iid(2))

    # assert r(9, 1) = 3 and r(2, 1) = 4
    uid9 = trainset.to_inner_uid(9)
    uid2 = trainset.to_inner_uid(2)
    iid1 = trainset.to_inner_iid(1)
    assert trainset.ur[uid9] == [(iid1, 3)]
    assert trainset.ur[uid2] == [(iid1, 4)]

    # assert that a ratings file or a dataframe must be specified
    with pytest.raises(ValueError):
        data = Dataset.load_from_df(None, None)

    # mess up the column ordering and assert that users are not correctly
    # mapped
    data = Dataset.load_from_df(df[['rating', 'itemID', 'userID']], reader)
    trainset = data.build_full_trainset()
    with pytest.raises(ValueError):
        trainset.to_inner_uid('10000')
Example #9
def test_build_full_trainset():
    """Test the build_full_trainset method."""

    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    trainset = data.build_full_trainset()

    assert len(trainset.ur) == 5
    assert len(trainset.ir) == 2
    assert trainset.n_users == 5
    assert trainset.n_items == 2
Example #10
def test_train_test_split():
    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    # test test_size to int and train_size to None (complement)
    trainset, testset = train_test_split(data, test_size=2, train_size=None)
    assert len(testset) == 2
    assert trainset.n_ratings == 3

    # test test_size to float and train_size to None (complement)
    trainset, testset = train_test_split(data, test_size=.2, train_size=None)
    assert len(testset) == 1
    assert trainset.n_ratings == 4

    # test test_size to int and train_size to int
    trainset, testset = train_test_split(data, test_size=2, train_size=3)
    assert len(testset) == 2
    assert trainset.n_ratings == 3

    # test test_size to None (complement) and train_size to int
    trainset, testset = train_test_split(data, test_size=None, train_size=2)
    assert len(testset) == 3
    assert trainset.n_ratings == 2

    # test test_size to None (complement) and train_size to float
    trainset, testset = train_test_split(data, test_size=None, train_size=.2)
    assert len(testset) == 4
    assert trainset.n_ratings == 1

    # Test random_state parameter
    # If random_state is None, you get a different split each time (conditioned
    # by the rng, of course)
    _, testset_a = train_test_split(data, random_state=None)
    _, testset_b = train_test_split(data, random_state=None)
    assert testset_a != testset_b

    # Repeated calls with random_state set lead to the same split
    _, testset_a = train_test_split(data, random_state=1)
    _, testset_b = train_test_split(data, random_state=1)
    assert testset_a == testset_b

    # Test shuffle parameter, if False then splits are the same regardless of
    # random_state.
    _, testset_a = train_test_split(data, random_state=1, shuffle=False)
    _, testset_b = train_test_split(data, random_state=1, shuffle=False)
    assert testset_a == testset_b
Example #11
def test_no_call_to_split():
    """Ensure, as mentioned in the split() docstring, that even if split is not
    called then the data is split with 5 folds after being shuffled."""

    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    with pytest.warns(UserWarning):
        assert len(list(data.folds())) == 5

    # make sure data has been shuffled. If not shuffled, the users in the
    # testsets would be 0, 1, 2... 4 (in that order).
    with pytest.warns(UserWarning):
        users = [int(testset[0][0][-1]) for (_, testset) in data.folds()]
    assert users != list(range(5))
Example #12
def test_split():
    """Test the split method."""

    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    # Test the shuffle parameter
    # Make sure data has not been shuffled. If not shuffled, the users in the
    # testsets are 0, 1, 2... 4 (in that order).
    with pytest.warns(UserWarning):
        data.split(n_folds=5, shuffle=False)
        users = [int(testset[0][0][-1]) for (_, testset) in data.folds()]
        assert users == list(range(5))

    # Test the shuffle parameter
    # Make sure that when called two times without shuffling, folds are the
    # same.
    with pytest.warns(UserWarning):
        data.split(n_folds=3, shuffle=False)
        testsets_a = [testset for (_, testset) in data.folds()]
        data.split(n_folds=3, shuffle=False)
        testsets_b = [testset for (_, testset) in data.folds()]
        assert testsets_a == testsets_b

    # We'll now shuffle b and check that folds are different.
    with pytest.warns(UserWarning):
        data.split(n_folds=3, shuffle=True)
        testsets_b = [testset for (_, testset) in data.folds()]
        assert testsets_a != testsets_b

    # Ensure that folds are the same if split is not called again
    with pytest.warns(UserWarning):
        testsets_a = [testset for (_, testset) in data.folds()]
        testsets_b = [testset for (_, testset) in data.folds()]
        assert testsets_a == testsets_b

    # Test n_folds parameter
    with pytest.warns(UserWarning):
        data.split(5)
        assert len(list(data.folds())) == 5

    with pytest.raises(ValueError):
        data.split(10)  # Too big (greater than number of ratings)

    with pytest.raises(ValueError):
        data.split(1)  # Too low (must be >= 2)
Example #13
def test_nearest_neighbors():
    """Ensure the nearest neighbors are different when using user-user
    similarity vs item-item."""

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))

    data_file = os.path.dirname(os.path.realpath(__file__)) + '/custom_train'
    data = Dataset.load_from_file(data_file, reader)
    trainset = data.build_full_trainset()

    algo_ub = KNNBasic(sim_options={'user_based': True})
    algo_ub.fit(trainset)
    algo_ib = KNNBasic(sim_options={'user_based': False})
    algo_ib.fit(trainset)
    assert algo_ub.get_neighbors(0, k=10) != algo_ib.get_neighbors(0, k=10)
Example #14
def test_same_splits():
    """Ensure that all parameter combinations are tested on the same splits (we
    check that average RMSE scores are the same, which should be enough)."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    data = Dataset.load_from_file(data_file, reader=Reader('ml-100k'))
    data.split(3)

    # all RMSE should be the same (as param combinations are the same)
    param_grid = {'n_epochs': [1, 1], 'lr_all': [.5, .5]}
    with pytest.warns(UserWarning):
        grid_search = GridSearch(SVD, param_grid, measures=['RMSE'], n_jobs=-1)
    grid_search.evaluate(data)

    scores = ['%.1f' % s['RMSE'] for s in grid_search.cv_results['scores']]
    assert len(set(scores)) == 1  # assert RMSE scores are all equal

    # evaluate grid search again, to make sure that splits are still the same.
    grid_search.evaluate(data)
    scores += ['%.1f' % s['RMSE'] for s in grid_search.cv_results['scores']]
    assert len(set(scores)) == 1
Example #15
def test_PredefinedKFold():

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))

    current_dir = os.path.dirname(os.path.realpath(__file__))
    folds_files = [(current_dir + '/custom_train',
                    current_dir + '/custom_test')]

    data = Dataset.load_from_folds(folds_files=folds_files, reader=reader)

    # Make sure rating files are read correctly
    pkf = PredefinedKFold()
    trainset, testset = next(pkf.split(data))
    assert trainset.n_ratings == 6
    assert len(testset) == 3

    # Make sure pkf returns the same folds as the deprecated data.folds()
    with pytest.warns(UserWarning):
        trainset_, testset_ = next(data.folds())
    assert testset_ == testset
Example #16
def test_RepeatedCV():

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    # test n_splits and n_repeats parameters
    rkf = RepeatedKFold(n_splits=3, n_repeats=2)
    assert len(list(rkf.split(data))) == 3 * 2
    rkf = RepeatedKFold(n_splits=3, n_repeats=4)
    assert len(list(rkf.split(data))) == 3 * 4
    rkf = RepeatedKFold(n_splits=4, n_repeats=3)
    assert len(list(rkf.split(data))) == 4 * 3

    # Make sure folds differ between the 2 repetitions (even if random_state is
    # set: random_state controls the whole sequence)
    rkf = RepeatedKFold(n_splits=3, n_repeats=2, random_state=3)
    testsets = list(testset for (_, testset) in rkf.split(data))
    for i in range(3):
        assert testsets[i] != testsets[i + 3]

    # Make sure folds are the same when the same cv iterator is called on the
    # same data (if random_state is set)
    rkf = RepeatedKFold(n_splits=3, n_repeats=2, random_state=3)
    testsets_a = list(testset for (_, testset) in rkf.split(data))
    testsets_b = list(testset for (_, testset) in rkf.split(data))
    assert testsets_a == testsets_b

    # Make sure folds are different when random_state is None
    rkf = RepeatedKFold(n_splits=3, n_repeats=2, random_state=None)
    testsets_a = list(testset for (_, testset) in rkf.split(data))
    testsets_b = list(testset for (_, testset) in rkf.split(data))
    assert testsets_a != testsets_b
Example #17
def test_knns():
    """Ensure the k and min_k parameters are effective for knn algorithms."""

    # the test and train files are from the ml-100k dataset (10% of u1.base and
    # 10 % of u1.test)
    train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))
    pkf = PredefinedKFold()

    # Actually, as KNNWithMeans and KNNBaseline have fallback solutions for
    # when there are not enough neighbors, we can't really test them...
    klasses = (KNNBasic, )  # KNNWithMeans, KNNBaseline)

    k, min_k = 20, 5
    for klass in klasses:
        algo = klass(k=k, min_k=min_k)
        for trainset, testset in pkf.split(data):
            algo.fit(trainset)
            predictions = algo.test(testset)
            for pred in predictions:
                if not pred.details['was_impossible']:
                    assert min_k <= pred.details['actual_k'] <= k
Example #18
def test_build_anti_testset():
    ratings_dict = {
        'itemID': [1, 2, 3, 4, 5, 6, 7, 8, 9],
        'userID': [1, 2, 3, 4, 5, 6, 7, 8, 9],
        'rating': [1, 2, 3, 4, 5, 6, 7, 8, 9]
    }
    df = pd.DataFrame(ratings_dict)

    reader = Reader(rating_scale=(1, 5))
    data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)
    with pytest.warns(UserWarning):
        data.split(2)
        trainset, __testset = next(data.folds())
    # fill with some specific value
    for fillvalue in (0, 42., -1):
        anti = trainset.build_anti_testset(fill=fillvalue)
        for (u, i, r) in anti:
            assert r == fillvalue
    # fill with global_mean
    anti = trainset.build_anti_testset(fill=None)
    for (u, i, r) in anti:
        assert r == trainset.global_mean
    expect = trainset.n_users * trainset.n_items
    assert trainset.n_ratings + len(anti) == expect
Example #19
def test_wrong_file_name():
    """Ensure file names are checked when creating a (custom) Dataset."""
    wrong_files = [('does_not_exist', 'does_not_either')]

    with pytest.raises(ValueError):
        Dataset.load_from_folds(folds_files=wrong_files, reader=reader)
Example #20
def test_KFold():

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    # Test n_folds parameter
    kf = KFold(n_splits=5)
    assert len(list(kf.split(data))) == 5

    with pytest.raises(ValueError):
        kf = KFold(n_splits=10)
        next(kf.split(data))  # Too big (greater than number of ratings)

    with pytest.raises(ValueError):
        kf = KFold(n_splits=1)
        next(kf.split(data))  # Too low (must be >= 2)

    # Make sure data has not been shuffled. If not shuffled, the users in the
    # testsets are 0, 1, 2... 4 (in that order).
    kf = KFold(n_splits=5, shuffle=False)
    users = [int(testset[0][0][-1]) for (_, testset) in kf.split(data)]
    assert users == list(range(5))

    # Make sure that when called two times without shuffling, folds are the
    # same.
    kf = KFold(n_splits=5, shuffle=False)
    testsets_a = [testset for (_, testset) in kf.split(data)]
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b
    # test once again with another KFold instance
    kf = KFold(n_splits=5, shuffle=False)
    testsets_a = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b

    # We'll now shuffle b and check that folds are different.
    # (this is conditioned by the seed set at the beginning of the file)
    kf = KFold(n_splits=5, random_state=None, shuffle=True)
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a != testsets_b
    # test once again: two calls to kf.split make different splits when
    # random_state=None
    testsets_a = [testset for (_, testset) in kf.split(data)]
    assert testsets_a != testsets_b

    # Make sure that folds are the same when the same KFold instance is used
    # with shuffle=True and random_state set to some value
    kf = KFold(n_splits=5, random_state=1, shuffle=True)
    testsets_a = [testset for (_, testset) in kf.split(data)]
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b

    # Make sure raw ratings are not shuffled by KFold
    old_raw_ratings = copy(data.raw_ratings)
    kf = KFold(n_splits=5, shuffle=True)
    next(kf.split(data))
    assert old_raw_ratings == data.raw_ratings

    # Make sure kf.split() and the old data.split() have the same folds.
    np.random.seed(3)
    with pytest.warns(UserWarning):
        data.split(2, shuffle=True)
        testsets_a = [testset for (_, testset) in data.folds()]
    kf = KFold(n_splits=2, random_state=3, shuffle=True)
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b
Example #21
'''Testing renaming of train() into fit()'''
import os

import pytest

from idly import Dataset
from idly import Reader
from idly import AlgoBase
from idly.model_selection import KFold


data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
data = Dataset.load_from_file(data_file, Reader('ml-100k'))
kf = KFold(n_splits=2)


def test_new_style_algo():
    '''Test that new algorithms (i.e. algorithms that only define fit()) can
    support both calls to fit() and to train()
    - algo.fit() is the new way of doing things
    - supporting algo.train() is needed for the (unlikely?) case where a user
    has defined custom tools that use algo.train().
    '''

    class CustomAlgoFit(AlgoBase):

        def __init__(self):
            AlgoBase.__init__(self)
            self.cnt = -1

        def fit(self, trainset):
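            # A minimal, hypothetical sketch of a continuation (the real body
            # of fit() and the exact checks below are assumptions): fit() just
            # delegates to the parent implementation and counts its calls.
            AlgoBase.fit(self, trainset)
            self.cnt += 1
            return self

    # Both the new-style fit() and the deprecated train() entry point should
    # work; as in the earlier examples, calling train() is expected to emit a
    # UserWarning before delegating to fit().
    algo = CustomAlgoFit()
    for trainset, _ in kf.split(data):
        algo.fit(trainset)
        with pytest.warns(UserWarning):
            algo.train(trainset)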
Example #22
def test_ShuffleSplit():

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    with pytest.raises(ValueError):
        ss = ShuffleSplit(n_splits=0)

    with pytest.raises(ValueError):
        ss = ShuffleSplit(test_size=10)
        next(ss.split(data))

    with pytest.raises(ValueError):
        ss = ShuffleSplit(train_size=10)
        next(ss.split(data))

    with pytest.raises(ValueError):
        ss = ShuffleSplit(test_size=3, train_size=3)
        next(ss.split(data))

    with pytest.raises(ValueError):
        ss = ShuffleSplit(test_size=3, train_size=0)
        next(ss.split(data))

    with pytest.raises(ValueError):
        ss = ShuffleSplit(test_size=0, train_size=3)
        next(ss.split(data))

    # No need to cover the entire dataset
    ss = ShuffleSplit(test_size=1, train_size=1)
    next(ss.split(data))

    # test test_size to int and train_size to None (complement)
    ss = ShuffleSplit(test_size=1)
    assert all(len(testset) == 1 for (_, testset) in ss.split(data))
    assert all(trainset.n_ratings == 4 for (trainset, _) in ss.split(data))

    # test test_size to float and train_size to None (complement)
    ss = ShuffleSplit(test_size=.2)  # 20% of 5 = 1
    assert all(len(testset) == 1 for (_, testset) in ss.split(data))
    assert all(trainset.n_ratings == 4 for (trainset, _) in ss.split(data))

    # test test_size to int and train_size to int
    ss = ShuffleSplit(test_size=2, train_size=2)
    assert all(len(testset) == 2 for (_, testset) in ss.split(data))
    assert all(trainset.n_ratings == 2 for (trainset, _) in ss.split(data))

    # test test_size to None (complement) and train_size to int
    ss = ShuffleSplit(test_size=None, train_size=2)
    assert all(len(testset) == 3 for (_, testset) in ss.split(data))
    assert all(trainset.n_ratings == 2 for (trainset, _) in ss.split(data))

    # test test_size to None (complement) and train_size to float
    ss = ShuffleSplit(test_size=None, train_size=.2)
    assert all(len(testset) == 4 for (_, testset) in ss.split(data))
    assert all(trainset.n_ratings == 1 for (trainset, _) in ss.split(data))

    # test default parameters: 5 splits, test_size = .2, train_size = None
    ss = ShuffleSplit()
    assert len(list(ss.split(data))) == 5
    assert all(len(testset) == 1 for (_, testset) in ss.split(data))
    assert all(trainset.n_ratings == 4 for (trainset, _) in ss.split(data))

    # Test random_state parameter
    # If random_state is None, you get a different split each time (conditioned
    # by the rng, of course)
    ss = ShuffleSplit(random_state=None)
    testsets_a = [testset for (_, testset) in ss.split(data)]
    testsets_b = [testset for (_, testset) in ss.split(data)]
    assert testsets_a != testsets_b
    # Repeated calls to split() when random_state is set lead to the same folds
    ss = ShuffleSplit(random_state=1)
    testsets_a = [testset for (_, testset) in ss.split(data)]
    testsets_b = [testset for (_, testset) in ss.split(data)]
    assert testsets_a == testsets_b

    # Test shuffle parameter, if False then splits are the same regardless of
    # random_state.
    ss = ShuffleSplit(random_state=1, shuffle=False)
    testsets_a = [testset for (_, testset) in ss.split(data)]
    testsets_b = [testset for (_, testset) in ss.split(data)]
    assert testsets_a == testsets_b
Example #23
def test_gridsearchcv_refit():
    """Test refit function of GridSearchCV."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'))

    param_grid = {
        'n_epochs': [5],
        'lr_all': [0.002, 0.005],
        'reg_all': [0.4, 0.6],
        'n_factors': [2]
    }

    # assert gs.fit() and gs.test will use best estimator for mae (first
    # appearing in measures)
    gs = GridSearchCV(SVD,
                      param_grid,
                      measures=['mae', 'rmse'],
                      cv=2,
                      refit=True)
    gs.fit(data)
    gs_preds = gs.test(data.construct_testset(data.raw_ratings))
    mae_preds = gs.best_estimator['mae'].test(
        data.construct_testset(data.raw_ratings))
    assert gs_preds == mae_preds

    # assert gs.fit() and gs.test will use best estimator for rmse
    gs = GridSearchCV(SVD,
                      param_grid,
                      measures=['mae', 'rmse'],
                      cv=2,
                      refit='rmse')
    gs.fit(data)
    gs_preds = gs.test(data.construct_testset(data.raw_ratings))
    rmse_preds = gs.best_estimator['rmse'].test(
        data.construct_testset(data.raw_ratings))
    assert gs_preds == rmse_preds
    # test that predict() can be called
    gs.predict(2, 4)

    # assert test() and predict() cannot be used when refit is false
    gs = GridSearchCV(SVD,
                      param_grid,
                      measures=['mae', 'rmse'],
                      cv=2,
                      refit=False)
    gs.fit(data)
    with pytest.raises(ValueError):
        gs_preds = gs.test(data.construct_testset(data.raw_ratings))
    with pytest.raises(ValueError):
        gs.predict('1', '2')

    # test that error is raised if used with load_from_folds
    train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))
    gs = GridSearchCV(SVD,
                      param_grid,
                      measures=['mae', 'rmse'],
                      cv=2,
                      refit=True)
    with pytest.raises(ValueError):
        gs.fit(data)
Example #24
def test_randomizedsearchcv_refit():
    """Test refit method of RandomizedSearchCV class."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'))

    param_distributions = {
        'n_epochs': [5],
        'lr_all': uniform(0.002, 0.003),
        'reg_all': uniform(0.4, 0.2),
        'n_factors': [2]
    }

    # assert rs.fit() and rs.test will use best estimator for mae (first
    # appearing in measures)
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            measures=['mae', 'rmse'],
                            cv=2,
                            refit=True)
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    mae_preds = rs.best_estimator['mae'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == mae_preds

    # assert rs.fit() and rs.test will use best estimator for rmse
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            measures=['mae', 'rmse'],
                            cv=2,
                            refit='rmse')
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    rmse_preds = rs.best_estimator['rmse'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == rmse_preds
    # test that predict() can be called
    rs.predict(2, 4)

    # assert test() and predict() cannot be used when refit is false
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            measures=['mae', 'rmse'],
                            cv=2,
                            refit=False)
    rs.fit(data)
    with pytest.raises(ValueError):
        rs.test(data.construct_testset(data.raw_ratings))
    with pytest.raises(ValueError):
        rs.predict('1', '2')

    # test that error is raised if used with load_from_folds
    train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            measures=['mae', 'rmse'],
                            cv=2,
                            refit=True)
    with pytest.raises(ValueError):
        rs.fit(data)
Example #25
def test_randomizedsearchcv_cv_results():
    """Test the cv_results attribute"""

    f = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(f, Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)
    param_distributions = {
        'n_epochs': [5],
        'lr_all': uniform(.2, .3),
        'reg_all': uniform(.4, .3),
        'n_factors': [5],
        'random_state': [0]
    }
    n_iter = 5
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            n_iter=n_iter,
                            measures=['RMSE', 'mae'],
                            cv=kf,
                            return_train_measures=True)
    rs.fit(data)

    # test keys split*_test_rmse, mean and std dev.
    assert rs.cv_results['split0_test_rmse'].shape == (n_iter, )
    assert rs.cv_results['split1_test_rmse'].shape == (n_iter, )
    assert rs.cv_results['split2_test_rmse'].shape == (n_iter, )
    assert rs.cv_results['mean_test_rmse'].shape == (n_iter, )
    assert np.allclose(
        rs.cv_results['mean_test_rmse'],
        np.mean([
            rs.cv_results['split0_test_rmse'],
            rs.cv_results['split1_test_rmse'],
            rs.cv_results['split2_test_rmse']
        ],
                axis=0))
    assert np.allclose(
        rs.cv_results['std_test_rmse'],
        np.std([
            rs.cv_results['split0_test_rmse'],
            rs.cv_results['split1_test_rmse'],
            rs.cv_results['split2_test_rmse']
        ],
               axis=0))

    # test keys split*_train_rmse, mean and std dev.
    assert rs.cv_results['split0_train_rmse'].shape == (n_iter, )
    assert rs.cv_results['split1_train_rmse'].shape == (n_iter, )
    assert rs.cv_results['split2_train_rmse'].shape == (n_iter, )
    assert rs.cv_results['mean_train_rmse'].shape == (n_iter, )
    assert np.allclose(
        rs.cv_results['mean_train_rmse'],
        np.mean([
            rs.cv_results['split0_train_rmse'],
            rs.cv_results['split1_train_rmse'],
            rs.cv_results['split2_train_rmse']
        ],
                axis=0))
    assert np.allclose(
        rs.cv_results['std_train_rmse'],
        np.std([
            rs.cv_results['split0_train_rmse'],
            rs.cv_results['split1_train_rmse'],
            rs.cv_results['split2_train_rmse']
        ],
               axis=0))

    # test fit and train times dimensions.
    assert rs.cv_results['mean_fit_time'].shape == (n_iter, )
    assert rs.cv_results['std_fit_time'].shape == (n_iter, )
    assert rs.cv_results['mean_test_time'].shape == (n_iter, )
    assert rs.cv_results['std_test_time'].shape == (n_iter, )

    assert rs.cv_results['params'] is rs.param_combinations

    # assert that the best parameter combination according to
    # rs.cv_results['rank_test_<measure>'] is indeed the one in best_params.
    best_index = np.argmin(rs.cv_results['rank_test_rmse'])
    assert rs.cv_results['params'][best_index] == rs.best_params['rmse']
    best_index = np.argmin(rs.cv_results['rank_test_mae'])
    assert rs.cv_results['params'][best_index] == rs.best_params['mae']
Example #26
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
import os

from idly import CoClustering
from idly import Dataset
from idly import Reader
from idly.model_selection import cross_validate
from idly.model_selection import PredefinedKFold

# the test and train files are from the ml-100k dataset (10% of u1.base and
# 10 % of u1.test)
train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
data = Dataset.load_from_folds([(train_file, test_file)], Reader('ml-100k'))
pkf = PredefinedKFold()


def test_CoClustering_parameters():
    """Ensure that all parameters are taken into account."""

    # The baseline against which to compare.
    algo = CoClustering(n_epochs=1, random_state=1)
    rmse_default = cross_validate(algo, data, ['rmse'], pkf)['test_rmse']

    # n_cltr_u
    algo = CoClustering(n_cltr_u=1, n_epochs=1, random_state=1)
    rmse_n_cltr_u = cross_validate(algo, data, ['rmse'], pkf)['test_rmse']
    assert rmse_default != rmse_n_cltr_u
Example #27
def test_trainset_testset():
    """Test the construct_trainset and construct_testset methods."""

    current_dir = os.path.dirname(os.path.realpath(__file__))
    folds_files = [(current_dir + '/custom_train',
                    current_dir + '/custom_test')]

    data = Dataset.load_from_folds(folds_files=folds_files, reader=reader)

    with pytest.warns(UserWarning):
        trainset, testset = next(data.folds())

    # test ur
    ur = trainset.ur
    assert ur[0] == [(0, 4)]
    assert ur[1] == [(0, 4), (1, 2)]
    assert ur[40] == []  # not in the trainset

    # test ir
    ir = trainset.ir
    assert ir[0] == [(0, 4), (1, 4), (2, 1)]
    assert ir[1] == [(1, 2), (2, 1), (3, 5)]
    assert ir[20000] == []  # not in the trainset

    # test n_users, n_items, n_ratings, rating_scale
    assert trainset.n_users == 4
    assert trainset.n_items == 2
    assert trainset.n_ratings == 6
    assert trainset.rating_scale == (1, 5)

    # test raw2inner
    for i in range(4):
        assert trainset.to_inner_uid('user' + str(i)) == i
    with pytest.raises(ValueError):
        trainset.to_inner_uid('unknown_user')

    for i in range(2):
        assert trainset.to_inner_iid('item' + str(i)) == i
    with pytest.raises(ValueError):
        trainset.to_inner_iid('unknown_item')

    # test inner2raw
    assert trainset._inner2raw_id_users is None
    assert trainset._inner2raw_id_items is None
    for i in range(4):
        assert trainset.to_raw_uid(i) == 'user' + str(i)
    for i in range(2):
        assert trainset.to_raw_iid(i) == 'item' + str(i)
    assert trainset._inner2raw_id_users is not None
    assert trainset._inner2raw_id_items is not None

    # Test the build_testset() method
    algo = BaselineOnly()
    algo.fit(trainset)
    testset = trainset.build_testset()
    algo.test(testset)  # ensure an algorithm can manage the data
    assert ('user0', 'item0', 4) in testset
    assert ('user3', 'item1', 5) in testset
    assert ('user3', 'item1', 0) not in testset

    # Test the build_anti_testset() method
    algo = BaselineOnly()
    algo.fit(trainset)
    testset = trainset.build_anti_testset()
    algo.test(testset)  # ensure an algorithm can manage the data
    assert ('user0', 'item0', trainset.global_mean) not in testset
    assert ('user3', 'item1', trainset.global_mean) not in testset
    assert ('user0', 'item1', trainset.global_mean) in testset
    assert ('user3', 'item0', trainset.global_mean) in testset