Example #1
def test_gridsearchcv_same_splits():
    """Ensure that all parameter combinations are tested on the same splits (we
    check their RMSE scores are the same once averaged over the splits, which
    should be enough). We use as much parallelism as possible."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, reader=Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)

    # all RMSE should be the same (as param combinations are the same)
    param_grid = {
        'n_epochs': [5],
        'lr_all': [.2, .2],
        'reg_all': [.4, .4],
        'n_factors': [5],
        'random_state': [0]
    }
    gs = GridSearchCV(SVD, param_grid, measures=['RMSE'], cv=kf, n_jobs=1)
    gs.fit(data)

    rmse_scores = [m for m in gs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal

    # Note: even with random_state=None in kf, all combinations use the same
    # folds because we use product(param_comb, kf.split(...)). However,
    # random_state is needed so that the folds stay the same when fit() is
    # called again:
    gs.fit(data)
    rmse_scores += [m for m in gs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal
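
The comment in the test body is the key detail: itertools.product consumes its
input iterables up front, so the generator returned by kf.split(data) is
exhausted exactly once and the resulting (trainset, testset) pairs are reused
for every parameter combination. A minimal standalone sketch of that behaviour
(fake_split and param_combinations are made-up stand-ins, not library code):

from itertools import product

def fake_split(n_folds):
    """Stand-in for kf.split(data): a generator that logs when it runs."""
    print('computing folds...')  # printed only once below
    for fold_id in range(n_folds):
        yield fold_id

param_combinations = [{'lr_all': .2}, {'lr_all': .2}]

# product() materializes both iterables before iterating, so fake_split(3) is
# evaluated a single time and every parameter combination is paired with the
# same folds.
pairs = list(product(param_combinations, fake_split(3)))
assert len(pairs) == len(param_combinations) * 3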
Example #2
def test_get_cv():

    get_cv(None)
    get_cv(4)
    get_cv(KFold())

    with pytest.raises(ValueError):
        get_cv(23.2)
    with pytest.raises(ValueError):
        get_cv('bad')
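
The accepted inputs pinned down here are: None and an int are converted into a
K-fold splitter, an existing splitter object is passed through, and anything
else raises ValueError. A rough sketch of such a dispatcher, assuming a
Surprise-style KFold and duck-typing on a split() method (get_cv_sketch is an
illustration, not the actual idly implementation):

from idly.model_selection import KFold

def get_cv_sketch(cv):
    if cv is None:
        return KFold(n_splits=5)       # assumed default of 5 folds
    if isinstance(cv, int):
        return KFold(n_splits=cv)
    # duck-typing: any non-string object exposing split() is used as-is
    # (strings are excluded because str also has a split() method)
    if hasattr(cv, 'split') and not isinstance(cv, str):
        return cv
    raise ValueError('Expecting None, an int or a CV iterator, '
                     'got a {}'.format(type(cv)))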
Example #3
def test_KFold():

    reader = Reader(line_format='user item rating',
                    sep=' ',
                    skip_lines=3,
                    rating_scale=(1, 5))
    custom_dataset_path = (os.path.dirname(os.path.realpath(__file__)) +
                           '/custom_dataset')
    data = Dataset.load_from_file(file_path=custom_dataset_path, reader=reader)

    # Test n_folds parameter
    kf = KFold(n_splits=5)
    assert len(list(kf.split(data))) == 5

    with pytest.raises(ValueError):
        kf = KFold(n_splits=10)
        next(kf.split(data))  # Too big (greater than number of ratings)

    with pytest.raises(ValueError):
        kf = KFold(n_splits=1)
        next(kf.split(data))  # Too low (must be >= 2)

    # Make sure data has not been shuffled. If not shuffled, the users in the
    # testsets are 0, 1, 2... 4 (in that order).
    kf = KFold(n_splits=5, shuffle=False)
    users = [int(testset[0][0][-1]) for (_, testset) in kf.split(data)]
    assert users == list(range(5))

    # Make sure that when called two times without shuffling, folds are the
    # same.
    kf = KFold(n_splits=5, shuffle=False)
    testsets_a = [testset for (_, testset) in kf.split(data)]
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b
    # test once again with another KFold instance
    kf = KFold(n_splits=5, shuffle=False)
    testsets_a = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b

    # We'll now shuffle b and check that folds are different.
    # (this depends on the seed set at the beginning of the file)
    kf = KFold(n_splits=5, random_state=None, shuffle=True)
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a != testsets_b
    # test once again: two calls to kf.split make different splits when
    # random_state=None
    testsets_a = [testset for (_, testset) in kf.split(data)]
    assert testsets_a != testsets_b

    # Make sure that folds are the same when the same KFold instance is used
    # with shuffle=True but random_state is set to some value
    kf = KFold(n_splits=5, random_state=1, shuffle=True)
    testsets_a = [testset for (_, testset) in kf.split(data)]
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b

    # Make sure raw ratings are not shuffled by KFold
    old_raw_ratings = copy(data.raw_ratings)
    kf = KFold(n_splits=5, shuffle=True)
    next(kf.split(data))
    assert old_raw_ratings == data.raw_ratings

    # Make sure kf.split() and the old data.split() have the same folds.
    np.random.seed(3)
    with pytest.warns(UserWarning):
        data.split(2, shuffle=True)
        testsets_a = [testset for (_, testset) in data.folds()]
    kf = KFold(n_splits=2, random_state=3, shuffle=True)
    testsets_b = [testset for (_, testset) in kf.split(data)]
    assert testsets_a == testsets_b
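
The behaviour exercised above maps directly onto the usual cross-validation
loop: kf.split(data) yields (trainset, testset) pairs, the raw ratings are left
untouched, and a fixed random_state makes the folds reproducible across calls.
A short usage sketch, assuming the idly fork exposes SVD, accuracy and the
ml-100k builtin loader at the top level the way Surprise does:

from idly import Dataset, SVD, accuracy          # assumed top-level exports
from idly.model_selection import KFold

data = Dataset.load_builtin('ml-100k')           # assumed builtin loader

kf = KFold(n_splits=5, shuffle=True, random_state=1)  # reproducible folds
algo = SVD(n_factors=50, random_state=0)

for trainset, testset in kf.split(data):
    algo.fit(trainset)
    predictions = algo.test(testset)
    accuracy.rmse(predictions, verbose=True)     # one RMSE score per fold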
Example #4
def test_randomizedsearchcv_cv_results():
    """Test the cv_results attribute"""

    f = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(f, Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)
    param_distributions = {
        'n_epochs': [5],
        'lr_all': uniform(.2, .3),
        'reg_all': uniform(.4, .3),
        'n_factors': [5],
        'random_state': [0]
    }
    n_iter = 5
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            n_iter=n_iter,
                            measures=['RMSE', 'mae'],
                            cv=kf,
                            return_train_measures=True)
    rs.fit(data)

    # test keys split*_test_rmse, mean and std dev.
    assert rs.cv_results['split0_test_rmse'].shape == (n_iter, )
    assert rs.cv_results['split1_test_rmse'].shape == (n_iter, )
    assert rs.cv_results['split2_test_rmse'].shape == (n_iter, )
    assert rs.cv_results['mean_test_rmse'].shape == (n_iter, )
    assert np.allclose(
        rs.cv_results['mean_test_rmse'],
        np.mean([rs.cv_results['split0_test_rmse'],
                 rs.cv_results['split1_test_rmse'],
                 rs.cv_results['split2_test_rmse']], axis=0))
    assert np.allclose(
        rs.cv_results['std_test_rmse'],
        np.std([rs.cv_results['split0_test_rmse'],
                rs.cv_results['split1_test_rmse'],
                rs.cv_results['split2_test_rmse']], axis=0))

    # test keys split*_train_rmse, mean and std dev.
    assert rs.cv_results['split0_train_rmse'].shape == (n_iter, )
    assert rs.cv_results['split1_train_rmse'].shape == (n_iter, )
    assert rs.cv_results['split2_train_rmse'].shape == (n_iter, )
    assert rs.cv_results['mean_train_rmse'].shape == (n_iter, )
    assert np.allclose(
        rs.cv_results['mean_train_rmse'],
        np.mean([rs.cv_results['split0_train_rmse'],
                 rs.cv_results['split1_train_rmse'],
                 rs.cv_results['split2_train_rmse']], axis=0))
    assert np.allclose(
        rs.cv_results['std_train_rmse'],
        np.std([rs.cv_results['split0_train_rmse'],
                rs.cv_results['split1_train_rmse'],
                rs.cv_results['split2_train_rmse']], axis=0))

    # check the dimensions of the fit and test times.
    assert rs.cv_results['mean_fit_time'].shape == (n_iter, )
    assert rs.cv_results['std_fit_time'].shape == (n_iter, )
    assert rs.cv_results['mean_test_time'].shape == (n_iter, )
    assert rs.cv_results['std_test_time'].shape == (n_iter, )

    assert rs.cv_results['params'] is rs.param_combinations

    # assert that the best parameters according to cv_results['rank_test_*']
    # match the best_params attribute.
    best_index = np.argmin(rs.cv_results['rank_test_rmse'])
    assert rs.cv_results['params'][best_index] == rs.best_params['rmse']
    best_index = np.argmin(rs.cv_results['rank_test_mae'])
    assert rs.cv_results['params'][best_index] == rs.best_params['mae']
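
The shape checks above mirror how these attributes are used in practice:
best_params and best_score are dicts keyed by measure name, and cv_results is
a dict of equal-length arrays that converts cleanly into a table. A hedged
usage sketch, with the builtin loader and the pandas conversion assumed rather
than taken from the test:

import pandas as pd
from scipy.stats import uniform

from idly import Dataset, SVD                    # assumed top-level exports
from idly.model_selection import RandomizedSearchCV

data = Dataset.load_builtin('ml-100k')           # assumed builtin loader

param_distributions = {'n_epochs': [5, 10],
                       'lr_all': uniform(.002, .005),  # samples from [.002, .007)
                       'reg_all': uniform(.4, .2)}

rs = RandomizedSearchCV(SVD, param_distributions, n_iter=10,
                        measures=['rmse', 'mae'], cv=3, random_state=0)
rs.fit(data)

print(rs.best_score['rmse'], rs.best_params['rmse'])

# Every value in cv_results has length n_iter, so the dict reads as a table.
results = pd.DataFrame(rs.cv_results)
print(results[['mean_test_rmse', 'rank_test_rmse', 'mean_fit_time']])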
Example #5
'''Test the renaming of train() to fit()'''
import os

import pytest

from idly import Dataset
from idly import Reader
from idly import AlgoBase
from idly.model_selection import KFold


data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
data = Dataset.load_from_file(data_file, Reader('ml-100k'))
kf = KFold(n_splits=2)


def test_new_style_algo():
    '''Test that new algorithms (i.e. algorithms that only define fit()) can
    support both calls to fit() and to train()
    - algo.fit() is the new way of doing things
    - supporting algo.train() is needed for the (unlikely?) case where a user
    has defined custom tools that use algo.train().
    '''

    class CustomAlgoFit(AlgoBase):

        def __init__(self):
            AlgoBase.__init__(self)
            self.cnt = -1

        def fit(self, trainset):