def test_deprecated_way():
    """Test all Dataset constructors without passing rating_scale as a
    parameter. Make sure we revert back to the Reader object, with a warning
    message.

    Also, make sure ValueError is raised if reader has no rating_scale in
    this context.

    Not using dataset fixtures here for more control.
    """

    # test load_from_file
    toy_data_path = (os.path.dirname(os.path.realpath(__file__)) +
                     '/custom_dataset')
    with pytest.warns(UserWarning):
        reader = Reader(line_format='user item rating', sep=' ',
                        skip_lines=3, rating_scale=(1, 5))
        data = Dataset.load_from_file(file_path=toy_data_path, reader=reader)

    with pytest.raises(ValueError):
        reader = Reader(line_format='user item rating', sep=' ',
                        skip_lines=3, rating_scale=None)
        data = Dataset.load_from_file(file_path=toy_data_path, reader=reader)

    # test load_from_folds
    train_file = os.path.join(os.path.dirname(__file__),
                              './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__),
                             './u1_ml100k_test')
    with pytest.warns(UserWarning):
        reader = Reader(line_format='user item rating timestamp', sep='\t',
                        rating_scale=(1, 5))
        data = Dataset.load_from_folds([(train_file, test_file)],
                                       reader=reader)
    with pytest.raises(ValueError):
        reader = Reader(line_format='user item rating timestamp', sep='\t',
                        rating_scale=None)
        data = Dataset.load_from_folds([(train_file, test_file)],
                                       reader=reader)

    # test load_from_df
    ratings_dict = {'itemID': [1, 1, 1, 2, 2],
                    'userID': [9, 32, 2, 45, '10000'],
                    'rating': [3, 2, 4, 3, 1]}
    df = pd.DataFrame(ratings_dict)

    with pytest.warns(UserWarning):
        reader = Reader(rating_scale=(1, 5))
        data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']],
                                    reader=reader)
    with pytest.raises(ValueError):
        reader = Reader(rating_scale=None)
        data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']],  # noqa
                                    reader=reader)
def test_gridsearchcv_same_splits():
    """Ensure that all parameter combinations are tested on the same splits
    (we check their RMSE scores are the same once averaged over the splits,
    which should be enough). We use as much parallelism as possible."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, reader=Reader('ml-100k'),
                                  rating_scale=(1, 5))
    kf = KFold(3, shuffle=True, random_state=4)

    # all RMSE should be the same (as param combinations are the same)
    param_grid = {'n_epochs': [5], 'lr_all': [.2, .2],
                  'reg_all': [.4, .4], 'n_factors': [5], 'random_state': [0]}
    gs = GridSearchCV(SVD, param_grid, measures=['RMSE'], cv=kf, n_jobs=1)
    gs.fit(data)

    rmse_scores = [m for m in gs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal

    # Note: actually, even when setting random_state=None in kf, the same
    # folds are used because we use product(param_comb, kf.split(...)).
    # However, it's needed to have the same folds when calling fit again:
    gs.fit(data)
    rmse_scores += [m for m in gs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal
def test_randomizedsearchcv_cv_results():
    """Test the cv_results attribute"""

    f = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(f, Reader('ml-100k'), rating_scale=(1, 5))
    kf = KFold(3, shuffle=True, random_state=4)
    param_distributions = {'n_epochs': [5], 'lr_all': uniform(.2, .3),
                           'reg_all': uniform(.4, .3), 'n_factors': [5],
                           'random_state': [0]}
    n_iter = 5
    rs = RandomizedSearchCV(SVD, param_distributions, n_iter=n_iter,
                            measures=['RMSE', 'mae'], cv=kf,
                            return_train_measures=True)
    rs.fit(data)

    # test keys split*_test_rmse, mean and std dev.
    assert rs.cv_results['split0_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['split1_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['split2_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['mean_test_rmse'].shape == (n_iter,)
    assert np.allclose(rs.cv_results['mean_test_rmse'],
                       np.mean([rs.cv_results['split0_test_rmse'],
                                rs.cv_results['split1_test_rmse'],
                                rs.cv_results['split2_test_rmse']],
                               axis=0))
    assert np.allclose(rs.cv_results['std_test_rmse'],
                       np.std([rs.cv_results['split0_test_rmse'],
                               rs.cv_results['split1_test_rmse'],
                               rs.cv_results['split2_test_rmse']],
                              axis=0))

    # test keys split*_train_rmse, mean and std dev.
    assert rs.cv_results['split0_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['split1_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['split2_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['mean_train_rmse'].shape == (n_iter,)
    assert np.allclose(rs.cv_results['mean_train_rmse'],
                       np.mean([rs.cv_results['split0_train_rmse'],
                                rs.cv_results['split1_train_rmse'],
                                rs.cv_results['split2_train_rmse']],
                               axis=0))
    assert np.allclose(rs.cv_results['std_train_rmse'],
                       np.std([rs.cv_results['split0_train_rmse'],
                               rs.cv_results['split1_train_rmse'],
                               rs.cv_results['split2_train_rmse']],
                              axis=0))

    # test fit and train times dimensions.
    assert rs.cv_results['mean_fit_time'].shape == (n_iter,)
    assert rs.cv_results['std_fit_time'].shape == (n_iter,)
    assert rs.cv_results['mean_test_time'].shape == (n_iter,)
    assert rs.cv_results['std_test_time'].shape == (n_iter,)

    assert rs.cv_results['params'] is rs.param_combinations

    # assert that best parameter in rs.cv_results['rank_test_measure'] is
    # indeed the best_param attribute.
    best_index = np.argmin(rs.cv_results['rank_test_rmse'])
    assert rs.cv_results['params'][best_index] == rs.best_params['rmse']
    best_index = np.argmin(rs.cv_results['rank_test_mae'])
    assert rs.cv_results['params'][best_index] == rs.best_params['mae']
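
# Side note (not part of the test suite): cv_results is a plain dict whose
# values are per-combination lists/arrays, so it can usually be inspected as a
# pandas DataFrame. This is a minimal sketch, assuming pandas is available
# (it is imported as `pd` in the dataset tests above); `search` is any fitted
# GridSearchCV or RandomizedSearchCV instance.
def _cv_results_as_dataframe(search):
    import pandas as pd  # local import: only needed for this sketch

    results = pd.DataFrame.from_dict(search.cv_results)
    # e.g. rank the parameter combinations by their mean test RMSE
    return results.sort_values('mean_test_rmse')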
@pytest.fixture()
def small_ml():
    """Return a Dataset object with 2000 movielens-100k ratings."""
    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'),
                                  rating_scale=(1, 5))
    return data
@pytest.fixture()
def toy_data(toy_data_reader):
    toy_data_path = (os.path.dirname(os.path.realpath(__file__)) +
                     '/custom_dataset')
    data = Dataset.load_from_file(file_path=toy_data_path,
                                  reader=toy_data_reader,
                                  rating_scale=(1, 5))
    return data
def test_nearest_neighbors():
    """Ensure the nearest neighbors are different when using user-user
    similarity vs item-item."""

    reader = Reader(line_format='user item rating', sep=' ', skip_lines=3)
    data_file = os.path.dirname(os.path.realpath(__file__)) + '/custom_train'
    data = Dataset.load_from_file(data_file, reader, rating_scale=(1, 5))
    trainset = data.build_full_trainset()

    algo_ub = KNNBasic(sim_options={'user_based': True})
    algo_ub.fit(trainset)
    algo_ib = KNNBasic(sim_options={'user_based': False})
    algo_ib.fit(trainset)

    assert algo_ub.get_neighbors(0, k=10) != algo_ib.get_neighbors(0, k=10)
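
# Side note (not part of the test suite): get_neighbors() works with *inner*
# ids, so looking up the neighbors of a known raw user id usually requires a
# conversion in both directions. A minimal sketch, assuming the Trainset id
# conversion helpers keep their usual names (to_inner_uid / to_raw_uid) in
# this package:
def _neighbors_of_raw_user(algo, trainset, raw_uid, k=10):
    inner_uid = trainset.to_inner_uid(raw_uid)
    neighbor_inner_ids = algo.get_neighbors(inner_uid, k=k)
    return [trainset.to_raw_uid(inner_id) for inner_id in neighbor_inner_ids]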
def test_randomizedsearchcv_refit(u1_ml100k):
    """Test refit method of RandomizedSearchCV class."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'),
                                  rating_scale=(1, 5))

    param_distributions = {'n_epochs': [5], 'lr_all': uniform(0.002, 0.003),
                           'reg_all': uniform(0.4, 0.2), 'n_factors': [2]}

    # assert rs.fit() and rs.test will use best estimator for mae (first
    # appearing in measures)
    rs = RandomizedSearchCV(SVD, param_distributions,
                            measures=['mae', 'rmse'], cv=2, refit=True)
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    mae_preds = rs.best_estimator['mae'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == mae_preds

    # assert rs.fit() and rs.test will use best estimator for rmse
    rs = RandomizedSearchCV(SVD, param_distributions,
                            measures=['mae', 'rmse'], cv=2, refit='rmse')
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    rmse_preds = rs.best_estimator['rmse'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == rmse_preds

    # test that predict() can be called
    rs.predict(2, 4)

    # assert test() and predict() cannot be used when refit is false
    rs = RandomizedSearchCV(SVD, param_distributions,
                            measures=['mae', 'rmse'], cv=2, refit=False)
    rs.fit(data)
    with pytest.raises(ValueError):
        rs.test(data.construct_testset(data.raw_ratings))
    with pytest.raises(ValueError):
        rs.predict('1', '2')

    # test that error is raised if used with load_from_folds
    rs = RandomizedSearchCV(SVD, param_distributions,
                            measures=['mae', 'rmse'], cv=2, refit=True)
    with pytest.raises(ValueError):
        rs.fit(u1_ml100k)
def test_gridsearchcv_refit(u1_ml100k):
    """Test refit function of GridSearchCV."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'),
                                  rating_scale=(1, 5))

    param_grid = {'n_epochs': [5], 'lr_all': [0.002, 0.005],
                  'reg_all': [0.4, 0.6], 'n_factors': [2]}

    # assert gs.fit() and gs.test will use best estimator for mae (first
    # appearing in measures)
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit=True)
    gs.fit(data)
    gs_preds = gs.test(data.construct_testset(data.raw_ratings))
    mae_preds = gs.best_estimator['mae'].test(
        data.construct_testset(data.raw_ratings))
    assert gs_preds == mae_preds

    # assert gs.fit() and gs.test will use best estimator for rmse
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit='rmse')
    gs.fit(data)
    gs_preds = gs.test(data.construct_testset(data.raw_ratings))
    rmse_preds = gs.best_estimator['rmse'].test(
        data.construct_testset(data.raw_ratings))
    assert gs_preds == rmse_preds

    # test that predict() can be called
    gs.predict(2, 4)

    # assert test() and predict() cannot be used when refit is false
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit=False)
    gs.fit(data)
    with pytest.raises(ValueError):
        gs.test(data.construct_testset(data.raw_ratings))
    with pytest.raises(ValueError):
        gs.predict('1', '2')

    # test that error is raised if used with load_from_folds
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit=True)
    with pytest.raises(ValueError):
        gs.fit(u1_ml100k)
def test_LeaveOneOut(toy_data):

    loo = LeaveOneOut()
    with pytest.raises(ValueError):
        # each user only has 1 item so trainsets fail
        next(loo.split(toy_data))

    reader = Reader('ml-100k')
    data_path = (os.path.dirname(os.path.realpath(__file__)) +
                 '/u1_ml100k_test')
    data = Dataset.load_from_file(file_path=data_path, reader=reader,
                                  rating_scale=(1, 5))

    # Test random_state parameter
    # If random_state is None, you get different splits each time (conditioned
    # by rng of course)
    loo = LeaveOneOut(random_state=None)
    testsets_a = [testset for (_, testset) in loo.split(data)]
    testsets_b = [testset for (_, testset) in loo.split(data)]
    assert testsets_a != testsets_b
    # Repeated calls to split when random_state is set lead to the same folds
    loo = LeaveOneOut(random_state=1)
    testsets_a = [testset for (_, testset) in loo.split(data)]
    testsets_b = [testset for (_, testset) in loo.split(data)]
    assert testsets_a == testsets_b

    # Make sure only one rating per user is present in the testset
    loo = LeaveOneOut()
    for _, testset in loo.split(data):
        cnt = Counter([uid for (uid, _, _) in testset])
        assert all(val == 1 for val in itervalues(cnt))

    # test the min_n_ratings parameter
    loo = LeaveOneOut(min_n_ratings=5)
    for trainset, _ in loo.split(data):
        assert all(len(ratings) >= 5 for ratings in itervalues(trainset.ur))

    loo = LeaveOneOut(min_n_ratings=10)
    for trainset, _ in loo.split(data):
        assert all(len(ratings) >= 10 for ratings in itervalues(trainset.ur))

    loo = LeaveOneOut(min_n_ratings=10000)  # too high
    with pytest.raises(ValueError):
        next(loo.split(data))
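
# Side note (not part of the test suite): outside of testing, LeaveOneOut is
# used like any other splitter: iterate over split() and fit/test an algorithm
# on each (trainset, testset) pair. A minimal sketch, reusing only calls that
# already appear in this module (SVD, fit, test):
def _leave_one_out_example(data):
    loo = LeaveOneOut(random_state=1)
    all_predictions = []
    for trainset, testset in loo.split(data):
        algo = SVD(random_state=0)
        algo.fit(trainset)
        all_predictions.extend(algo.test(testset))
    return all_predictions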
""" This module descibes how to load a custom dataset from a single file. As a custom dataset we will actually use the movielens-100k dataset, but act as if it were not built-in. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import os from amaze import BaselineOnly from amaze import Dataset from amaze import Reader from amaze.model_selection import cross_validate # path to dataset file file_path = os.path.expanduser('~/.amaze_data/ml-100k/ml-100k/u.data') # As we're loading a custom dataset, we need to define a reader. In the # movielens-100k dataset, each line has the following format: # 'user item rating timestamp', separated by '\t' characters. reader = Reader(line_format='user item rating timestamp', sep='\t') data = Dataset.load_from_file(file_path, reader=reader, rating_scale=(1, 5)) # We can now use this dataset as we please, e.g. calling cross_validate cross_validate(BaselineOnly(), data, verbose=True)