Example #1
def test_randomizedsearchcv_best_estimator(u1_ml100k):
    """Ensure that the best estimator is the one that gives the best score (by
    re-running it)"""

    param_distributions = {
        'n_epochs': [5],
        'lr_all': uniform(0.002, 0.003),
        'reg_all': uniform(0.04, 0.02),
        'n_factors': [1],
        'init_std_dev': [0]
    }
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            measures=['mae'],
                            cv=PredefinedKFold(),
                            joblib_verbose=100)
    rs.fit(u1_ml100k)
    best_estimator = rs.best_estimator['mae']

    # recompute MAE of best_estimator
    mae = cross_validate(best_estimator,
                         u1_ml100k,
                         measures=['MAE'],
                         cv=PredefinedKFold())['test_mae']

    assert mae == rs.best_score['mae']
Example #2
def test_randomizedsearchcv_best_estimator():
    """Ensure that the best estimator is the one that gives the best score (by
    re-running it)"""
    train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))

    param_distributions = {
        'n_epochs': [5],
        'lr_all': uniform(0.002, 0.003),
        'reg_all': uniform(0.04, 0.02),
        'n_factors': [1],
        'init_std_dev': [0]
    }
    rs = RandomizedSearchCV(SVD,
                            param_distributions,
                            measures=['mae'],
                            cv=PredefinedKFold(),
                            joblib_verbose=100)
    rs.fit(data)
    best_estimator = rs.best_estimator['mae']

    # recompute MAE of best_estimator
    mae = cross_validate(best_estimator,
                         data,
                         measures=['MAE'],
                         cv=PredefinedKFold())['test_mae']

    assert mae == rs.best_score['mae']
Example #3
def test_randomizedsearchcv_same_splits():
    """Ensure that all parameter combinations are tested on the same splits (we
    check their RMSE scores are the same once averaged over the splits, which
    should be enough). We use as much parallelism as possible."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, reader=Reader('ml-100k'),
                                  rating_scale=(1, 5))
    kf = KFold(3, shuffle=True, random_state=4)

    # all RMSE should be the same (as param combinations are the same)
    param_distributions = {'n_epochs': [5], 'lr_all': uniform(.2, 0),
                           'reg_all': uniform(.4, 0), 'n_factors': [5],
                           'random_state': [0]}
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['RMSE'], cv=kf,
                            n_jobs=1)
    rs.fit(data)

    rmse_scores = [m for m in rs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal

    # Note: actually, even when setting random_state=None in kf, the same folds
    # are used because we use product(param_comb, kf.split(...)). However, it's
    # needed to have the same folds when calling fit again:
    rs.fit(data)
    rmse_scores += [m for m in rs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal
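The note above relies on KFold's random_state: with a fixed seed, repeated calls to split() produce identical folds, which is what makes the scores of the second fit() comparable to the first. A minimal standalone sketch (the toy DataFrame is illustrative and not part of the test):

import pandas as pd
from surprise import Dataset, Reader
from surprise.model_selection import KFold

# Illustrative toy ratings; any Dataset behaves the same way.
ratings = pd.DataFrame({'user': [1, 1, 2, 2, 3, 3],
                        'item': [1, 2, 1, 3, 2, 3],
                        'rating': [3, 4, 5, 2, 4, 1]})
data = Dataset.load_from_df(ratings, Reader(rating_scale=(1, 5)))

kf = KFold(n_splits=3, shuffle=True, random_state=4)
first = [testset for _, testset in kf.split(data)]
second = [testset for _, testset in kf.split(data)]
assert first == second  # identical folds on every pass because the seed is fixed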
Example #4
def test_randomizedsearchcv_cv_results():
    """Test the cv_results attribute"""

    f = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(f, Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)
    param_distributions = {'n_epochs': [5], 'lr_all': uniform(.2, .3),
                           'reg_all': uniform(.4, .3), 'n_factors': [5],
                           'random_state': [0]}
    n_iter = 5
    rs = RandomizedSearchCV(SVD, param_distributions, n_iter=n_iter,
                            measures=['RMSE', 'mae'], cv=kf,
                            return_train_measures=True)
    rs.fit(data)

    # test keys split*_test_rmse, mean and std dev.
    assert rs.cv_results['split0_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['split1_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['split2_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['mean_test_rmse'].shape == (n_iter,)
    assert np.allclose(rs.cv_results['mean_test_rmse'],
                       np.mean([rs.cv_results['split0_test_rmse'],
                                rs.cv_results['split1_test_rmse'],
                                rs.cv_results['split2_test_rmse']], axis=0))
    assert np.allclose(rs.cv_results['std_test_rmse'],
                       np.std([rs.cv_results['split0_test_rmse'],
                               rs.cv_results['split1_test_rmse'],
                               rs.cv_results['split2_test_rmse']], axis=0))

    # test keys split*_train_rmse, mean and std dev.
    assert rs.cv_results['split0_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['split1_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['split2_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['mean_train_rmse'].shape == (n_iter,)
    assert np.allclose(rs.cv_results['mean_train_rmse'],
                       np.mean([rs.cv_results['split0_train_rmse'],
                                rs.cv_results['split1_train_rmse'],
                                rs.cv_results['split2_train_rmse']], axis=0))
    assert np.allclose(rs.cv_results['std_train_rmse'],
                       np.std([rs.cv_results['split0_train_rmse'],
                               rs.cv_results['split1_train_rmse'],
                               rs.cv_results['split2_train_rmse']], axis=0))

    # test fit and train times dimensions.
    assert rs.cv_results['mean_fit_time'].shape == (n_iter,)
    assert rs.cv_results['std_fit_time'].shape == (n_iter,)
    assert rs.cv_results['mean_test_time'].shape == (n_iter,)
    assert rs.cv_results['std_test_time'].shape == (n_iter,)

    assert rs.cv_results['params'] is rs.param_combinations

    # assert that best parameter in rs.cv_results['rank_test_measure'] is
    # indeed the best_param attribute.
    best_index = np.argmin(rs.cv_results['rank_test_rmse'])
    assert rs.cv_results['params'][best_index] == rs.best_params['rmse']
    best_index = np.argmin(rs.cv_results['rank_test_mae'])
    assert rs.cv_results['params'][best_index] == rs.best_params['mae']
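Beyond these shape checks, cv_results is a plain dict of equal-length arrays and lists, so it can be handed directly to pandas for inspection. A short sketch, assuming rs has been fitted as in the test above:

import pandas as pd

# One row per sampled parameter combination; columns hold per-split and
# aggregated measures, fit/test times, ranks and the sampled params.
results_df = pd.DataFrame.from_dict(rs.cv_results)
print(results_df[['mean_test_rmse', 'std_test_rmse', 'rank_test_rmse', 'params']])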
Example #5
def test_randomizedsearchcv_cv_results():
    """Test the cv_results attribute"""

    f = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(f, Reader('ml-100k'), rating_scale=(1, 5))
    kf = KFold(3, shuffle=True, random_state=4)
    param_distributions = {'n_epochs': [5], 'lr_all': uniform(.2, .3),
                           'reg_all': uniform(.4, .3), 'n_factors': [5],
                           'random_state': [0]}
    n_iter = 5
    rs = RandomizedSearchCV(SVD, param_distributions, n_iter=n_iter,
                            measures=['RMSE', 'mae'], cv=kf,
                            return_train_measures=True)
    rs.fit(data)

    # test keys split*_test_rmse, mean and std dev.
    assert rs.cv_results['split0_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['split1_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['split2_test_rmse'].shape == (n_iter,)
    assert rs.cv_results['mean_test_rmse'].shape == (n_iter,)
    assert np.allclose(rs.cv_results['mean_test_rmse'],
                       np.mean([rs.cv_results['split0_test_rmse'],
                                rs.cv_results['split1_test_rmse'],
                                rs.cv_results['split2_test_rmse']], axis=0))
    assert np.allclose(rs.cv_results['std_test_rmse'],
                       np.std([rs.cv_results['split0_test_rmse'],
                               rs.cv_results['split1_test_rmse'],
                               rs.cv_results['split2_test_rmse']], axis=0))

    # test keys split*_train_rmse, mean and std dev.
    assert rs.cv_results['split0_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['split1_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['split2_train_rmse'].shape == (n_iter,)
    assert rs.cv_results['mean_train_rmse'].shape == (n_iter,)
    assert np.allclose(rs.cv_results['mean_train_rmse'],
                       np.mean([rs.cv_results['split0_train_rmse'],
                                rs.cv_results['split1_train_rmse'],
                                rs.cv_results['split2_train_rmse']], axis=0))
    assert np.allclose(rs.cv_results['std_train_rmse'],
                       np.std([rs.cv_results['split0_train_rmse'],
                               rs.cv_results['split1_train_rmse'],
                               rs.cv_results['split2_train_rmse']], axis=0))

    # test fit and train times dimensions.
    assert rs.cv_results['mean_fit_time'].shape == (n_iter,)
    assert rs.cv_results['std_fit_time'].shape == (n_iter,)
    assert rs.cv_results['mean_test_time'].shape == (n_iter,)
    assert rs.cv_results['std_test_time'].shape == (n_iter,)

    assert rs.cv_results['params'] is rs.param_combinations

    # assert that best parameter in rs.cv_results['rank_test_measure'] is
    # indeed the best_param attribute.
    best_index = np.argmin(rs.cv_results['rank_test_rmse'])
    assert rs.cv_results['params'][best_index] == rs.best_params['rmse']
    best_index = np.argmin(rs.cv_results['rank_test_mae'])
    assert rs.cv_results['params'][best_index] == rs.best_params['mae']
Example #6
def hyperparameter_tuning(data):
    reader = Reader()
    data_rs = Dataset.load_from_df(data, reader)

    parameters = {
        'n_factors': [50, 100, 200],
        'n_epochs': [20, 40],
        'lr_all': [0.005, 0.001],
        'reg_all': [0.05, 0.02, 0.01]
    }
    rs = RandomizedSearchCV(SVD, parameters, measures=['rmse'], cv=5)
    rs.fit(data_rs)

    return rs.best_params['rmse'], rs.best_score
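The function returns the best parameter dict for RMSE together with the full best_score mapping. A typical follow-up is to refit an SVD with those parameters on the complete data set; this is only a sketch, and the ratings_df name, its columns, and its values are illustrative assumptions, not part of the original snippet:

import pandas as pd
from surprise import SVD, Dataset, Reader

# Surprise expects the columns in user, item, rating order.
ratings_df = pd.DataFrame({
    'userID': [1, 1, 1, 2, 2, 3, 3, 4, 4, 5],
    'itemID': [10, 20, 30, 10, 40, 20, 30, 10, 40, 20],
    'rating': [4.0, 3.5, 5.0, 2.0, 4.5, 3.0, 4.0, 5.0, 2.5, 3.5],
})

best_params, best_score = hyperparameter_tuning(ratings_df)
print(best_score['rmse'])

# Refit an SVD with the tuned parameters on the full data set.
algo = SVD(**best_params)
trainset = Dataset.load_from_df(ratings_df, Reader()).build_full_trainset()
algo.fit(trainset)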
Example #7
def test_randomizedsearchcv_best_estimator(u1_ml100k):
    """Ensure that the best estimator is the one that gives the best score (by
    re-running it)"""

    param_distributions = {'n_epochs': [5], 'lr_all': uniform(0.002, 0.003),
                           'reg_all': uniform(0.04, 0.02), 'n_factors': [1],
                           'init_std_dev': [0]}
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae'],
                            cv=PredefinedKFold(), joblib_verbose=100)
    rs.fit(u1_ml100k)
    best_estimator = rs.best_estimator['mae']

    # recompute MAE of best_estimator
    mae = cross_validate(best_estimator, u1_ml100k, measures=['MAE'],
                         cv=PredefinedKFold())['test_mae']

    assert mae == rs.best_score['mae']
Example #8
def test_randomizedsearchcv_parameter_combinations_with_distribution():
    """Ensure the parameter_combinations attribute populates correctly by
    checking its length."""
    param_distributions = {'bsl_options': {'method': ['als', 'sgd'],
                                           'reg': [1, 2]},
                           'k': randint(2, 4),  # min inclusive, max exclusive
                           'sim_options': {'name': ['msd', 'cosine'],
                                           'min_support': [1, 5],
                                           'user_based': [False]}
                           }
    rs = RandomizedSearchCV(SVD, param_distributions, n_iter=10)
    assert len(rs.param_combinations) == 10
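Because the combinations are drawn when the object is constructed, they can already be inspected before any call to fit(). Given the rs built above:

# Each entry is one fully specified parameter dict, e.g. a concrete 'k' drawn
# from randint(2, 4) together with one sampled value per key of bsl_options
# and sim_options.
for combo in rs.param_combinations:
    print(combo)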
Example #9
def test_randomizedsearchcv_same_splits():
    """Ensure that all parameter combinations are tested on the same splits (we
    check their RMSE scores are the same once averaged over the splits, which
    should be enough). We use as much parallelism as possible."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, reader=Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)

    # all RMSE should be the same (as param combinations are the same)
    param_distributions = {'n_epochs': [5], 'lr_all': uniform(.2, 0),
                           'reg_all': uniform(.4, 0), 'n_factors': [5],
                           'random_state': [0]}
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['RMSE'], cv=kf,
                            n_jobs=-1)
    rs.fit(data)

    rmse_scores = [m for m in rs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal

    # Note: actually, even when setting random_state=None in kf, the same folds
    # are used because we use product(param_comb, kf.split(...)). However, it's
    # needed to have the same folds when calling fit again:
    rs.fit(data)
    rmse_scores += [m for m in rs.cv_results['mean_test_rmse']]
    assert len(set(rmse_scores)) == 1  # assert rmse_scores are all equal
Example #10
def test_randomizedsearchcv_refit():
    """Test refit method of RandomizedSearchCV class."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'))

    param_distributions = {'n_epochs': [5], 'lr_all': uniform(0.002, 0.003),
                           'reg_all': uniform(0.4, 0.2), 'n_factors': [2]}

    # assert rs.fit() and rs.test will use best estimator for mae (first
    # appearing in measures)
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit=True)
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    mae_preds = rs.best_estimator['mae'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == mae_preds

    # assert rs.fit() and rs.test will use best estimator for rmse
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit='rmse')
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    rmse_preds = rs.best_estimator['rmse'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == rmse_preds
    # test that predict() can be called
    rs.predict(2, 4)

    # assert test() and predict() cannot be used when refit is false
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit=False)
    rs.fit(data)
    with pytest.raises(ValueError):
        rs.test(data.construct_testset(data.raw_ratings))
    with pytest.raises(ValueError):
        rs.predict('1', '2')

    # test that error is raised if used with load_from_folds
    train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
    test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit=True)
    with pytest.raises(ValueError):
        rs.fit(data)
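With refit enabled, the fitted search object delegates test() and predict() to the refit best estimator, so raw user and item ids can be passed to it directly. A small sketch, given the rs fitted with refit='rmse' above (the ids are illustrative):

# predict() takes raw ids; ids unseen during training fall back to a default
# (global mean) estimate instead of raising.
pred = rs.predict('196', '302')
print(pred.est)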
Example #11

current_algo = KNNBaseline

similarity_options = {
    'name': ['pearson_baseline'],  #it is recommended to use Pearson Baseline
    'user_based': [True, False]
}
parameters_distributions = {
    'k': np.arange(1, 60, 2),
    'min_k': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
    'sim_options': similarity_options
}
searchCV = RandomizedSearchCV(current_algo,
                              parameters_distributions,
                              n_iter=30,
                              measures=['rmse'],
                              n_jobs=12,
                              cv=5)
searchCV.fit(data)
end = datetime.datetime.now()
print(end - start, "\nEnd.....")
print(searchCV.best_params['rmse'])

#Second iteration
#0.8864
#{'k': 37, 'min_k': 11, 'sim_options': {'name': 'pearson_baseline', 'user_based': False}}
alg = KNNBaseline(k=37,
                  min_k=11,
                  sim_options={
                      'name': 'pearson_baseline',
                      'user_based': False
                  })
Example #12
    "name": ["pearson", "msd", "cosine"],
    "min_support": [2, 4, 5],
    "user_based": [False, True],
}

print('\nRUNNING GRID SEARCH')
print('   "name": ["pearson", "msd", "cosine"]')
print('   "min_support": [2, 3, 4, 5]')
print('   "user_based": [False, True]\n')

param_grid = {"sim_options": sim_options}
start_time = time.time()

# RandomizedSearchCV
gs = RandomizedSearchCV(KNNWithZScore,
                        param_grid,
                        measures=["rmse", "mae"],
                        cv=3)
gs.fit(training_data)

print()
print("RMSE:", gs.best_score["rmse"])
print(gs.best_params["rmse"])
print()
print("MAE:", gs.best_score["mae"])
print(gs.best_params["mae"])
print()
print("Time Taken: ", time.time() - start_time)
print()

# OUTPUT:
"""
Example #13
def test_randomizedsearchcv_refit(u1_ml100k):
    """Test refit method of RandomizedSearchCV class."""

    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'),
                                  rating_scale=(1, 5))

    param_distributions = {'n_epochs': [5], 'lr_all': uniform(0.002, 0.003),
                           'reg_all': uniform(0.4, 0.2), 'n_factors': [2]}

    # assert rs.fit() and rs.test will use best estimator for mae (first
    # appearing in measures)
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit=True)
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    mae_preds = rs.best_estimator['mae'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == mae_preds

    # assert rs.fit() and rs.test will use best estimator for rmse
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit='rmse')
    rs.fit(data)
    rs_preds = rs.test(data.construct_testset(data.raw_ratings))
    rmse_preds = rs.best_estimator['rmse'].test(
        data.construct_testset(data.raw_ratings))
    assert rs_preds == rmse_preds
    # test that predict() can be called
    rs.predict(2, 4)

    # assert test() and predict() cannot be used when refit is false
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit=False)
    rs.fit(data)
    with pytest.raises(ValueError):
        rs.test(data.construct_testset(data.raw_ratings))
    with pytest.raises(ValueError):
        rs.predict('1', '2')

    # test that error is raised if used with load_from_folds
    rs = RandomizedSearchCV(SVD, param_distributions, measures=['mae', 'rmse'],
                            cv=2, refit=True)
    with pytest.raises(ValueError):
        rs.fit(u1_ml100k)
Example #14
baseline_predictor_options = {
    'learning_rate': [0.002, 0.005, 0.01],
    'n_epochs': [50, 100, 150],
    'reg': [0.01, 0.02, 0.05]
}  # We set the baseline predictor options for the RandomizedSearch
grid_of_parameters = {
    'bsl_options': baseline_predictor_options,
    'k': np.arange(10, 50, 2),
    'min_k': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
    'sim_options': similarity_options
}  #The final grid of parameters for the Randomized search
kf = KFold(n_splits=5, random_state=0)  #We set five folds
CV = RandomizedSearchCV(
    KNNBaseline,
    param_distributions=grid_of_parameters,
    n_iter=50,
    measures=['rmse'],
    cv=kf,
    n_jobs=8,
    joblib_verbose=10000
)  # Hyperparameter tuning with randomized search using 8 threads
CV.fit(data)
end = datetime.datetime.now()
print(end - start)
# print(CV.best_params)
##BEST PARAMETERS
# {'rmse': {'bsl_options': {'method': 'sgd',
#    'learning_rate': 0.002,
#    'n_epochs': 100,
#    'reg': 0.05},
#   'k': 40,
#   'min_k': 12,
Example #15
    def hyper_tune(self):
        """
        Use Surprise's RandomizedSearchCV to tune SVD model hyperparameters.

        As recommended by https://surprise.readthedocs.io/en/stable/FAQ.html,
        split the data set into an A and a B set to allow for unbiased accuracy
        evaluation of the tuned parameters.

        RandomizedSearchCV is much faster than GridSearchCV when the data set
        is not small.

        Returns
        -------
        algo : Tuned Surprise algorithm object
            Can be used to train and test.

        """
        tune_method = self.tune_method
        print('Tuning...')
        # Separate data into A and B sets for unbiased accuracy evaluation
        raw_ratings = self.data_ml.raw_ratings
        # shuffle ratings
        random.shuffle(raw_ratings)
        # A = 90% of the data, B = 10% of the data
        threshold = int(.9 * len(raw_ratings))
        A_raw_ratings = raw_ratings[:threshold]
        B_raw_ratings = raw_ratings[threshold:]
        # make data_ml the set A
        data_ml = self.data_ml
        data_ml.raw_ratings = A_raw_ratings
        # search grid
        param_grid = {
            'n_factors': [50, 100, 150],
            'n_epochs': [30, 50, 70],
            'lr_all': [0.002, 0.005, 0.01],
            'reg_all': [0.02, 0.1, 0.4, 0.6]
        }
        gs = RandomizedSearchCV(SVD,
                                param_grid,
                                measures=['rmse', 'mae', 'fcp'],
                                cv=self.n_splits)
        # fit
        start_time = time.time()
        gs.fit(data_ml)
        search_time = time.time() - start_time
        print("Took {} seconds for search.".format(search_time))
        # best score
        print('Best score: ' + str(gs.best_score[tune_method]))
        # combination of parameters that gave the best score according to the tune_method
        print('Best params: ' + str(gs.best_params[tune_method]))

        # get resulting algorithm with tuned parameters
        algo = gs.best_estimator[tune_method]

        # retrain on the whole set A
        trainset = data_ml.build_full_trainset()
        algo.fit(trainset)

        # Compute biased accuracy on A
        predictions = algo.test(trainset.build_testset())
        print('Biased accuracy:')
        accuracy.rmse(predictions)
        accuracy.mae(predictions)
        accuracy.fcp(predictions)

        # Compute unbiased accuracy on B
        # construct a testset from the held-out B ratings
        testset = data_ml.construct_testset(B_raw_ratings)
        predictions = algo.test(testset)
        print('Unbiased accuracy:')
        accuracy.rmse(predictions)
        accuracy.mae(predictions)
        accuracy.fcp(predictions)

        return algo