def test_with_cross_validation(self):
    """Smoke-test a cross-validation Experiment with PMF and both
    rating (MAE, RMSE) and ranking (Recall@1, F1@1) metrics."""
    experiment = Experiment(
        eval_method=CrossValidation(self.data),
        models=[PMF(1, 0)],
        metrics=[MAE(), RMSE(), Recall(1), FMeasure(1)],
        verbose=True,
    )
    experiment.run()
def test_with_cross_validation():
    """Read UIR triplets from the test fixture file and smoke-test a
    cross-validation Experiment with PMF and four metrics."""
    uir_data = reader.read_uir('./tests/data.txt')
    experiment = Experiment(
        eval_method=CrossValidation(uir_data),
        models=[PMF(1, 0)],
        metrics=[MAE(), RMSE(), Recall(1), FMeasure(1)],
        verbose=True,
    )
    experiment.run()
def test_with_cross_validation(self):
    """Cross-validate on self.data augmented with one extra rating,
    keeping unknown users/items in the evaluation (exclude_unknowns=False)."""
    # One additional (user, item, rating) triple built from existing entries.
    extra_rating = (self.data[0][0], self.data[1][1], 5.0)
    eval_method = CrossValidation(
        self.data + [extra_rating],
        exclude_unknowns=False,
        verbose=True,
    )
    Experiment(
        eval_method=eval_method,
        models=[PMF(1, 0)],
        metrics=[Recall(1), FMeasure(1)],
        verbose=True,
    ).run()
def test_with_ratio_split(self):
    """Smoke-test a ratio-split Experiment on augmented data, then verify
    that Experiment rejects invalid constructor arguments.

    Fix: the original `try: ... except ValueError: assert True` pattern
    passed even when no exception was raised; an `else` clause now fails
    the test if ValueError is not raised.
    """
    # Append one (known-user, known-item, rating) triple; with
    # exclude_unknowns=True it should survive the split.
    eval_method = RatioSplit(
        self.data + [(self.data[0][0], self.data[1][1], 5.0)],
        exclude_unknowns=True,
        seed=123,
        verbose=True,
    )
    Experiment(
        eval_method=eval_method,
        models=[PMF(1, 0)],
        metrics=[MAE(), RMSE()],
        verbose=True,
    ).run()

    # Constructor validation: these calls must raise ValueError.
    try:
        Experiment(None, None, None)
    except ValueError:
        pass
    else:
        raise AssertionError('Experiment(None, None, None) should raise ValueError')
    try:
        Experiment(None, [PMF(1, 0)], None)
    except ValueError:
        pass
    else:
        raise AssertionError('Experiment(None, [PMF(1, 0)], None) should raise ValueError')
def test_with_ratio_split():
    """End-to-end ratio-split Experiment: check result shapes, per-user
    metric containers, and constructor validation.

    Fix: the original `try: ... except ValueError: assert True` pattern
    passed even when no exception was raised; an `else` clause now fails
    the test if ValueError is not raised.
    """
    data = Reader.read_uir_triplets('./tests/data.txt')
    exp = Experiment(
        eval_method=RatioSplit(data, verbose=True),
        models=[PMF(1, 0)],
        metrics=[MAE(), RMSE(), Recall(1), FMeasure(1)],
        verbose=True,
    )
    exp.run()

    # One model x four metrics in the averaged results.
    assert (1, 4) == exp.avg_results.shape
    assert 1 == len(exp.user_results)
    assert 4 == len(exp.user_results['PMF'])
    # Per-user results keyed by metric name; 2 users expected in the fixture.
    assert 2 == len(exp.user_results['PMF']['MAE'])
    assert 2 == len(exp.user_results['PMF']['RMSE'])
    assert 2 == len(exp.user_results['PMF']['Recall@1'])
    assert 2 == len(exp.user_results['PMF']['F1@1'])

    # Constructor validation: these calls must raise ValueError.
    try:
        Experiment(None, None, None)
    except ValueError:
        pass
    else:
        raise AssertionError('Experiment(None, None, None) should raise ValueError')
    try:
        Experiment(None, [PMF(1, 0)], None)
    except ValueError:
        pass
    else:
        raise AssertionError('Experiment(None, [PMF(1, 0)], None) should raise ValueError')
def test_with_ratio_split(self):
    """Smoke-test a ratio-split Experiment with PMF and four metrics, then
    verify that Experiment rejects invalid constructor arguments.

    Fix: the original `try: ... except ValueError: assert True` pattern
    passed even when no exception was raised; an `else` clause now fails
    the test if ValueError is not raised.
    """
    exp = Experiment(
        eval_method=RatioSplit(self.data, verbose=True),
        models=[PMF(1, 0)],
        metrics=[MAE(), RMSE(), Recall(1), FMeasure(1)],
        verbose=True,
    )
    exp.run()

    # Constructor validation: these calls must raise ValueError.
    try:
        Experiment(None, None, None)
    except ValueError:
        pass
    else:
        raise AssertionError('Experiment(None, None, None) should raise ValueError')
    try:
        Experiment(None, [PMF(1, 0)], None)
    except ValueError:
        pass
    else:
        raise AssertionError('Experiment(None, [PMF(1, 0)], None) should raise ValueError')
print('-------OPEN LOOP EVALUATION-------') # load the closed/open loop datasets ds_closed = yahoo_music.load_feedback(variant='closed_loop') ds_open = yahoo_music.load_feedback(variant='open_loop') # train on closed-loop dataset and evaluate on open loop (random) dataset eval_method = BaseMethod.from_splits(train_data=ds_closed, test_data=ds_open, rating_threshold=4.0, verbose=True) # run the experiment exp_open = Experiment(eval_method=eval_method, models=get_models(variant='large', dims=dims), metrics=get_metrics(variant='large'), verbose=True) exp_open.run() with open('../data/exp_open_yahoo.pkl', 'wb') as exp_file: pickle.dump(exp_open.result, exp_file) print('-------STRATIFIED EVALUATION-------') stra_eval_method = StratifiedEvaluation(data=ds_closed, n_strata=2, rating_threshold=4.0, verbose=True) # run the experiment