Example #1
    def test_evaluate(self):
        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=True,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=False,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

        # densify the data: every observed user gets a rating for every observed item
        users = []
        items = []
        for u, i, r in self.data:
            users.append(u)
            items.append(i)
        for u in users:
            for i in items:
                self.data.append((u, i, 5))

        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=True,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)

        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=False,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)
Example #2
    def test_mae(self):
        mae = MAE()

        self.assertEqual(mae.type, 'rating')
        self.assertEqual(mae.name, 'MAE')

        self.assertEqual(0, mae.compute(np.asarray([0]), np.asarray([0])))
        self.assertEqual(1, mae.compute(np.asarray([0, 1]), np.asarray([1, 0])))
        self.assertEqual(2, mae.compute(np.asarray([0, 1]), np.asarray([2, 3]), np.asarray([1, 3])))
Example #3
def test_mae():
    mae = MAE()

    assert mae.type == 'rating'
    assert mae.name == 'MAE'

    assert 0 == mae.compute(np.asarray([0]), np.asarray([0]))
    assert 1 == mae.compute(np.asarray([0, 1]), np.asarray([1, 0]))
    assert 2 == mae.compute(np.asarray([0, 1]), np.asarray([2, 3]),
                            np.asarray([1, 3]))
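
The third array passed to compute in the last assertion of Examples #2 and #3 is presumably a per-rating weight vector: with weights [1, 3] the weighted mean absolute error is (|0 - 2| * 1 + |1 - 3| * 3) / (1 + 3) = 8 / 4 = 2, which matches the expected value. A minimal sketch of that computation, with the weight semantics being an assumption rather than something the snippets confirm:

import numpy as np

truth = np.asarray([0, 1])    # ground-truth ratings
pred = np.asarray([2, 3])     # predicted ratings
weights = np.asarray([1, 3])  # per-rating weights (assumed meaning of the third argument)

weighted_mae = np.sum(weights * np.abs(truth - pred)) / np.sum(weights)
assert weighted_mae == 2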
Example #4
def test_with_ratio_split():
    data_file = './tests/data.txt'
    data = Reader.read_uir_triplets(data_file)
    exp = Experiment(eval_method=RatioSplit(data, verbose=True),
                     models=[PMF(1, 0)],
                     metrics=[MAE(), RMSE(),
                              Recall(1), FMeasure(1)],
                     verbose=True)
    exp.run()

    assert (1, 4) == exp.avg_results.shape

    assert 1 == len(exp.user_results)
    assert 4 == len(exp.user_results['PMF'])
    assert 2 == len(exp.user_results['PMF']['MAE'])
    assert 2 == len(exp.user_results['PMF']['RMSE'])
    assert 2 == len(exp.user_results['PMF']['Recall@1'])
    assert 2 == len(exp.user_results['PMF']['F1@1'])

    try:
        Experiment(None, None, None)
    except ValueError:
        assert True

    try:
        Experiment(None, [PMF(1, 0)], None)
    except ValueError:
        assert True
Example #5
    def test_with_cross_validation(self):
        exp = Experiment(eval_method=CrossValidation(self.data),
                         models=[PMF(1, 0)],
                         metrics=[MAE(), RMSE(),
                                  Recall(1),
                                  FMeasure(1)],
                         verbose=True)
        exp.run()
Example #6
    def test_evaluate(self, n_strata=2):
        stra_eval_method = PropensityStratifiedEvaluation(
            data=self.ml_100k, val_size=0.1, n_strata=n_strata, rating_threshold=4.0, verbose=True
        )
        model = MF(k=1, max_iter=0)
        result = stra_eval_method.evaluate(
            model, metrics=[MAE(), AUC()], user_based=False
        )
        result.__str__()
Example #7
def test_with_cross_validation():
    data_file = './tests/data.txt'
    data = reader.read_uir(data_file)
    exp = Experiment(eval_method=CrossValidation(data),
                     models=[PMF(1, 0)],
                     metrics=[MAE(), RMSE(),
                              Recall(1), FMeasure(1)],
                     verbose=True)
    exp.run()
Example #8
    def test_organize_metrics(self):
        bm = BaseMethod()

        rating_metrics, ranking_metrics = bm._organize_metrics([MAE(), AUC()])
        self.assertEqual(len(rating_metrics), 1)  # MAE
        self.assertEqual(len(ranking_metrics), 1)  # AUC

        try:
            bm._organize_metrics(None)
        except ValueError:
            assert True
Example #9
def test_organize_metrics():
    from cornac.metrics import MAE, AUC

    bm = BaseMethod()

    rating_metrics, ranking_metrics = bm._organize_metrics([MAE(), AUC()])
    assert 1 == len(rating_metrics)  # MAE
    assert 1 == len(ranking_metrics)  # AUC

    try:
        bm._organize_metrics(None)
    except ValueError:
        assert True
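
A minimal sketch of what these two tests rely on, assuming BaseMethod._organize_metrics splits metrics by their type attribute (MAE is a rating metric, as Examples #2 and #3 show; AUC is assumed to be a ranking metric):

from cornac.metrics import MAE, AUC

assert MAE().type == 'rating'   # rating metric, see Examples #2 and #3
assert AUC().type == 'ranking'  # assumption: AUC is organized as a ranking metric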
Example #10
def test_evaluate():
    data_file = './tests/data.txt'
    data = reader.read_uir(data_file)

    ratio_split = RatioSplit(data, exclude_unknowns=True, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

    ratio_split = RatioSplit(data, exclude_unknowns=False, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

    # densify the data: every observed user gets a rating for every observed item
    users = []
    items = []
    for u, i, r in data:
        users.append(u)
        items.append(i)
    for u in users:
        for i in items:
            data.append((u, i, 5))

    ratio_split = RatioSplit(data, exclude_unknowns=True, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)

    ratio_split = RatioSplit(data, exclude_unknowns=False, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)
Example #11
    def test_with_ratio_split(self):
        exp = Experiment(eval_method=RatioSplit(self.data, verbose=True),
                         models=[PMF(1, 0)],
                         metrics=[MAE(), RMSE(),
                                  Recall(1),
                                  FMeasure(1)],
                         verbose=True)
        exp.run()

        try:
            Experiment(None, None, None)
        except ValueError:
            assert True

        try:
            Experiment(None, [PMF(1, 0)], None)
        except ValueError:
            assert True
Example #12
    def test_with_ratio_split(self):
        Experiment(eval_method=RatioSplit(
            self.data + [(self.data[0][0], self.data[1][1], 5.0)],
            exclude_unknowns=True,
            seed=123,
            verbose=True),
                   models=[PMF(1, 0)],
                   metrics=[MAE(), RMSE()],
                   verbose=True).run()

        try:
            Experiment(None, None, None)
        except ValueError:
            assert True

        try:
            Experiment(None, [PMF(1, 0)], None)
        except ValueError:
            assert True
Example #13
    def test_evaluate(self):
        data = Reader().read('./tests/data.txt')
        bm = BaseMethod.from_splits(train_data=data, test_data=data)
        model = MF(k=1, max_iter=0)
        result = bm.evaluate(model, metrics=[MAE()], user_based=False)
        result.__str__()
Example #14
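This snippet starts from an already-loaded dataset (ml_100k) and already-imported names. A plausible preamble, under the assumption that ml_100k comes from Cornac's built-in MovieLens 100K loader, might look like:

import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.models import MF, PMF, BPR
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP

# assumption: ml_100k holds the MovieLens 100K user-item-rating feedback
ml_100k = movielens.load_feedback(variant="100K")
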
rs = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# initialize models, here we are comparing: Biased MF, PMF, and BPR
models = [
    MF(k=10,
       max_iter=25,
       learning_rate=0.01,
       lambda_reg=0.02,
       use_bias=True,
       seed=123),
    PMF(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001, seed=123),
    BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123),
]

# define metrics to evaluate the models
metrics = [
    MAE(),
    RMSE(),
    Precision(k=10),
    Recall(k=10),
    NDCG(k=10),
    AUC(),
    MAP()
]

# put it together in an experiment, voilà!
cornac.Experiment(eval_method=rs,
                  models=models,
                  metrics=metrics,
                  user_based=True).run()
Example #15
from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.models import MF
from cornac.metrics import MAE, RMSE
from cornac.utils import cache

# Download the training and test splits provided with MovieLens 100K
reader = Reader()
train_data = reader.read(
    cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))
test_data = reader.read(
    cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))

eval_method = BaseMethod.from_splits(train_data=train_data,
                                     test_data=test_data,
                                     exclude_unknowns=False,
                                     verbose=True)

mf = MF(k=10,
        max_iter=25,
        learning_rate=0.01,
        lambda_reg=0.02,
        use_bias=True,
        early_stop=True,
        verbose=True)

# Evaluation
result = eval_method.evaluate(model=mf,
                              metrics=[MAE(), RMSE()],
                              user_based=True)
print(result)
Example #16
import cornac

from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP
from cornac.models import WMF, BPR
from cornac.eval_methods import PropensityStratifiedEvaluation
from cornac.experiment import Experiment

# Load the MovieLens 1M dataset
ml_dataset = cornac.datasets.movielens.load_feedback(variant="1M")

# Instantiate the PropensityStratifiedEvaluation method
stra_eval_method = PropensityStratifiedEvaluation(
    data=ml_dataset,
    n_strata=2,  # number of strata
    rating_threshold=4.0,
    verbose=True)

# define the examined models
models = [
    WMF(k=10, seed=123),
    BPR(k=10, seed=123),
]

# define the metrics
metrics = [MAE(), RMSE(), Precision(k=10), Recall(k=10), NDCG(), AUC(), MAP()]

# run an experiment
exp_stra = Experiment(eval_method=stra_eval_method,
                      models=models,
                      metrics=metrics)

exp_stra.run()