Code example #1
    def test_evaluate(self):
        # pooled (non-user-based) evaluation, unknown users/items excluded
        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=True,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

        # same evaluation, but unknown users/items are kept
        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=False,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

        # densify the data: add a rating of 5 for every (user, item) pair
        users = []
        items = []
        for u, i, r in self.data:
            users.append(u)
            items.append(i)
        for u in users:
            for i in items:
                self.data.append((u, i, 5))

        # repeat both runs with per-user (user_based) averaging
        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=True,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)

        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=False,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)
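
The four runs above differ only in exclude_unknowns and user_based. In cornac, user_based=True computes a rating metric per user and then averages across users, while user_based=False pools all test ratings together. A tiny standalone illustration of that averaging difference (plain NumPy, not cornac internals):

import numpy as np

# absolute rating errors for two users: u1 has four small errors, u2 one large
errors = {"u1": [1.0, 1.0, 1.0, 1.0], "u2": [3.0]}

pooled_mae = np.mean(sum(errors.values(), []))                 # 7/5 = 1.4
per_user_mae = np.mean([np.mean(e) for e in errors.values()])  # (1+3)/2 = 2.0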
Code example #2
# assumed imports for this snippet (older cornac API)
from cornac.data import Reader
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.metrics import MAE, RMSE, Recall, FMeasure
from cornac.models import PMF


def test_with_ratio_split():
    data_file = './tests/data.txt'
    data = Reader.read_uir_triplets(data_file)
    exp = Experiment(eval_method=RatioSplit(data, verbose=True),
                     models=[PMF(1, 0)],
                     metrics=[MAE(), RMSE(),
                              Recall(1), FMeasure(1)],
                     verbose=True)
    exp.run()

    # one model evaluated on four metrics
    assert (1, 4) == exp.avg_results.shape

    assert 1 == len(exp.user_results)
    assert 4 == len(exp.user_results['PMF'])
    assert 2 == len(exp.user_results['PMF']['MAE'])
    assert 2 == len(exp.user_results['PMF']['RMSE'])
    assert 2 == len(exp.user_results['PMF']['Recall@1'])
    assert 2 == len(exp.user_results['PMF']['F1@1'])

    # missing required arguments should raise ValueError
    try:
        Experiment(None, None, None)
    except ValueError:
        assert True

    try:
        Experiment(None, [PMF(1, 0)], None)
    except ValueError:
        assert True
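
A side note on the two try/except blocks: as written, they also pass silently when no exception is raised at all. With pytest, the same intent is usually expressed via pytest.raises; a sketch:

import pytest

with pytest.raises(ValueError):
    Experiment(None, None, None)

with pytest.raises(ValueError):
    Experiment(None, [PMF(1, 0)], None)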
Code example #3
    def test_with_cross_validation(self):
        exp = Experiment(eval_method=CrossValidation(self.data),
                         models=[PMF(1, 0)],
                         metrics=[MAE(), RMSE(),
                                  Recall(1),
                                  FMeasure(1)],
                         verbose=True)
        exp.run()
Code example #4
    def test_with_cross_validation(self):
        # inject one extra (user, item, rating) triple and keep unknowns
        Experiment(eval_method=CrossValidation(
            self.data + [(self.data[0][0], self.data[1][1], 5.0)],
            exclude_unknowns=False,
            verbose=True),
                   models=[PMF(1, 0)],
                   metrics=[Recall(1), FMeasure(1)],
                   verbose=True).run()
Code example #5
# assumed imports for this snippet
from cornac.data import reader
from cornac.eval_methods import CrossValidation
from cornac.experiment import Experiment
from cornac.metrics import MAE, RMSE, Recall, FMeasure
from cornac.models import PMF


def test_with_cross_validation():
    data_file = './tests/data.txt'
    data = reader.read_uir(data_file)
    exp = Experiment(eval_method=CrossValidation(data),
                     models=[PMF(1, 0)],
                     metrics=[MAE(), RMSE(),
                              Recall(1), FMeasure(1)],
                     verbose=True)
    exp.run()
Code example #6
# assumed imports for this snippet
from cornac.data import reader
from cornac.eval_methods import RatioSplit
from cornac.metrics import MAE, Recall
from cornac.models import MF


def test_evaluate():
    data_file = './tests/data.txt'
    data = reader.read_uir(data_file)

    # pooled evaluation, with and without unknown users/items
    ratio_split = RatioSplit(data, exclude_unknowns=True, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

    ratio_split = RatioSplit(data, exclude_unknowns=False, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

    # densify the data: add a rating of 5 for every (user, item) pair
    users = []
    items = []
    for u, i, r in data:
        users.append(u)
        items.append(i)
    for u in users:
        for i in items:
            data.append((u, i, 5))

    # repeat both runs with per-user averaging
    ratio_split = RatioSplit(data, exclude_unknowns=True, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)

    ratio_split = RatioSplit(data, exclude_unknowns=False, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)
Code example #7
    def test_with_ratio_split(self):
        exp = Experiment(eval_method=RatioSplit(self.data, verbose=True),
                         models=[PMF(1, 0)],
                         metrics=[MAE(), RMSE(),
                                  Recall(1),
                                  FMeasure(1)],
                         verbose=True)
        exp.run()

        # missing required arguments should raise ValueError
        try:
            Experiment(None, None, None)
        except ValueError:
            assert True

        try:
            Experiment(None, [PMF(1, 0)], None)
        except ValueError:
            assert True
Code example #8
    def test_recall(self):
        rec = Recall()

        self.assertEqual(rec.type, "ranking")
        self.assertEqual(rec.name, "Recall@-1")  # k=-1 means no cutoff

        self.assertEqual(1, rec.compute(np.asarray([1]), np.asarray([0])))

        ground_truth = np.asarray([1, 0, 1])  # items 0 and 2 are relevant
        rec_list = np.asarray([0, 2, 1])  # ranked list covers both relevant items
        self.assertEqual(1, rec.compute(ground_truth, rec_list))

        ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
        rec_list = np.asarray([1, 2, 0])  # item 2 appears in the list
        self.assertEqual(1, rec.compute(ground_truth, rec_list))

        rec_2 = Recall(k=2)
        self.assertEqual(rec_2.k, 2)

        ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
        rec_list = np.asarray([1, 2, 0])  # item 2 is within the top-2
        self.assertEqual(1, rec_2.compute(ground_truth, rec_list))
Code example #9
File: test_ranking.py  Project: Andrew-DungLe/cornac
import numpy as np

from cornac.metrics import Recall  # assumed import for this snippet


def test_recall():
    rec = Recall()

    assert rec.type == 'ranking'
    assert rec.name == 'Recall@-1'  # k=-1 means no cutoff

    assert 1 == rec.compute(np.asarray([1]), np.asarray([0]))

    ground_truth = np.asarray([1, 0, 1])  # items 0 and 2 are relevant
    rec_list = np.asarray([0, 2, 1])  # ranked list covers both relevant items
    assert 1 == rec.compute(ground_truth, rec_list)

    ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
    rec_list = np.asarray([1, 2, 0])  # item 2 appears in the list
    assert 1 == rec.compute(ground_truth, rec_list)

    rec_2 = Recall(k=2)
    assert rec_2.k == 2

    ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
    rec_list = np.asarray([1, 2, 0])  # item 2 is within the top-2
    assert 1 == rec_2.compute(ground_truth, rec_list)
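
Both recall tests exercise the same contract: Recall@k is the fraction of relevant items that appear in the top-k positions of the ranked list, with k=-1 meaning the whole list is used. A minimal NumPy sketch of that computation, written for illustration rather than taken from cornac's implementation:

import numpy as np

def recall_at_k(gt_pos, pd_rank, k=-1):
    # gt_pos: binary relevance per item; pd_rank: item indices in ranked order
    top_k = pd_rank if k <= 0 else pd_rank[:k]
    return np.sum(gt_pos[top_k] > 0) / np.sum(gt_pos > 0)

ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
rec_list = np.asarray([1, 2, 0])      # item 2 sits within the top-2
print(recall_at_k(ground_truth, rec_list, k=2))  # 1.0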
Code example #10
    lambda_d=0.1,
    min_user_freq=2,
    max_iter=1000,
    trainable=True,
    verbose=True,
    init_params=params,
)

n_items = eval_method.train_set.num_items

# top-k cutoffs at 1%, 5%, and 10% of the item count
k_1 = int(n_items / 100)
k_5 = int(n_items * 5 / 100)
k_10 = int(n_items * 10 / 100)

Experiment(
    eval_method,
    models=[model],
    metrics=[
        AUC(),
        Recall(k=k_1),
        Recall(k=k_5),
        Recall(k=k_10),
        NDCG(k=k_1),
        NDCG(k=k_5),
        NDCG(k=k_10),
    ],
    show_validation=True,
    save_dir="dist/toy/result",
    verbose=True,
).run()
Code example #11
File: first_example.py  Project: zshwuhan/cornac
import cornac  # assumed imports and data loading, not part of the original snippet
from cornac.eval_methods import RatioSplit
from cornac.models import MF, PMF, BPR
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP

# load the built-in MovieLens 100K dataset
ml_100k = cornac.datasets.movielens.load_feedback()

# split the data into 80% train / 20% test based on ratio
rs = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# initialize models, here we are comparing: Biased MF, PMF, and BPR
models = [
    MF(k=10,
       max_iter=25,
       learning_rate=0.01,
       lambda_reg=0.02,
       use_bias=True,
       seed=123),
    PMF(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001, seed=123),
    BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123),
]

# define metrics to evaluate the models
metrics = [
    MAE(),
    RMSE(),
    Precision(k=10),
    Recall(k=10),
    NDCG(k=10),
    AUC(),
    MAP()
]

# put it together in an experiment, voilà!
cornac.Experiment(eval_method=rs,
                  models=models,
                  metrics=metrics,
                  user_based=True).run()
Code example #12
import cornac  # assumed import, needed for cornac.datasets below
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP
from cornac.models import WMF, BPR  # assumed import for the models used below

from cornac.eval_methods import PropensityStratifiedEvaluation
from cornac.experiment import Experiment

# Load the MovieLens 1M dataset
ml_dataset = cornac.datasets.movielens.load_feedback(variant="1M")

# Instantiate the PropensityStratifiedEvaluation method
stra_eval_method = PropensityStratifiedEvaluation(
    data=ml_dataset,
    n_strata=2,  # number of strata
    rating_threshold=4.0,
    verbose=True)

# define the examined models
models = [
    WMF(k=10, seed=123),
    BPR(k=10, seed=123),
]

# define the metrics
metrics = [MAE(), RMSE(), Precision(k=10), Recall(k=10), NDCG(), AUC(), MAP()]

# run an experiment
exp_stra = Experiment(eval_method=stra_eval_method,
                      models=models,
                      metrics=metrics)

exp_stra.run()