Example #1
import numpy as np

from cornac.metrics import NDCG


def test_ndcg():
    ndcg = NDCG()

    assert ndcg.type == 'ranking'
    assert ndcg.name == 'NDCG@-1'

    assert 1 == ndcg.compute(np.asarray([1]), np.asarray([0]))

    ground_truth = np.asarray([1, 0, 1])  # items 0 and 2 are relevant
    rec_list = np.asarray([0, 2, 1])  # both relevant items ranked first -> NDCG = 1
    assert 1 == ndcg.compute(ground_truth, rec_list)

    ndcg_2 = NDCG(k=2)
    assert ndcg_2.k == 2

    ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
    rec_list = np.asarray([1, 2, 0])  # the relevant item appears at rank 2
    assert 0.63 == float('{:.2f}'.format(ndcg_2.compute(
        ground_truth, rec_list)))
Example #2
import unittest

import numpy as np

from cornac.metrics import NDCG


# the same test as Example #1, written in unittest style; the TestCase wrapper
# is added here so the excerpt runs standalone
class TestNDCG(unittest.TestCase):
    def test_ndcg(self):
        ndcg = NDCG()

        self.assertEqual(ndcg.type, "ranking")
        self.assertEqual(ndcg.name, "NDCG@-1")

        self.assertEqual(1, ndcg.compute(np.asarray([1]), np.asarray([0])))

        ground_truth = np.asarray([1, 0, 1])  # items 0 and 2 are relevant
        rec_list = np.asarray([0, 2, 1])  # both relevant items ranked first -> NDCG = 1
        self.assertEqual(1, ndcg.compute(ground_truth, rec_list))

        ndcg_2 = NDCG(k=2)
        self.assertEqual(ndcg_2.k, 2)

        ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
        rec_list = np.asarray([1, 2, 0])  # the relevant item appears at rank 2
        self.assertEqual(
            0.63, float("{:.2f}".format(ndcg_2.compute(ground_truth,
                                                       rec_list))))
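
For reference, the 0.63 asserted in both tests above can be reproduced by hand with the standard binary-relevance NDCG formula, DCG@k = sum(rel_i / log2(i + 1)) normalized by the ideal DCG. A minimal sketch (not Cornac's internal implementation):

import numpy as np

# hand computation of NDCG@2 for the last test case above
ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
rec_list = np.asarray([1, 2, 0])      # ranking: item 1, item 2, item 0

k = 2
rels = ground_truth[rec_list[:k]]         # relevance at ranks 1..k -> [0, 1]
discounts = np.log2(np.arange(2, k + 2))  # log2(2), log2(3)
dcg = np.sum(rels / discounts)            # 1 / log2(3) ~= 0.6309
idcg = np.sum(np.sort(ground_truth)[::-1][:k] / discounts)  # ideal DCG = 1.0
print(round(dcg / idcg, 2))               # 0.63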
Example #3
# NOTE: this excerpt starts mid-way through a model constructor call; the model
# class, its leading arguments, the eval_method definition, and the imports are
# not part of the original snippet.
    lambda_d=0.1,
    min_user_freq=2,
    max_iter=1000,
    trainable=True,
    verbose=True,
    init_params=params,
)

n_items = eval_method.train_set.num_items  # size of the item catalog

k_1 = int(n_items / 100)        # top-k cut-off at 1% of the catalog
k_5 = int(n_items * 5 / 100)    # 5%
k_10 = int(n_items * 10 / 100)  # 10%

Experiment(
    eval_method,
    models=[model],
    metrics=[
        AUC(),
        Recall(k=k_1),
        Recall(k=k_5),
        Recall(k=k_10),
        NDCG(k=k_1),
        NDCG(k=k_5),
        NDCG(k=k_10),
    ],
    show_validation=True,
    save_dir="dist/toy/result",
    verbose=True,
).run()
Example #4
import cornac
from cornac.eval_methods import RatioSplit
from cornac.models import MF, PMF, BPR
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP

# load the built-in MovieLens 100K dataset and split the data based on ratio
ml_100k = cornac.datasets.movielens.load_feedback()
rs = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# initialize models, here we are comparing: Biased MF, PMF, and BPR
models = [
    MF(k=10,
       max_iter=25,
       learning_rate=0.01,
       lambda_reg=0.02,
       use_bias=True,
       seed=123),
    PMF(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001, seed=123),
    BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123),
]

# define metrics to evaluate the models
metrics = [
    MAE(),
    RMSE(),
    Precision(k=10),
    Recall(k=10),
    NDCG(k=10),
    AUC(),
    MAP()
]

# put it together in an experiment, voilà!
cornac.Experiment(eval_method=rs,
                  models=models,
                  metrics=metrics,
                  user_based=True).run()
Example #5
from cornac.data import SentimentModality
from cornac.datasets import amazon_toy
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.metrics import NDCG, RMSE
from cornac.models import MTER

# load the rating and sentiment data (as in Cornac's MTER example)
data = amazon_toy.load_feedback()
md = SentimentModality(data=amazon_toy.load_sentiment())

eval_method = RatioSplit(data,
                         test_size=0.2,
                         rating_threshold=1.0,
                         sentiment=md,
                         exclude_unknowns=True,
                         verbose=True,
                         seed=123)

mter = MTER(n_user_factors=15,
            n_item_factors=15,
            n_aspect_factors=12,
            n_opinion_factors=12,
            n_bpr_samples=1000,
            n_element_samples=200,
            lambda_reg=0.1,
            lambda_bpr=5,
            n_epochs=2000,
            lr=0.1,
            verbose=True,
            seed=123)

exp = Experiment(
    eval_method=eval_method,
    models=[mter],
    metrics=[RMSE(), NDCG(k=10), NDCG(k=20), NDCG(k=50), NDCG(k=100)],
)

exp.run()
Example #6
import cornac
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP
from cornac.models import WMF, BPR

from cornac.eval_methods import PropensityStratifiedEvaluation
from cornac.experiment import Experiment

# Load the MovieLens 1M dataset
ml_dataset = cornac.datasets.movielens.load_feedback(variant="1M")

# Instantiate an instance of PropensityStratifiedEvaluation method
stra_eval_method = PropensityStratifiedEvaluation(
    data=ml_dataset,
    n_strata=2,  # number of strata
    rating_threshold=4.0,
    verbose=True)

# define the examined models
models = [
    WMF(k=10, seed=123),
    BPR(k=10, seed=123),
]

# define the metrics
metrics = [MAE(), RMSE(), Precision(k=10), Recall(k=10), NDCG(), AUC(), MAP()]

# run an experiment
exp_stra = Experiment(eval_method=stra_eval_method,
                      models=models,
                      metrics=metrics)

exp_stra.run()
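
One note on the cut-off parameter: NDCG() without an explicit k, as in the metrics list of Example #6, scores the full ranking; that default is why Examples #1 and #2 assert the metric name "NDCG@-1". A short illustration:

from cornac.metrics import NDCG

ndcg_full = NDCG()    # default k=-1: score the entire ranked list
ndcg_10 = NDCG(k=10)  # truncate the ranking at the top-10 items

print(ndcg_full.name)  # NDCG@-1
print(ndcg_10.name)    # NDCG@10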