コード例 #1
0
 def test_evaluate(self, n_strata=2):
     """Smoke-test PropensityStratifiedEvaluation.evaluate() with a tiny MF model.

     Parameters
     ----------
     n_strata: int, default 2
         Number of propensity strata the evaluation method splits data into.
     """
     stra_eval_method = PropensityStratifiedEvaluation(
         data=self.ml_100k, val_size=0.1, n_strata=n_strata, rating_threshold=4.0, verbose=True
     )
     # max_iter=0 keeps the model untrained: we only care that the
     # evaluation pipeline runs end to end, not about metric quality.
     model = MF(k=1, max_iter=0)
     result = stra_eval_method.evaluate(
         model, metrics=[MAE(), AUC()], user_based=False
     )
     # str() is the idiomatic spelling of result.__str__(); this simply
     # checks that rendering the result table does not raise.
     str(result)
コード例 #2
0
    def test_organize_metrics(self):
        """_organize_metrics() splits metrics by type and rejects invalid input."""
        bm = BaseMethod()

        rating_metrics, ranking_metrics = bm._organize_metrics([MAE(), AUC()])
        self.assertEqual(len(rating_metrics), 1)  # MAE is the only rating metric
        self.assertEqual(len(ranking_metrics), 1)  # AUC is the only ranking metric

        # The original try/except could never fail: the except branch asserted
        # True, and raising nothing also passed. assertRaises actually fails
        # the test when the expected ValueError is not raised.
        with self.assertRaises(ValueError):
            bm._organize_metrics(None)
コード例 #3
0
    def test_auc(self):
        """AUC metric: check metadata and compute() on hand-worked examples."""
        metric = AUC()

        self.assertEqual("ranking", metric.type)
        self.assertEqual("AUC", metric.name)

        scores = np.array([0.1, 0.4, 0.35, 0.8])

        # Each case: (positive mask, negative mask or None, expected AUC).
        cases = [
            (np.array([0, 0, 1, 1]), None, 0.75),
            (np.array([0, 1, 0, 1]), None, 1.0),
            (np.array([0, 0, 1, 0]), np.array([1, 1, 0, 0]), 0.5),
        ]
        for gt_pos, gt_neg, expected in cases:
            if gt_neg is None:
                self.assertEqual(expected, metric.compute(scores, gt_pos))
            else:
                self.assertEqual(expected, metric.compute(scores, gt_pos, gt_neg))
コード例 #4
0
ファイル: test_ranking.py プロジェクト: GalLellouche/cornac
def test_auc():
    """AUC metric exposes the right metadata and scores worked examples."""
    metric = AUC()

    assert metric.type == 'ranking'
    assert metric.name == 'AUC'

    scores = np.array([0.1, 0.4, 0.35, 0.8])

    # Positives at indices 2, 3: of the four pos/neg pairs, three are
    # correctly ordered (0.35 > 0.1, 0.8 > 0.1, 0.8 > 0.4) -> 3/4.
    assert metric.compute(scores, np.array([0, 0, 1, 1])) == 0.75

    # Positives at indices 1, 3 outscore both negatives -> perfect AUC.
    assert metric.compute(scores, np.array([0, 1, 0, 1])) == 1.0

    # Explicit negative mask; index 3 is neither positive nor negative.
    assert metric.compute(scores, np.array([0, 0, 1, 0]), np.array([1, 1, 0, 0])) == 0.5
コード例 #5
0
def test_organize_metrics():
    """_organize_metrics() splits metrics by type and rejects invalid input."""
    from cornac.metrics import MAE, AUC

    bm = BaseMethod()

    rating_metrics, ranking_metrics = bm._organize_metrics([MAE(), AUC()])
    assert 1 == len(rating_metrics)  # MAE
    assert 1 == len(ranking_metrics)  # AUC

    # The original try/except could never fail: reaching the except branch
    # asserted True, and raising nothing also passed silently. Fail
    # explicitly when the expected ValueError is not raised.
    try:
        bm._organize_metrics(None)
    except ValueError:
        pass
    else:
        raise AssertionError("_organize_metrics(None) should raise ValueError")
コード例 #6
0
 def test_random_search(self):
     """Run a RandomSearch over BPR hyper-parameters inside an Experiment."""
     search_space = [
         Discrete("k", [1, 2, 3]),
         Continuous("learning_rate", low=0.01, high=0.1),
     ]
     auc = AUC()
     searcher = RandomSearch(
         model=BPR(max_iter=1, verbose=True),
         space=search_space,
         metric=auc,
         eval_method=self.eval_method,
     )
     experiment = Experiment(
         eval_method=self.eval_method,
         models=[searcher],
         metrics=[auc],
         user_based=False,
     )
     experiment.run()
コード例 #7
0
    lambda_d=0.1,
    min_user_freq=2,
    max_iter=1000,
    trainable=True,
    verbose=True,
    init_params=params,
)

n_items = eval_method.train_set.num_items

# Top-k cutoffs at 1%, 5%, and 10% of the item catalogue. Integer floor
# division is exact, unlike int(x / 100), which routes the value through
# a float and can lose precision for very large catalogues.
k_1 = n_items // 100
k_5 = n_items * 5 // 100
k_10 = n_items * 10 // 100

# Evaluate the model with AUC plus Recall/NDCG at each percentage cutoff.
recall_metrics = [Recall(k=k) for k in (k_1, k_5, k_10)]
ndcg_metrics = [NDCG(k=k) for k in (k_1, k_5, k_10)]

experiment = Experiment(
    eval_method,
    models=[model],
    metrics=[AUC()] + recall_metrics + ndcg_metrics,
    show_validation=True,
    save_dir="dist/toy/result",
    verbose=True,
)
experiment.run()
コード例 #8
0
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP

from cornac.eval_methods import PropensityStratifiedEvaluation
from cornac.experiment import Experiment

# Load the MovieLens 1M feedback data.
ml_dataset = cornac.datasets.movielens.load_feedback(variant="1M")

# Propensity-stratified evaluation with two strata and a 4.0 rating threshold.
stra_eval_method = PropensityStratifiedEvaluation(
    data=ml_dataset,
    n_strata=2,
    rating_threshold=4.0,
    verbose=True,
)

# Models under comparison, both configured with k=10 and seed=123.
models = [WMF(k=10, seed=123), BPR(k=10, seed=123)]

# Rating metrics (MAE, RMSE) alongside ranking metrics.
metrics = [MAE(), RMSE(), Precision(k=10), Recall(k=10), NDCG(), AUC(), MAP()]

# Run the stratified experiment.
exp_stra = Experiment(
    eval_method=stra_eval_method,
    models=models,
    metrics=metrics,
)
exp_stra.run()