Code Example #1
    def test_evaluate(self):
        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=True,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=False,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

        # densify the data: rate every (user, item) pair so that
        # user_based evaluation has something to average over
        users = []
        items = []
        for u, i, r in self.data:
            users.append(u)
            items.append(i)
        for u in users:
            for i in items:
                self.data.append((u, i, 5))

        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=True,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)

        ratio_split = RatioSplit(self.data,
                                 exclude_unknowns=False,
                                 verbose=True)
        ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)
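
Note: this test assumes a `self.data` fixture of (user, item, rating) triples. A minimal self-contained sketch of the same flow; the tiny hand-made dataset below is illustrative, not from the original test suite, and with data this small the split may exclude most test ratings:

from cornac.eval_methods import RatioSplit
from cornac.models import MF
from cornac.metrics import MAE, Recall

# a toy dataset of (user, item, rating) triples
data = [("u1", "i1", 3.0), ("u1", "i2", 4.0),
        ("u2", "i2", 2.0), ("u2", "i3", 5.0),
        ("u3", "i1", 1.0), ("u3", "i3", 4.0)]

ratio_split = RatioSplit(data, test_size=0.2, exclude_unknowns=True,
                         verbose=True, seed=123)
ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)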
Code Example #2
    def test_evaluate(self, n_strata=2):
        stra_eval_method = PropensityStratifiedEvaluation(
            data=self.ml_100k, val_size=0.1, n_strata=n_strata,
            rating_threshold=4.0, verbose=True
        )
        model = MF(k=1, max_iter=0)  # tiny model: a smoke test, not a real fit
        result = stra_eval_method.evaluate(
            model, metrics=[MAE(), AUC()], user_based=False
        )
        str(result)  # smoke-test the result's string representation
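
The test above relies on a `self.ml_100k` fixture. A standalone sketch, assuming `PropensityStratifiedEvaluation` is exported from `cornac.eval_methods` and that the data comes from the built-in MovieLens loader shown in Code Example #7:

import cornac
from cornac.eval_methods import PropensityStratifiedEvaluation
from cornac.models import MF
from cornac.metrics import MAE, AUC

ml_100k = cornac.datasets.movielens.load_feedback()
stra_eval_method = PropensityStratifiedEvaluation(
    data=ml_100k, val_size=0.1, n_strata=2, rating_threshold=4.0, verbose=True
)
# k=1, max_iter=0 keeps this a fast smoke test rather than a real fit
result = stra_eval_method.evaluate(MF(k=1, max_iter=0),
                                   metrics=[MAE(), AUC()],
                                   user_based=False)
print(result)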
Code Example #3
from cornac.models import PMF, MF, BPR


def select_model(user_input):
    if user_input["model"] == "pmf":
        model_selected = PMF(k=int(user_input["lf"]),
                             max_iter=int(user_input["iteration"]),
                             learning_rate=float(user_input["lr"]),
                             lamda=float(user_input["rp"]),  # PMF spells this parameter 'lamda'
                             variant=user_input["variant"],
                             verbose=True)
    elif user_input["model"] == "mf":
        model_selected = MF(k=int(user_input["lf"]),
                            max_iter=int(user_input["iteration"]),
                            learning_rate=float(user_input["lr"]),
                            lambda_reg=float(user_input["rp"]),
                            use_bias=True)
    elif user_input["model"] == "bpr":
        model_selected = BPR(k=int(user_input["lf"]),
                             max_iter=int(user_input["iteration"]),
                             learning_rate=float(user_input["lr"]),
                             lambda_reg=float(user_input["rp"]),
                             verbose=True)
    else:
        # guard against unknown model names instead of returning an unbound variable
        raise ValueError("unknown model: %s" % user_input["model"])
    return model_selected
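
A hypothetical `user_input` dict for the dispatcher above; the keys mirror the lookups in `select_model`, and the concrete values are assumptions for illustration:

user_input = {
    "model": "mf",            # one of "pmf", "mf", "bpr"
    "lf": "10",               # number of latent factors (k)
    "iteration": "25",        # max_iter
    "lr": "0.01",             # learning_rate
    "rp": "0.02",             # regularization strength
    "variant": "non_linear",  # consumed only by the PMF branch
}
model = select_model(user_input)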
Code Example #4
from cornac.data import reader
from cornac.eval_methods import RatioSplit
from cornac.models import MF
from cornac.metrics import MAE, Recall


def test_evaluate():
    data_file = './tests/data.txt'
    # read (user, item, rating) triples; read_uir is the module-level
    # reader function from older Cornac versions
    data = reader.read_uir(data_file)

    ratio_split = RatioSplit(data, exclude_unknowns=True, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

    ratio_split = RatioSplit(data, exclude_unknowns=False, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=False)

    # densify the data: rate every (user, item) pair so that
    # user_based evaluation has something to average over
    users = []
    items = []
    for u, i, r in data:
        users.append(u)
        items.append(i)
    for u in users:
        for i in items:
            data.append((u, i, 5))

    ratio_split = RatioSplit(data, exclude_unknowns=True, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)

    ratio_split = RatioSplit(data, exclude_unknowns=False, verbose=True)
    ratio_split.evaluate(MF(), [MAE(), Recall()], user_based=True)
Code Example #5
    def test_grid_search(self):
        model = MF(max_iter=1, verbose=True)
        metric = RMSE()
        # exhaustively try every combination of k and learning_rate
        gs_mf = GridSearch(
            model=model,
            space=[
                Discrete("k", [1, 2, 3]),
                Discrete("learning_rate", [0.1, 0.01]),
            ],
            metric=metric,
            eval_method=self.eval_method,
        )
        Experiment(
            eval_method=self.eval_method,
            models=[gs_mf],
            metrics=[metric],
            user_based=False,
        ).run()
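
The test assumes a prepared `self.eval_method`. A standalone sketch of the same search, assuming `GridSearch` and `Discrete` come from `cornac.hyperopt` and reusing the RatioSplit/MovieLens setup from Code Example #7:

import cornac
from cornac.eval_methods import RatioSplit
from cornac.hyperopt import GridSearch, Discrete
from cornac.models import MF
from cornac.metrics import RMSE

ml_100k = cornac.datasets.movielens.load_feedback()
eval_method = RatioSplit(data=ml_100k, test_size=0.2,
                         rating_threshold=4.0, seed=123)

metric = RMSE()
gs_mf = GridSearch(
    model=MF(max_iter=1, verbose=True),
    space=[
        Discrete("k", [1, 2, 3]),
        Discrete("learning_rate", [0.1, 0.01]),
    ],
    metric=metric,
    eval_method=eval_method,
)
cornac.Experiment(eval_method=eval_method,
                  models=[gs_mf],
                  metrics=[metric],
                  user_based=False).run()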
Code Example #6
    def test_evaluate(self):
        # train and test on the same data: a smoke test for the pipeline
        data = Reader().read('./tests/data.txt')
        bm = BaseMethod.from_splits(train_data=data, test_data=data)
        model = MF(k=1, max_iter=0)
        result = bm.evaluate(model, metrics=[MAE()], user_based=False)
        str(result)  # smoke-test the result's string representation
Code Example #7
File: first_example.py Project: zshwuhan/cornac
"""Your very first example with Cornac"""

import cornac
from cornac.eval_methods import RatioSplit
from cornac.models import MF, PMF, BPR
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP

# load the built-in MovieLens 100K and split the data based on ratio
ml_100k = cornac.datasets.movielens.load_feedback()
rs = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# initialize models, here we are comparing: Biased MF, PMF, and BPR
models = [
    MF(k=10,
       max_iter=25,
       learning_rate=0.01,
       lambda_reg=0.02,
       use_bias=True,
       seed=123),
    PMF(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001, seed=123),
    BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123),
]

# define metrics to evaluate the models
metrics = [
    MAE(),
    RMSE(),
    Precision(k=10),
    Recall(k=10),
    NDCG(k=10),
    AUC(),
    MAP(),
]

# put everything together into an experiment and run it
cornac.Experiment(eval_method=rs, models=models, metrics=metrics, user_based=True).run()
Code Example #8
from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.models import MF
from cornac.metrics import MAE, RMSE
from cornac.utils import cache

# Download the training and test splits provided with MovieLens 100K
reader = Reader()
train_data = reader.read(
    cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))
test_data = reader.read(
    cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))

eval_method = BaseMethod.from_splits(train_data=train_data,
                                     test_data=test_data,
                                     exclude_unknowns=False,
                                     verbose=True)

mf = MF(k=10,
        max_iter=25,
        learning_rate=0.01,
        lambda_reg=0.02,
        use_bias=True,
        early_stop=True,
        verbose=True)

# Evaluation
result = eval_method.evaluate(model=mf,
                              metrics=[MAE(), RMSE()],
                              user_based=True)
print(result)
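
With user_based=True, rating metrics are computed per user and then averaged, so prolific raters do not dominate the aggregate. A sketch of the same split evaluated with ranking metrics instead; the metric choices and the rating_threshold here are illustrative assumptions:

from cornac.metrics import Recall, NDCG, AUC

rank_eval = BaseMethod.from_splits(train_data=train_data,
                                   test_data=test_data,
                                   rating_threshold=4.0,
                                   exclude_unknowns=True,
                                   verbose=True)
rank_result = rank_eval.evaluate(model=mf,
                                 metrics=[Recall(k=10), NDCG(k=10), AUC()],
                                 user_based=True)
print(rank_result)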