Example #1
from cornac.data import reader as Reader  # older cornac exposed read_uir_triplets at module level
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.metrics import MAE, RMSE, Recall, FMeasure
from cornac.models import PMF


def test_with_ratio_split():
    data_file = './tests/data.txt'
    data = Reader.read_uir_triplets(data_file)
    exp = Experiment(eval_method=RatioSplit(data, verbose=True),
                     models=[PMF(1, 0)],
                     metrics=[MAE(), RMSE(),
                              Recall(1), FMeasure(1)],
                     verbose=True)
    exp.run()

    # avg_results: one row per model, one column per metric
    assert (1, 4) == exp.avg_results.shape

    assert 1 == len(exp.user_results)
    assert 4 == len(exp.user_results['PMF'])
    assert 2 == len(exp.user_results['PMF']['MAE'])
    assert 2 == len(exp.user_results['PMF']['RMSE'])
    assert 2 == len(exp.user_results['PMF']['Recall@1'])
    assert 2 == len(exp.user_results['PMF']['F1@1'])

    try:
        Experiment(None, None, None)
    except ValueError:
        assert True

    try:
        Experiment(None, [PMF(1, 0)], None)
    except ValueError:
        assert True
Example #2
    def test_with_cross_validation(self):
        exp = Experiment(eval_method=CrossValidation(self.data),
                         models=[PMF(1, 0)],
                         metrics=[MAE(), RMSE(),
                                  Recall(1),
                                  FMeasure(1)],
                         verbose=True)
        exp.run()
Example #3
    def test_with_cross_validation(self):
        Experiment(eval_method=CrossValidation(
            self.data + [(self.data[0][0], self.data[1][1], 5.0)],
            exclude_unknowns=False,
            verbose=True),
                   models=[PMF(1, 0)],
                   metrics=[Recall(1), FMeasure(1)],
                   verbose=True).run()
Example #4
import cornac
import requests
from pymongo import MongoClient

from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import PMF


def generateLatentVectors(mongohost, mongoport, cerebro_url, dim):
    mongodb_url = 'mongodb://' + mongohost + ':' + mongoport
    client = MongoClient(mongodb_url)
    cerebro = client.cerebro
    ratings = cerebro.ratings
    users = cerebro.users
    # Collect (user, item, rating) triplets from MongoDB.
    data = []
    for user in users.find():
        userid = user.pop("_id")
        records = ratings.find({"userID": userid})
        itemid = ""
        for record in records:
            # Skip consecutive records for the same item.
            if record['itemID'] == itemid:
                continue
            itemid = record['itemID']
            data.append((userid, itemid, record['rating']))

    ratio_split = RatioSplit(data=data,
                             test_size=0.01,
                             rating_threshold=4.0,
                             seed=5654)
    pmf = PMF(k=dim, max_iter=50, learning_rate=0.001)

    mae = cornac.metrics.MAE()
    rmse = cornac.metrics.RMSE()
    rec_10 = cornac.metrics.Recall(k=10)
    ndcg_10 = cornac.metrics.NDCG(k=10)
    auc = cornac.metrics.AUC()

    exp = Experiment(eval_method=ratio_split,
                     models=[pmf],
                     metrics=[mae, rmse, rec_10, ndcg_10, auc],
                     user_based=True)
    exp.run()

    # Write each user's and item's latent vector back to MongoDB.
    user_ids = list(pmf.train_set.user_ids)
    item_ids = list(pmf.train_set.item_ids)
    user_vecs = list(pmf.U)
    item_vecs = list(pmf.V)
    print("user ids: %d, user vectors: %d" % (len(user_ids), len(user_vecs)))
    print("item ids: %d, item vectors: %d" % (len(item_ids), len(item_vecs)))

    for uid, vec in zip(user_ids, user_vecs):
        users.update_one({"_id": uid}, {"$set": {"vec": list(vec)}})

    for iid, vec in zip(item_ids, item_vecs):
        cerebro.items.update_one({"_id": iid}, {"$set": {"vec": list(vec)}})

    json_msg = {"msg": "update"}
    r = requests.post(url=cerebro_url + '/update/buildIdx', json=json_msg)
    print(r.text)
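
A minimal invocation sketch for the function above; the host, port, and service URL below are placeholder assumptions, not values from the source.

# Hypothetical endpoints; point these at your own MongoDB and cerebro service.
generateLatentVectors(mongohost="localhost",
                      mongoport="27017",
                      cerebro_url="http://localhost:8080",
                      dim=10)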
Example #5
    def test_with_ratio_split(self):
        exp = Experiment(eval_method=RatioSplit(self.data, verbose=True),
                         models=[PMF(1, 0)],
                         metrics=[MAE(), RMSE(),
                                  Recall(1),
                                  FMeasure(1)],
                         verbose=True)
        exp.run()

        try:
            Experiment(None, None, None)
        except ValueError:
            assert True

        try:
            Experiment(None, [PMF(1, 0)], None)
        except ValueError:
            assert True
Example #6
from cornac.data import reader
from cornac.eval_methods import CrossValidation
from cornac.experiment import Experiment
from cornac.metrics import MAE, RMSE, Recall, FMeasure
from cornac.models import PMF


def test_with_cross_validation():
    data_file = './tests/data.txt'
    data = reader.read_uir(data_file)
    exp = Experiment(eval_method=CrossValidation(data),
                     models=[PMF(1, 0)],
                     metrics=[MAE(), RMSE(),
                              Recall(1), FMeasure(1)],
                     verbose=True)
    exp.run()
Example #7
    def test_with_ratio_split(self):
        Experiment(eval_method=RatioSplit(
            self.data + [(self.data[0][0], self.data[1][1], 5.0)],
            exclude_unknowns=True,
            seed=123,
            verbose=True),
                   models=[PMF(1, 0)],
                   metrics=[MAE(), RMSE()],
                   verbose=True).run()

        try:
            Experiment(None, None, None)
        except ValueError:
            assert True

        try:
            Experiment(None, [PMF(1, 0)], None)
        except ValueError:
            assert True
Example #8
from cornac.models import MF, PMF, BPR


def select_model(user_input):
    if user_input["model"] == "pmf":
        model_selected = PMF(k=int(user_input["lf"]), max_iter=int(user_input["iteration"]),
                             learning_rate=float(user_input["lr"]), lamda=float(user_input["rp"]),
                             variant=user_input["variant"], verbose=True)
    elif user_input["model"] == "mf":
        model_selected = MF(k=int(user_input["lf"]), max_iter=int(user_input["iteration"]),
                            learning_rate=float(user_input["lr"]), lambda_reg=float(user_input["rp"]),
                            use_bias=True)
    elif user_input["model"] == "bpr":
        model_selected = BPR(k=int(user_input["lf"]), max_iter=int(user_input["iteration"]),
                             learning_rate=float(user_input["lr"]), lambda_reg=float(user_input["rp"]),
                             verbose=True)
    else:
        raise ValueError("unknown model: " + str(user_input["model"]))
    return model_selected
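
A quick usage sketch for select_model; the payload below is an illustrative assumption about the expected keys, not data from the source.

# Hypothetical payload; keys mirror the lookups inside select_model.
user_input = {"model": "pmf", "lf": "10", "iteration": "100",
              "lr": "0.001", "rp": "0.001", "variant": "non_linear"}
model = select_model(user_input)
print(model.name)  # "PMF"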
Example #9
import cornac
from cornac.eval_methods import RatioSplit
from cornac.models import MF, PMF, BPR
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP

# load the built-in MovieLens 100K and split the data based on ratio
ml_100k = cornac.datasets.movielens.load_feedback()
rs = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# initialize models, here we are comparing: Biased MF, PMF, and BPR
models = [
    MF(k=10,
       max_iter=25,
       learning_rate=0.01,
       lambda_reg=0.02,
       use_bias=True,
       seed=123),
    PMF(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001, seed=123),
    BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123),
]

# define metrics to evaluate the models
metrics = [
    MAE(),
    RMSE(),
    Precision(k=10),
    Recall(k=10),
    NDCG(k=10),
    AUC(),
    MAP()
]

# put it together in an experiment, voilà!
cornac.Experiment(eval_method=rs, models=models, metrics=metrics, user_based=True).run()
Example #10
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.models import PMF

# Load the MovieLens 100K dataset
ml_100k = movielens.load_feedback()

# Instantiate an evaluation method.
ratio_split = RatioSplit(data=ml_100k,
                         test_size=0.2,
                         rating_threshold=4.0,
                         exclude_unknowns=False)

# Instantiate a PMF recommender model.
pmf = PMF(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001)

# Instantiate evaluation metrics.
mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
rec_20 = cornac.metrics.Recall(k=20)
pre_20 = cornac.metrics.Precision(k=20)

# Instantiate and then run an experiment.
cornac.Experiment(
    eval_method=ratio_split,
    models=[pmf],
    metrics=[mae, rmse, rec_20, pre_20],
    user_based=True,
).run()
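
Once run() completes, the fitted model can be queried directly; Example #4 above relies on the same in-place training behavior. A minimal sketch, assuming the pmf instance defined above:

# score() returns rating estimates; the indices refer to the internal
# user/item mapping that cornac builds during training.
print(pmf.score(user_idx=0, item_idx=0))  # estimate for one user-item pair
print(pmf.score(user_idx=0)[:5])          # estimates for the first five items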
Example #11
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.models import PMF

# Load the MovieLens 100K dataset
ml_100k = movielens.load_100k()

# Instantiate an evaluation method.
ratio_split = RatioSplit(data=ml_100k,
                         test_size=0.2,
                         rating_threshold=4.0,
                         exclude_unknowns=False)

# Instantiate a PMF recommender model.
pmf = PMF(k=10, max_iter=100, learning_rate=0.001, lamda=0.001)

# Instantiate evaluation metrics.
mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
rec_20 = cornac.metrics.Recall(k=20)
pre_20 = cornac.metrics.Precision(k=20)

# Instantiate and then run an experiment.
exp = cornac.Experiment(eval_method=ratio_split,
                        models=[pmf],
                        metrics=[mae, rmse, rec_20, pre_20],
                        user_based=True)
exp.run()
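
As the assertions in Example #1 show, the averaged metrics remain available on the experiment object after the run. A short sketch using the exp defined above:

# avg_results holds one row per model and one column per metric.
print(exp.avg_results)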