Example #1
# Assumed imports (not shown in the original snippet); `contexts` holds the
# item auxiliary graph data, e.g. loaded as in Example #7 below.
from cornac import metrics
from cornac.data import GraphModule
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import PCRL

item_graph_module = GraphModule(data=contexts)

ratio_split = RatioSplit(data=ratings,
                         test_size=0.2,
                         rating_threshold=3.5,
                         shuffle=True,
                         exclude_unknowns=True,
                         verbose=True,
                         item_graph=item_graph_module)

pcrl = PCRL(k=100, z_dims=[300], max_iter=300, learning_rate=0.001)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Instantiate and run your experiment
exp = Experiment(eval_method=ratio_split,
                 models=[pcrl],
                 metrics=[ndcg, rec, pre])
exp.run()
"""
Output:
     | NDCG@-1 | Recall@20 | Precision@20 | Train (s) | Test (s)
---- + ------- + --------- + ------------ + --------- + --------
pcrl |  0.1922 |    0.0862 |       0.0148 | 2591.4878 |   4.0957

*Results may change slightly from one run to another due to different random initial parameters
"""
Example #2
# NOTE: the opening of this snippet is truncated in the source; the model
# class is not shown, and `params` and `eval_method` are assumed to be
# defined earlier in the original script. Assumed imports:
from cornac.experiment import Experiment
from cornac.metrics import AUC, NDCG, Recall

model = Model(  # `Model` is a hypothetical placeholder for the truncated class name
    lambda_d=0.1,
    min_user_freq=2,
    max_iter=1000,
    trainable=True,
    verbose=True,
    init_params=params,
)

n_items = eval_method.train_set.num_items

# Ranking cutoffs at 1%, 5%, and 10% of the item catalog size
k_1 = int(n_items / 100)
k_5 = int(n_items * 5 / 100)
k_10 = int(n_items * 10 / 100)

Experiment(
    eval_method,
    models=[model],
    metrics=[
        AUC(),
        Recall(k=k_1),
        Recall(k=k_5),
        Recall(k=k_10),
        NDCG(k=k_1),
        NDCG(k=k_5),
        NDCG(k=k_10),
    ],
    show_validation=True,
    save_dir="dist/toy/result",
    verbose=True,
).run()
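Because the cutoffs are derived from the catalog size, the evaluation depth
scales with the dataset. A quick sanity check of the arithmetic, assuming a
hypothetical catalog of 1,500 items:

n_items = 1500                         # hypothetical catalog size
assert int(n_items / 100) == 15        # k_1: top 1%
assert int(n_items * 5 / 100) == 75    # k_5: top 5%
assert int(n_items * 10 / 100) == 150  # k_10: top 10%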
Example #3
# Assumed imports (not shown in the original snippet); `ratings` and
# `item_graph_module` are set up as in Example #1.
from cornac import metrics
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import MCF

ratio_split = RatioSplit(data=ratings,
                         test_size=0.2,
                         rating_threshold=3.5,
                         shuffle=True,
                         exclude_unknowns=True,
                         verbose=True,
                         item_graph=item_graph_module)

mcf = MCF(k=10, max_iter=40, learning_rate=0.001, verbose=True)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rmse = metrics.RMSE()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Instantiate and run your experiment
exp = Experiment(eval_method=ratio_split,
                 models=[mcf],
                 metrics=[rmse, ndcg, rec, pre])
exp.run()
"""
Output:
    |   RMSE | NDCG@-1 | Recall@20 | Precision@20 | Train (s) | Test (s)
--- + ------ + ------- + --------- + ------------ + --------- + --------
MCF | 1.0854 |  0.1598 |    0.0348 |       0.0057 |    7.4057 |   4.1801

*Results may change from one run to another due to different random initial parameters
"""
Example #4
# Assumed imports (not shown in the original snippet):
from cornac import metrics
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import CVAECF

# The start of this call is truncated in the source: judging from the
# surviving arguments and the later `eval_method=ratio_split`, it is a
# RatioSplit over `ratings` with a user graph. Leading arguments (e.g.
# test_size) are not shown; `data=ratings` below is an assumption.
ratio_split = RatioSplit(
    data=ratings,
    exclude_unknowns=True,
    verbose=True,
    user_graph=user_graph_modality,
    seed=123,
)

# Instantiate CVAECF model
cvaecf = CVAECF(z_dim=20,
                h_dim=20,
                autoencoder_structure=[40],
                learning_rate=0.001,
                n_epochs=70,
                batch_size=128,
                verbose=True,
                seed=123)

# Evaluation metrics
ndcg = metrics.NDCG(k=50)
rec = metrics.Recall(k=50)
pre = metrics.Precision(k=50)

# Put everything together into an experiment and run it
Experiment(eval_method=ratio_split,
           models=[cvaecf],
           metrics=[ndcg, pre, rec]).run()
"""
Output:
       | NDCG@50 | Precision@50 | Recall@50 | Train (s) | Test (s)
------ + ------- + ------------ + --------- + --------- + --------
CVAECF |  0.4171 |       0.0781 |    0.8689 |   13.0752 |   1.4574
"""
Example #5
# Assumed imports (not shown in the original snippet); `ratings` and
# `user_graph_modality` are assumed to be defined as in the sketch above.
from cornac import metrics
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import SoRec

# Define an evaluation method to split feedback into train and test sets
ratio_split = RatioSplit(data=ratings,
                         test_size=0.2,
                         rating_threshold=2.5,
                         exclude_unknowns=True,
                         verbose=True,
                         user_graph=user_graph_modality,
                         seed=123)

# Instantiate SoRec
sorec = SoRec(k=10, max_iter=50, learning_rate=0.001, verbose=False, seed=123)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rmse = metrics.RMSE()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Put everything together into an experiment and run it
exp = Experiment(eval_method=ratio_split,
                 models=[sorec],
                 metrics=[rmse, ndcg, pre, rec])
exp.run()
"""
Output:
      |   RMSE | NDCG@-1 | Precision@20 | Recall@20 | Train (s) | Test (s)
----- + ------ + ------- + ------------ + --------- + --------- + --------
SoRec | 0.7574 |  0.3707 |       0.0756 |    0.3736 |    0.8198 |   0.8037
"""
Example #6
# Assumed imports (not shown in the original snippet):
import numpy as np

from cornac import metrics
from cornac.data import GraphModule
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import C2PF

# load office rating data in triplet format (uid, iid, rating)
office_ratings = np.loadtxt("path to office ratings")

# load office item context information in triplet format (item_id, context_item_id, value); see the C2PF paper for details
office_context = np.loadtxt("path to office content data")

item_graph_module = GraphModule(data=office_context)

ratio_split = RatioSplit(data=office_ratings,
                         test_size=0.2,
                         rating_threshold=3.5,
                         shuffle=True,
                         exclude_unknowns=True,
                         verbose=True,
                         item_graph=item_graph_module)

rec_c2pf = C2PF(k=100, max_iter=80, variant='c2pf')

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
mrr = metrics.MRR()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Instantiate and run your experiment
exp = Experiment(eval_method=ratio_split,
                 models=[rec_c2pf],
                 metrics=[ndcg, mrr, rec, pre])
exp.run()
Example #7
# Assumed imports (not shown in the original snippet):
from cornac import metrics
from cornac.data import GraphModality
from cornac.datasets import amazon_office as office
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import C2PF

ratings = office.load_feedback()
contexts = office.load_graph()

# Instantiate a GraphModality, which makes it convenient to work with graph
# (network) auxiliary information
# For more details, please refer to the tutorial on how to work with auxiliary data
item_graph_modality = GraphModality(data=contexts)

# Define an evaluation method to split feedback into train and test sets
ratio_split = RatioSplit(
    data=ratings,
    test_size=0.2,
    rating_threshold=3.5,
    exclude_unknowns=True,
    verbose=True,
    item_graph=item_graph_modality,
)

# Instantiate C2PF
c2pf = C2PF(k=100, max_iter=80, variant="c2pf")

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
mrr = metrics.MRR()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Put everything together into an experiment and run it
Experiment(eval_method=ratio_split,
           models=[c2pf],
           metrics=[ndcg, mrr, rec, pre]).run()
Example #8
import cornac
from cornac.eval_methods import PropensityStratifiedEvaluation
from cornac.experiment import Experiment
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP
from cornac.models import WMF, BPR

# Load the MovieLens 1M dataset
ml_dataset = cornac.datasets.movielens.load_feedback(variant="1M")

# Instantiate an instance of PropensityStratifiedEvaluation method
stra_eval_method = PropensityStratifiedEvaluation(
    data=ml_dataset,
    n_strata=2,  # number of strata
    rating_threshold=4.0,
    verbose=True)

# define the examined models
models = [
    WMF(k=10, seed=123),
    BPR(k=10, seed=123),
]

# define the metrics
metrics = [MAE(), RMSE(), Precision(k=10), Recall(k=10), NDCG(), AUC(), MAP()]

# run an experiment
exp_stra = Experiment(eval_method=stra_eval_method,
                      models=models,
                      metrics=metrics)

exp_stra.run()