Example #1
def test_ndcg_0():
    assert NDCG(strong_relevancy=True)([1, 3, 2, 6, 5, 4],
                                       relevancies={
                                           1: 5,
                                           2: 3,
                                           3: 4,
                                           4: 0,
                                           5: 1,
                                           6: 2
                                       }) == 1.0
Example #2
def test_ndcg_8():
    assert round(
        NDCG(strong_relevancy=False)([1, 3, 2, 6, 5, 4],
                                     relevancies={
                                         1: 0,
                                         2: 3,
                                         3: 4,
                                         4: 5,
                                         5: 1,
                                         6: 2
                                     }), 2) == 0.69
Example #3
def test_ndcg_3():
    assert round(
        NDCG(strong_relevancy=False)([6],
                                     relevancies={
                                         1: 5,
                                         2: 3,
                                         3: 4,
                                         4: 0,
                                         5: 1,
                                         6: 2
                                     }), 2) == 0.19
Example #4
def test_ndcg_9():
    assert round(
        NDCG(strong_relevancy=True)([1, 3, 2, 6, 5, 4],
                                    relevancies={
                                        1: 0,
                                        2: 3,
                                        3: 4,
                                        4: 5,
                                        5: 1,
                                        6: 2
                                    },
                                    k=3), 2) == 0.29
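
The four tests above are consistent with the standard DCG/IDCG formulation of NDCG, where the strong_relevancy flag appears to switch the per-item gain between 2**rel - 1 and plain rel, and k truncates both the ranked list and the ideal ranking. Below is a minimal, self-contained sketch (ndcg_sketch is a hypothetical helper, not DRecPy's implementation) that reproduces the expected values under that assumption:

from math import log2

def ndcg_sketch(ranking, relevancies, k=None, strong_relevancy=True):
    # Gain per item: 2**rel - 1 (strong) or rel (weak), discounted by log2(position + 1).
    gain = (lambda rel: 2 ** rel - 1) if strong_relevancy else (lambda rel: rel)
    k = len(relevancies) if k is None else k
    dcg = sum(gain(relevancies[item]) / log2(pos + 2)
              for pos, item in enumerate(ranking[:k]))
    # Ideal DCG: the k highest relevancies in descending order.
    ideal = sorted(relevancies.values(), reverse=True)[:k]
    idcg = sum(gain(rel) / log2(pos + 2) for pos, rel in enumerate(ideal))
    return dcg / idcg

rels = {1: 0, 2: 3, 3: 4, 4: 5, 5: 1, 6: 2}
print(round(ndcg_sketch([1, 3, 2, 6, 5, 4], rels, strong_relevancy=False), 2))      # 0.69
print(round(ndcg_sketch([1, 3, 2, 6, 5, 4], rels, k=3, strong_relevancy=True), 2))  # 0.29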
Example #5
def test_recommendation_evaluation_15(model, interactions_ds):
    """Evaluation with invalid number of k (< 0)."""
    try:
        recommendation_evaluation(model,
                                  interactions_ds[1],
                                  cn_test_users=None,
                                  k=-1,
                                  n_pos_interactions=None,
                                  novelty=False,
                                  metrics=[NDCG(), HitRatio()],
                                  verbose=False)
        assert False
    except Exception as e:
        assert str(e) == 'k (-1) should be > 0.'
Example #6
def test_ranking_evaluation_11(model, interactions_ds):
    """Evaluation with custom metrics."""
    assert ranking_evaluation(model,
                              interactions_ds[1],
                              n_test_users=None,
                              k=2,
                              n_pos_interactions=None,
                              n_neg_interactions=None,
                              generate_negative_pairs=False,
                              novelty=False,
                              metrics=[HitRatio(), NDCG()]) == {
                                  'HitRatio@2': 0.3137,
                                  'NDCG@2': 0.4093
                              }
Example #7
def test_recommendation_evaluation_13(model, interactions_ds):
    """Evaluation with invalid number of positive interactions (< 0)."""
    try:
        recommendation_evaluation(model,
                                  interactions_ds[1],
                                  cn_test_users=None,
                                  k=[1, 2],
                                  n_pos_interactions=-1,
                                  novelty=False,
                                  metrics=[NDCG(), HitRatio()],
                                  verbose=False)
        assert False
    except Exception as e:
        assert str(
            e
        ) == 'The number of positive interactions (-1) should be None or an integer > 0.'
Example #8
def test_ranking_evaluation_21(model, interactions_ds):
    """Evaluation with invalid number of k (< 0)."""
    try:
        ranking_evaluation(model,
                           interactions_ds[1],
                           n_test_users=None,
                           k=-1,
                           n_pos_interactions=None,
                           n_neg_interactions=None,
                           generate_negative_pairs=False,
                           novelty=False,
                           metrics=[HitRatio(), NDCG()],
                           verbose=False)
        assert False
    except Exception as e:
        assert str(e) == 'k (-1) should be > 0.'
Example #9
def test_ranking_evaluation_17(model, interactions_ds):
    """Evaluation with invalid number of negative interactions (0)."""
    try:
        ranking_evaluation(model,
                           interactions_ds[1],
                           n_test_users=None,
                           k=[1, 2],
                           n_pos_interactions=None,
                           n_neg_interactions=0,
                           generate_negative_pairs=False,
                           novelty=False,
                           metrics=[HitRatio(), NDCG()],
                           verbose=False)
        assert False
    except Exception as e:
        assert str(
            e
        ) == 'The number of negative interactions (0) should be None or an integer > 0.'
Example #10
def test_ranking_evaluation_19(model, interactions_ds):
    """Evaluation with invalid combination of generate_negative_pairs and n_neg_interactions
    (generate_negative_pairs without a set value of n_neg_interactions)."""
    try:
        ranking_evaluation(model,
                           interactions_ds[1],
                           n_test_users=None,
                           k=[1, 2],
                           n_pos_interactions=None,
                           n_neg_interactions=None,
                           generate_negative_pairs=True,
                           novelty=False,
                           metrics=[HitRatio(), NDCG()],
                           verbose=False)
        assert False
    except Exception as e:
        assert str(e) == 'Cannot generate negative interaction pairs when the number of negative interactions per ' \
                         'user is not defined. Either set generate_negative_pairs=False or define the ' \
                         'n_neg_interactions parameter.'
Example #11
from DRecPy.Recommender.Baseline import UserKNN  # module paths below assumed from DRecPy's documented layout
from DRecPy.Evaluation.Processes import ranking_evaluation
from DRecPy.Evaluation.Splits import matrix_split
from DRecPy.Evaluation.Metrics import Precision
from DRecPy.Evaluation.Metrics import Recall
from DRecPy.Evaluation.Metrics import NDCG
from DRecPy.Dataset import get_full_dataset

ds = get_full_dataset('ml-100k')

ds_train, ds_test = matrix_split(ds, user_test_ratio=0.2, item_test_ratio=0.2, seed=0, verbose=False)

# cosine sim
knn = UserKNN(k=10, m=0, sim_metric='cosine_cf', shrinkage=None, seed=15, use_averages=False, verbose=True)
knn.fit(ds_train)

evaluation = ranking_evaluation(knn, ds_test, interaction_threshold=5, k=list(range(1, 11)),
                                generate_negative_pairs=False, n_pos_interactions=None,
                                n_neg_interactions=None, seed=15, verbose=True,
                                metrics=[Precision(), Recall(), NDCG()])
print('cosine sim', evaluation)

# jaccard sim
knn = UserKNN(k=10, m=0, sim_metric='jaccard', shrinkage=None, seed=15, use_averages=False, verbose=True)
knn.fit(ds_train)

evaluation = ranking_evaluation(knn, ds_test, interaction_threshold=5, k=list(range(1, 11)),
                                generate_negative_pairs=False, n_pos_interactions=None,
                                n_neg_interactions=None, seed=15, verbose=True,
                                metrics=[Precision(), Recall(), NDCG()])
print('jaccard sim', evaluation)

# msd sim
knn = UserKNN(k=10, m=0, sim_metric='msd', shrinkage=None, seed=15, use_averages=False, verbose=True)
knn.fit(ds_train)
Example #12
def epoch_callback_fn(model):
    return {'val_' + metric: v for metric, v in
            ranking_evaluation(model, ds_val, n_pos_interactions=1, n_neg_interactions=100,
                               generate_negative_pairs=True, k=10, verbose=False, seed=10,
                               metrics=[HitRatio(), NDCG()]).items()}
Example #13
from DRecPy.Recommender import CDAE  # module paths below assumed from DRecPy's documented layout
from DRecPy.Recommender.EarlyStopping import MaxValidationValueRule
from DRecPy.Dataset import get_train_dataset
from DRecPy.Dataset import get_test_dataset
from DRecPy.Evaluation.Processes import ranking_evaluation
from DRecPy.Evaluation.Splits import leave_k_out
from DRecPy.Evaluation.Metrics import NDCG
from DRecPy.Evaluation.Metrics import HitRatio
from DRecPy.Evaluation.Metrics import Precision
import time


ds_train = get_train_dataset('ml-100k')
ds_test = get_test_dataset('ml-100k')
ds_train, ds_val = leave_k_out(ds_train, k=1, min_user_interactions=10, seed=0)


def epoch_callback_fn(model):
    return {'val_' + metric: v for metric, v in
            ranking_evaluation(model, ds_val, n_pos_interactions=1, n_neg_interactions=100,
                               generate_negative_pairs=True, k=10, verbose=False, seed=10,
                               metrics=[HitRatio(), NDCG()]).items()}


start_train = time.time()
cdae = CDAE(hidden_factors=50, corruption_level=0.2, loss='bce', seed=10)
cdae.fit(ds_train, learning_rate=0.001, reg_rate=0.001, epochs=100, batch_size=64, neg_ratio=5,
         epoch_callback_fn=epoch_callback_fn, epoch_callback_freq=10,
         early_stopping_rule=MaxValidationValueRule('val_HitRatio'), early_stopping_freq=10)
print("Training took", time.time() - start_train)

print(ranking_evaluation(cdae, ds_test, k=[1, 5, 10], novelty=True, n_pos_interactions=1,
                         n_neg_interactions=100, generate_negative_pairs=True, seed=10,
                         metrics=[HitRatio(), NDCG(), Precision()], max_concurrent_threads=4, verbose=True))
Example #14
File: dmf.py  Project: lasigeBioTM/DRecPy
ds_train_bin = ds_train.copy()
ds_train_bin.apply('interaction', lambda x: 1)
ds_test_bin = ds_test.copy()
ds_test_bin.apply('interaction', lambda x: 1)

for nce in [True, False]:
    print('NCE =', nce)
    start_train = time.time()
    dmf = DMF(use_nce=nce,
              user_factors=[128, 64],
              item_factors=[128, 64],
              seed=10)
    dmf.fit(ds_train if nce else ds_train_bin,
            epochs=50,
            batch_size=256,
            learning_rate=0.001,
            reg_rate=0.0001,
            neg_ratio=5)
    print("Training took", time.time() - start_train)

    print(
        ranking_evaluation(dmf,
                           ds_test if nce else ds_test_bin,
                           n_pos_interactions=1,
                           n_neg_interactions=100,
                           generate_negative_pairs=True,
                           novelty=True,
                           k=list(range(1, 11)),
                           metrics=[HitRatio(), NDCG()],
                           seed=10))
Example #15
              use_averages=False,
              verbose=True)
knn.fit(ds_train)

evaluation = ranking_evaluation(knn,
                                ds_test,
                                interaction_threshold=2,
                                k=list(range(1, 11)),
                                generate_negative_pairs=False,
                                n_pos_interactions=None,
                                n_neg_interactions=None,
                                seed=25,
                                verbose=True,
                                metrics=[Precision(),
                                         Recall(),
                                         NDCG()])
print('cosine sim', evaluation)

# jaccard sim
knn = UserKNN(k=10,
              m=0,
              sim_metric='jaccard',
              shrinkage=None,
              seed=25,
              use_averages=False,
              verbose=True)
knn.fit(ds_train)

evaluation = ranking_evaluation(knn,
                                ds_test,
                                interaction_threshold=2,
Example #16
def test_recommendation_evaluation_9(model, interactions_ds):
    """Evaluation with custom metrics and k set to a list."""
    assert recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=[2, 3], n_pos_interactions=None,
                                     novelty=False, metrics=[NDCG(), HitRatio()], verbose=False) == \
           {'HitRatio@2': 0.0167, 'HitRatio@3': 0.0233, 'NDCG@2': 0.0189, 'NDCG@3': 0.022}
Example #17
def test_recommendation_evaluation_8(model, interactions_ds):
    """Evaluation with custom metrics."""
    assert recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=2, n_pos_interactions=None,
                                     novelty=False, metrics=[NDCG(), HitRatio()]) == \
           {'HitRatio@2': 0.0167, 'NDCG@2': 0.0189}
Example #18
def ranking_evaluation(model, ds_test=None, n_test_users=None, k=10, n_pos_interactions=None, n_neg_interactions=None,
                       generate_negative_pairs=False, novelty=False, seed=0, max_concurrent_threads=4, **kwds):
    """Executes a ranking evaluation process, where the given model will be evaluated under the provided settings.
    This function is not thread-safe (i.e. concurrent calls might produce unexpected results). Instead of trying this,
    increase the max_concurrent_threads argument to speed up the process (if you have the available cores).

    Args:
        model: An instance of a Recommender to be evaluated.
        ds_test: An optional test InteractionDataset. If none is provided, then the test data will be the model
            training data. Evaluating on train data is not ideal for assessing the model's performance.
        n_test_users: An optional integer representing the number of test users whose rankings are evaluated.
            Default: Number of unique users of the provided test dataset.
        k: An optional integer (or a list of integers) representing the truncation factor (keep the first k elements for
             each ranked list), which then affects the produced metric evaluation. Default: 10.
        n_pos_interactions: The number of positive interactions to sample into the list that is going to be ranked and
            evaluated for each user. If, for a given user, there are fewer than n_pos_interactions positive
            interactions, the user's evaluation will be skipped. When this argument is not provided, all positive
            interactions on the test set from each user will be sampled. Default: None.
        n_neg_interactions: The max. number of negative interactions to sample into the list that is going to be ranked
            and evaluated for each user. If a float value is provided, then the max. number of sampled negative
            interactions will be that fraction of the positive interactions present on each user's test set. If this
            argument is not defined, all negative interactions present on each user's test set will be sampled.
            Default: None.
        generate_negative_pairs: An optional boolean that controls whether negative interaction pairs should also be
            generated (interaction pairs not present on the train or test data sets are also sampled) or not (i.e. only
            gathered from the test data set, where interaction values are below the interaction_threshold). If
            this parameter is set to True, then the number of sampled negative interactions for each user will always
            match the n_neg_interactions. Default: False.
        interaction_threshold: The interaction value threshold to consider an interaction value positive or negative.
            All values above or equal to interaction_threshold are considered positive, and all values below are
            considered negative. Default: model.interaction_threshold.
        novelty: A boolean indicating whether only novel recommendations should be taken into account or not.
            Default: False.
        metrics: An optional list containing instances of RankingMetricABC. Default: [Precision(), Recall(),
            HitRatio(), NDCG()].
        max_concurrent_threads: An optional integer representing the max concurrent threads to use. Default: 4.
        seed: An optional integer representing the seed for the random number generator used to sample positive
            and negative interaction pairs. Default: 0.
        verbose: A boolean indicating whether state logs and a final graph should be produced or not. Default: True.
        block: A boolean indicating whether the displayed graph should block code execution or not. Note that this
            graph is only displayed when verbose=True. Default: True.
    Returns:
        A dict containing each metric name mapping to the corresponding metric value.
    """
    assert n_test_users is None or n_test_users > 0, f'The number of test users ({n_test_users}) should be > 0.'
    assert n_pos_interactions is None or n_pos_interactions > 0,\
        f'The number of positive interactions ({n_pos_interactions}) should be None or an integer > 0.'
    assert n_neg_interactions is None or n_neg_interactions > 0, \
        f'The number of negative interactions ({n_neg_interactions}) should be None or an integer > 0.'

    if generate_negative_pairs and n_neg_interactions is None:
        raise Exception('Cannot generate negative interaction pairs when the number of negative interactions per user '
                        'is not defined. Either set generate_negative_pairs=False or define the n_neg_interactions '
                        'parameter.')

    interaction_threshold = kwds.get('interaction_threshold', model.interaction_threshold)

    if type(k) is not list: k = [k]
    for k_ in k: assert k_ > 0, f'k ({k_}) should be > 0.'

    train_evaluation = False
    if ds_test is None or ds_test == model.interaction_dataset:
        train_evaluation = True
        ds_test = model.interaction_dataset

    metrics = kwds.get('metrics', [Precision(), Recall(), HitRatio(), NDCG()])

    assert isinstance(metrics, list), f'Expected "metrics" argument to be a list and found {type(metrics)}. ' \
        f'Should contain instances of RankingMetricABC.'

    for m in metrics:
        assert isinstance(m, RankingMetricABC), f'Expected metric {m} to be an instance of type RankingMetricABC.'

    metric_sums = {(m.name, k_): [0, 0] for m in metrics for k_ in k}  # maps (metric name, k) to [value sum, ranking count]
    num_users_made = 0

    unique_test_users_ds = ds_test.unique('user')
    n_test_users = len(unique_test_users_ds) if n_test_users is None else min(n_test_users, len(unique_test_users_ds))

    if kwds.get('verbose', True):
        _iter = tqdm(unique_test_users_ds.values(['user'], to_list=True),
                     total=n_test_users, desc='Starting user evaluation tasks', position=0, leave=True)
    else:
        _iter = unique_test_users_ds.values(['user'], to_list=True)

    global n_tasks_done
    n_tasks_done, n_tasks = 0, 0

    pool = ThreadPool(processes=max_concurrent_threads)
    for user in _iter:
        if num_users_made >= n_test_users: break  # reached the max number of rankings

        pool.apply_async(_ranking_evaluation_user, (model, user, ds_test, interaction_threshold, n_pos_interactions,
                                                    n_neg_interactions, train_evaluation, metrics, novelty, metric_sums,
                                                    k, generate_negative_pairs, random.Random(seed)))
        n_tasks += 1
        num_users_made += 1
        seed += 1

    pool.close()  # Done adding tasks

    if kwds.get('verbose', True):
        curr_done = 0
        pbar = tqdm(total=n_tasks, desc='Evaluating model ranking performance', position=0, leave=True)
        while n_tasks_done <= n_tasks:
            pbar.update(n_tasks_done - curr_done)
            curr_done = n_tasks_done
            if n_tasks_done == n_tasks:
                break
            time.sleep(1)

    pool.join()  # Wait for all tasks to complete

    results = {m + f'@{k}': round(metric_sums[(m, k)][0] / metric_sums[(m, k)][1], 4)
        if metric_sums[(m, k)][1] > 0 else 0 for m, k in metric_sums}

    if kwds.get('verbose', True) and len(k) > 1:
        fig, axes = plt.subplots(1)
        fig.suptitle(f'Evaluation Metrics for {model.__class__.__name__}')

        axes.set_ylabel("Value", fontsize=12)
        axes.set_xlabel("k", fontsize=12)
        k = sorted(k)
        for m in metrics: axes.plot(k, [results[m.name + f'@{k_}'] for k_ in k], '--o', label=m.name)
        plt.legend()
        plt.show(block=kwds.get('block', True))

    return results
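
Based on the docstring above, a float n_neg_interactions caps the number of sampled negatives per user at that fraction of the user's positive test interactions, and a list-valued k produces one 'Metric@k' entry per cutoff. A brief usage sketch (model and ds_test stand for any fitted recommender and test InteractionDataset from the examples above; the exact metric values depend on the model and data, so only the returned key format is illustrated):

results = ranking_evaluation(model, ds_test,
                             n_neg_interactions=0.5,  # cap negatives at 50% of each user's positives
                             k=[5, 10],               # one entry per cutoff
                             metrics=[Precision(), NDCG()],
                             verbose=False)
print(results)  # e.g. keys 'Precision@5', 'NDCG@5', 'Precision@10', 'NDCG@10'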
Example #19
def test_ranking_evaluation_12(model, interactions_ds):
    """Evaluation with custom metrics and k set to a list."""
    assert ranking_evaluation(model, interactions_ds[1], n_test_users=None, k=[1, 2], n_pos_interactions=None,
                              n_neg_interactions=None, generate_negative_pairs=False, novelty=False,
                              metrics=[HitRatio(), NDCG()], verbose=False) == \
           {'HitRatio@1': 0.1953, 'HitRatio@2': 0.3137, 'NDCG@1': 0.3968, 'NDCG@2': 0.4093}