def get_performance(r, Ks):
    """Compute hit@K for each cutoff in *Ks*.

    Args:
        r: relevance vector/array as expected by ``metrics.hit_at_k``.
        Ks: iterable of integer cutoffs to evaluate.

    Returns:
        dict with key ``'auc'`` mapping to a NumPy array of hit@K scores,
        one per cutoff in ``Ks``.
        NOTE(review): the key is named 'auc' but the values are hit ratios —
        presumably kept for interface compatibility with callers; confirm.
    """
    # Bug fix: the original body shadowed the Ks parameter with `Ks = [1]`,
    # so the caller-supplied cutoff list was silently ignored.
    auc = [metrics.hit_at_k(r, K) for K in Ks]
    return {'auc': np.array(auc)}
# --- Ejemplo n.º 2 (Example 2) ---
def get_performance(user_pos_test, r, auc, Ks):
    """Evaluate ranking quality at every cutoff in *Ks*.

    Args:
        user_pos_test: the user's held-out positive items (only its length
            is used, as the recall denominator).
        r: relevance vector/array consumed by the ``metrics`` helpers.
        auc: precomputed AUC value, passed through unchanged.
        Ks: iterable of integer cutoffs.

    Returns:
        dict of NumPy arrays ('recall', 'precision', 'ndcg', 'hit_ratio'),
        one entry per cutoff, plus the pass-through 'auc'.
    """
    num_pos = len(user_pos_test)  # hoisted: invariant across cutoffs
    precision = [metrics.precision_at_k(r, K) for K in Ks]
    recall = [metrics.recall_at_k(r, K, num_pos) for K in Ks]
    ndcg = [metrics.ndcg_at_k(r, K) for K in Ks]
    hit_ratio = [metrics.hit_at_k(r, K) for K in Ks]
    return {
        'recall': np.array(recall),
        'precision': np.array(precision),
        'ndcg': np.array(ndcg),
        'hit_ratio': np.array(hit_ratio),
        'auc': auc,
    }
# --- Ejemplo n.º 3 (Example 3) ---
    def evaluate_users(ratings, rel, num_pos):
        """Mean top-K ranking metrics over a batch of users.

        ``K`` is a free variable captured from the enclosing scope
        (not visible in this chunk) — TODO confirm where it is bound.

        Args:
            ratings: 2-D score matrix, one row per user.
            rel: relevance matrix aligned with ``ratings``; rows are
                indexed per user when gathering the top-K entries.
            num_pos: per-user positive counts, used as the recall denominator.

        Returns:
            dict with the batch means of precision@K, recall@K, ndcg@K
            and hit_ratio@K.
        """
        # NOTE(review): original comment said "exclude trainin[g]" — the
        # exclusion itself is presumably done by the caller via `rel`.
        n_users = len(ratings)

        # Column indices of each user's K highest-scored items
        # (argsort ascending, so the last K entries are the top scores).
        top_inds = np.argsort(ratings, axis=-1)[:, -K:]
        top_rel = rel[np.arange(n_users)[:, None], top_inds]

        return {
            'precision': metrics.precision_at_k(top_rel, K).mean(),
            'recall': metrics.recall_at_k(top_rel, K, num_pos).mean(),
            'ndcg': metrics.ndcg_at_k(top_rel, K).mean(),
            'hit_ratio': metrics.hit_at_k(top_rel, K).mean(),
        }