import numpy as np

import metrics  # assumed project-local module providing the *_at_k ranking helpers


def get_performance(user_pos_test, r, auc, Ks):
    """Compute ranking metrics for a single user at every cutoff in Ks.

    `r` is the binary relevance of the user's ranked items (best-first);
    `user_pos_test` is the user's set of held-out positive items.
    """
    precision, recall, ndcg, hit_ratio = [], [], [], []
    for K in Ks:
        precision.append(metrics.precision_at_k(r, K))
        recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
        ndcg.append(metrics.ndcg_at_k(r, K))
        hit_ratio.append(metrics.hit_at_k(r, K))
    return {'recall': np.array(recall), 'precision': np.array(precision),
            'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio),
            'auc': auc}
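# A minimal usage sketch with hypothetical data: three of the user's ten
# top-ranked items are held-out positives. Assumes the `metrics` module above
# is importable; `auc` would normally be computed from the full score vector,
# so a placeholder value is used here.
ranked_hits = np.array([1., 0., 1., 0., 0., 1., 0., 0., 0., 0.])
perf = get_performance(user_pos_test={3, 17, 42}, r=ranked_hits, auc=0.5,
                       Ks=[5, 10])
print(perf['recall'], perf['ndcg'])  # one value per cutoff in Ks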
def evaluate_users(ratings, rel, num_pos, K):
    """Vectorised evaluation of a batch of users at a single cutoff K.

    `ratings` holds predicted scores with training items already excluded
    (e.g. masked out), `rel` is the binary relevance matrix, and `num_pos`
    gives each user's number of held-out positives.
    """
    nrows = len(ratings)
    # argsort is ascending, so take the last K columns and reverse them to
    # get best-first order; NDCG is rank-sensitive and needs this ordering.
    inds = np.argsort(ratings, axis=-1)[:, -K:][:, ::-1]
    # Relevance of each user's top-K items, shape (nrows, K).
    r = rel[np.arange(nrows).reshape(-1, 1), inds]
    precision = metrics.precision_at_k(r, K)
    recall = metrics.recall_at_k(r, K, num_pos)
    ndcg = metrics.ndcg_at_k(r, K)
    hit_ratio = metrics.hit_at_k(r, K)
    return {
        'precision': precision.mean(),
        'recall': recall.mean(),
        'ndcg': ndcg.mean(),
        'hit_ratio': hit_ratio.mean(),
    }
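# A quick smoke test on synthetic data (all values hypothetical): three users,
# six items, one held-out positive each. This assumes the `metrics` helpers
# return per-user arrays for a 2-D `r`, which `evaluate_users` then averages
# into batch-level numbers.
rng = np.random.default_rng(0)
scores = rng.random((3, 6))                # predicted scores, training items pre-masked
relevance = np.zeros((3, 6))
relevance[np.arange(3), [1, 4, 2]] = 1.0   # each user's single test positive
num_pos = relevance.sum(axis=1)
print(evaluate_users(scores, relevance, num_pos, K=3))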