import numpy as np
import pandas as pd

import eval  # project-local evaluation helpers (precision_k, ndcg_k, auc, mae, mrse)


def get_metric(score_label):
    """Aggregate per-user and global ranking/regression metrics.

    `score_label` holds (uid, score, label) triples. Users with fewer than
    10 interactions are skipped. Returns averaged per-user Precision@1..20,
    NDCG@1..20 and AUC, plus global top-k% precision, AUC, MAE and MRSE.
    """
    Precision = np.zeros(20)
    NDCG = np.zeros(20)
    AUC = 0.0
    score_df = pd.DataFrame(score_label, columns=['uid', 'score', 'label'])
    num = 0
    score_label_all = []
    for uid, hist in score_df.groupby('uid'):
        if hist.shape[0] < 10:  # too few interactions for stable per-user metrics
            continue
        score = hist['score'].tolist()
        label = hist['label'].tolist()
        score_label_u = []
        for i in range(len(score)):
            score_label_u.append([score[i], label[i]])
            score_label_all.append([score[i], label[i]])
        precision, ndcg, auc, mae, mrse = calc_metric(score_label_u)
        Precision += precision
        NDCG += ndcg
        AUC += auc
        num += 1
    score_label_all = sorted(score_label_all, key=lambda d: d[0], reverse=True)
    # Global precision at the top k% of all scored pairs; integer division is
    # required here (the original `/` yields a float cutoff under Python 3).
    GPrecision = np.array([
        eval.precision_k(score_label_all, k * len(score_label_all) // 100)
        for k in range(1, 21)
    ])
    GAUC = eval.auc(score_label_all)
    MAE = eval.mae(score_label_all)
    MRSE = eval.mrse(score_label_all)
    # Assumes at least one user passed the >= 10 interaction filter (num > 0).
    return Precision / num, NDCG / num, AUC / num, GPrecision, GAUC, MAE, MRSE
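# ---------------------------------------------------------------------------
# NOTE: `eval` above is a project-local module (it shadows the Python builtin
# of the same name) whose implementation is not shown in this file. The
# functions below are a plausible reconstruction of its semantics, assuming
# the (score, label) pairs arrive pre-sorted by score in descending order and
# a label > 0 counts as a positive. They illustrate what each metric call is
# expected to compute; they are not the project's actual code.
import math


def precision_k(score_label, k):
    # Fraction of the top-k ranked items whose label is positive.
    k = max(int(k), 1)
    hits = sum(1 for _, label in score_label[:k] if label > 0)
    return hits / k


def ndcg_k(score_label, k):
    # NDCG@k with binary gains: DCG of the predicted order divided by the
    # DCG of the ideal (label-sorted) order.
    k = max(int(k), 1)
    dcg = sum((1.0 if label > 0 else 0.0) / math.log2(i + 2)
              for i, (_, label) in enumerate(score_label[:k]))
    ideal = sorted((label for _, label in score_label), reverse=True)[:k]
    idcg = sum((1.0 if label > 0 else 0.0) / math.log2(i + 2)
               for i, label in enumerate(ideal))
    return dcg / idcg if idcg > 0 else 0.0


def auc(score_label):
    # Probability that a random positive outranks a random negative
    # (pairwise formulation, O(n^2); kept quadratic for clarity).
    pos = [s for s, label in score_label if label > 0]
    neg = [s for s, label in score_label if label <= 0]
    if not pos or not neg:
        return 0.0
    wins = sum(1.0 if p > n else 0.5 if p == n else 0.0
               for p in pos for n in neg)
    return wins / (len(pos) * len(neg))


def mae(score_label):
    # Mean absolute error between predicted score and label.
    return sum(abs(s - label) for s, label in score_label) / len(score_label)


def mrse(score_label):
    # Root mean squared error (named "mrse" throughout this project).
    return math.sqrt(sum((s - label) ** 2 for s, label in score_label)
                     / len(score_label))
# ---------------------------------------------------------------------------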
def calc_metric(score_label_u):
    """Compute Precision@1..20, NDCG@1..20, AUC, MAE and MRSE for one user."""
    score_label_u = sorted(score_label_u, key=lambda d: d[0], reverse=True)
    precision = np.array(
        [eval.precision_k(score_label_u, k) for k in range(1, 21)])
    ndcg = np.array([eval.ndcg_k(score_label_u, k) for k in range(1, 21)])
    auc = eval.auc(score_label_u)
    mae = eval.mae(score_label_u)
    mrse = eval.mrse(score_label_u)
    return precision, ndcg, auc, mae, mrse
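# ---------------------------------------------------------------------------
# Minimal usage sketch. The (uid, score, label) triples below are synthetic
# and exist only to show the expected input layout and the shape of the
# returned metrics; they are not part of the project's data pipeline.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    demo = []
    for uid in range(20):
        for _ in range(12):  # >= 10 interactions, so every user passes the filter
            label = int(rng.random() < 0.3)     # synthetic binary label
            score = rng.random() + 0.2 * label  # scores weakly track labels
            demo.append((uid, score, label))
    P, N, A, GP, GA, MAE, MRSE = get_metric(demo)
    print('per-user AUC: %.4f   global AUC: %.4f' % (A, GA))
    print('per-user Precision@1..20:', np.round(P, 4))
    print('global top-k%% Precision :', np.round(GP, 4))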