def eval_one_epoch(self, epoch):
    """Evaluate ranking quality on both domains for one epoch.

    Builds per-user (uid, iid) evaluation lists for the two domains,
    scores one user's ranking list per session call, and prints the
    averaged HR and nDCG@topk over all users of domain 1.

    Args:
        epoch: int epoch index, used only for log output.
    """
    uid1, iid1, uid2, iid2 = [], [], [], []
    for u in range(self.num_user1):
        # Each user contributes one ranking list of num_ranking_list items
        # per domain.
        uid1.extend([u] * self.num_ranking_list)
        iid1.extend(self.test_dict1[u])
        uid2.extend([u] * self.num_ranking_list)
        iid2.extend(self.test_dict2[u])
    batch_size = self.num_ranking_list
    n_batches = 0
    n_mrr1 = 0
    n_mrr2 = 0
    total_hr1 = 0
    total_hr2 = 0
    total_ndcg1 = 0
    total_ndcg2 = 0
    total_mrr1 = 0
    total_mrr2 = 0
    for i in range(self.num_user1):
        # One batch == one user's full ranking list in each domain.
        batch_uid1, batch_iid1, batch_uid2, batch_iid2 = \
            uid1[i * batch_size:(i + 1) * batch_size], \
            iid1[i * batch_size:(i + 1) * batch_size], \
            uid2[i * batch_size:(i + 1) * batch_size], \
            iid2[i * batch_size:(i + 1) * batch_size]
        rk1, rk2 = self.session.run(
            [self.scores1, self.scores2],
            feed_dict={
                self.dom1_uid: batch_uid1,
                self.dom1_iid: batch_iid1,
                self.dom2_uid: batch_uid2,
                self.dom2_iid: batch_iid2
            })
        _, hr1, ndcg1, mrr1 = evl.evalTopK(rk1, batch_iid1, self.topk)
        _, hr2, ndcg2, mrr2 = evl.evalTopK(rk2, batch_iid2, self.topk)
        n_batches += 1
        total_hr1 += hr1
        total_hr2 += hr2
        total_ndcg1 += ndcg1
        total_ndcg2 += ndcg2
        # MRR can come back inf when there is no hit; exclude those
        # batches so the running totals stay finite.
        if not np.isinf(mrr1):
            n_mrr1 += 1
            total_mrr1 += mrr1
        if not np.isinf(mrr2):
            n_mrr2 += 1
            total_mrr2 += mrr2
    # NOTE(review): MRR totals are accumulated but never reported here —
    # presumably intentional; confirm before removing the bookkeeping.
    print("Epoch {0}: [HR] {1} and {2}".format(epoch,
                                               total_hr1 / n_batches,
                                               total_hr2 / n_batches))
    print("Epoch {0}: [nDCG@{1}] {2} and {3}".format(
        epoch, self.topk, total_ndcg1 / n_batches,
        total_ndcg2 / n_batches))
def eval_one_epoch(self, session, init, epoch):
    """Run one full evaluation pass over the (re-)initialized input pipeline.

    Consumes batches until the dataset iterator is exhausted (signalled
    by tf.errors.OutOfRangeError), accumulating HR, nDCG and MRR for
    both domains, then prints the averaged HR and nDCG@K.

    Args:
        session: active tf.Session used to run the graph.
        init: initializer op for the evaluation dataset iterator.
        epoch: int epoch index, used only for log output.
    """
    session.run(init)
    self.training = False
    n_batches = 0
    n_mrr1 = 0
    n_mrr2 = 0
    total_hr1 = 0
    total_hr2 = 0
    total_ndcg1 = 0
    total_ndcg2 = 0
    total_mrr1 = 0
    total_mrr2 = 0
    try:
        while True:
            rk1, rk2, iid1, iid2 = session.run(
                [self.scores1, self.scores2, self.dom1_iid,
                 self.dom2_iid])
            _, hr1, ndcg1, mrr1 = evl.evalTopK(rk1, iid1, self.K)
            _, hr2, ndcg2, mrr2 = evl.evalTopK(rk2, iid2, self.K)
            n_batches += 1
            total_hr1 += hr1
            total_hr2 += hr2
            total_ndcg1 += ndcg1
            total_ndcg2 += ndcg2
            # MRR may be inf when no hit occurs; exclude those batches
            # so the totals (and counters) stay finite.
            if not np.isinf(mrr1):
                n_mrr1 += 1
                total_mrr1 += mrr1
            if not np.isinf(mrr2):
                n_mrr2 += 1
                total_mrr2 += mrr2
    except tf.errors.OutOfRangeError:
        # Iterator exhausted: the evaluation pass is complete.
        pass
    print("Epoch {0}: [HR] {1} and {2}".format(epoch,
                                               total_hr1 / n_batches,
                                               total_hr2 / n_batches))
    print("Epoch {0}: [nDCG@{1}] {2} and {3}".format(
        epoch, self.K, total_ndcg1 / n_batches, total_ndcg2 / n_batches))
def eval_one_epoch(self, epoch):
    """Score every user from one forward pass over the clean training
    matrix and report HR/nDCG at each cutoff in self.topK.

    Returns the (HR, nDCG) pair at the first cutoff.
    """
    # Forward pass on the uncorrupted training data; dropout disabled.
    predictions = self.session.run(
        self.pred_y,
        feed_dict={
            self.ratings: self.train_array,
            # self.output_mask: self.negative_output_mask,
            self.uid: range(self.num_user),
            self.istraining: False,
            self.isnegsample: self.is_neg_sa,
            self.layer1_dropout_rate: 0
        })
    predictions = predictions.clip(min=0, max=1)
    user_count = 0
    hr_sum = np.zeros(len(self.topK))
    ndcg_sum = np.zeros(len(self.topK))
    # Rank each user's candidate items and accumulate per-cutoff metrics.
    for user, cand_items in self.ranking_dict.items():
        scores = predictions[user, np.array(cand_items)]
        user_count += 1
        hr, ndcg = evl.rankingMetrics(scores, cand_items, self.topK,
                                      self.test_dict[user], mod='hr')
        hr_sum += hr
        ndcg_sum += ndcg
    avg_hr = hr_sum / user_count
    avg_ndcg = ndcg_sum / user_count
    for k, h, n in zip(self.topK, avg_hr, avg_ndcg):
        print('-' * 55)
        print("Epoch {0}: [HR@{1}] {2}".format(epoch, k, h))
        print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, k, n))
    print('=' * 55)
    return avg_hr[0], avg_ndcg[0]
def eval_one_epoch(self, epoch):
    """Rank from one forward pass over the clean training matrix and
    report averaged Precision/Recall/MAP at self.topK over all users.

    Returns (avg_precision, avg_recall, avg_map).
    """
    # Forward pass on the uncorrupted training data; dropout disabled.
    scores_all = self.session.run(
        self.pred_y,
        feed_dict={
            self.ratings: self.train_array,
            # self.output_mask: self.negative_output_mask,
            self.uid: range(self.num_user),
            self.istraining: False,
            self.isnegsample: self.is_neg_sa,
            self.layer1_dropout_rate: 0
        })
    scores_all = scores_all.clip(min=0, max=1)
    user_count = 0
    prec_sum = recall_sum = ap_sum = 0
    # Build one ranking list per user and accumulate the metrics.
    for user, cand_items in self.ranking_dict.items():
        user_scores = scores_all[user, np.array(cand_items)]
        prec, recall, ap = evl.rankingMetrics(user_scores, cand_items,
                                              self.topK,
                                              self.test_dict[user],
                                              mod='precision')
        user_count += 1
        prec_sum += prec
        recall_sum += recall
        ap_sum += ap
    avg_prec = prec_sum / user_count
    avg_recall = recall_sum / user_count
    avg_ap = ap_sum / user_count
    print("Epoch {0}: [Precision@{1}] {2}".format(epoch, self.topK,
                                                  avg_prec))
    print("Epoch {0}: [Recall@{1}] {2}".format(epoch, self.topK,
                                               avg_recall))
    print("Epoch {0}: [MAP@{1}] {2}".format(epoch, self.topK, avg_ap))
    print("=" * 40)
    return avg_prec, avg_recall, avg_ap
def eval_one_epoch(self, epoch):
    """Average HR, MRR and nDCG@topk over every user's ranking list,
    scoring one user per session call."""
    user_count = 0
    hr_sum = ndcg_sum = mrr_sum = 0
    for user, cand_items in self.ranking_dict.items():
        feed = {self.uid: [user] * len(cand_items),
                self.iid: cand_items}
        scores = self.session.run(self.pred_y, feed_dict=feed)
        hr, ndcg, mrr = evl.rankingMetrics(scores, cand_items, self.topk,
                                           self.test_dict[user])
        user_count += 1
        hr_sum += hr
        ndcg_sum += ndcg
        mrr_sum += mrr
    print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(
        epoch, hr_sum / user_count, mrr_sum / user_count, self.topk,
        ndcg_sum / user_count))
def eval_one_epoch(self, epoch): n_batches, total_prec, total_recall, total_ap = 0, 0, 0, 0 # n_batches, total_hr, total_ndcg, total_mrr = 0, 0, 0, 0 for u in self.ranking_dict: if len(self.test_dict[u]) == 0: continue iid = self.ranking_dict[u] uid = [u] * len(iid) rk = self.session.run(self.pred_y, feed_dict={ self.uid: uid, self.iid: iid }) # hr, ndcg, mrr = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u], mod='hr') prec, recall, _, _, _ = evl.rankingMetrics(rk, iid, [self.topK], self.test_dict[u], mod='precision', is_map=False) n_batches += 1 # total_hr += hr # total_ndcg += ndcg # total_mrr += mrr total_prec += prec[0] total_recall += recall[0] # avg_hr, avg_mrr, avg_ndcg = total_hr / n_batches, total_mrr / n_batches, total_ndcg / n_batches # print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(epoch, avg_hr, avg_mrr, self.topK, avg_ndcg)) # return avg_hr, avg_mrr,avg_ndcg avg_prec, avg_recall, avg_ap = total_prec / n_batches, total_recall / n_batches, total_ap / n_batches print("Epoch {0}: [Precision@{1}] {2}".format(epoch, self.topK, avg_prec)) print("Epoch {0}: [Recall@{1}] {2}".format(epoch, self.topK, avg_recall)) print("Epoch {0}: [MAP@{1}] {2}".format(epoch, self.topK, avg_ap)) print("=" * 40) return avg_prec, avg_recall, avg_ap
def eval_one_epoch(self, epoch):
    """Per-user ranking evaluation; prints HR/nDCG at every cutoff in
    self.topk and returns the pair at the first cutoff."""
    user_count = 0
    hr_sum = np.zeros(len(self.topk))
    ndcg_sum = np.zeros(len(self.topk))
    for user, cand_items in self.ranking_dict.items():
        feed = {self.uid: [user] * len(cand_items),
                self.iid: cand_items}
        scores = self.session.run(self.pred_y, feed_dict=feed)
        user_count += 1
        hr, ndcg = evl.rankingMetrics(scores, cand_items, self.topk,
                                      self.test_dict[user], mod='hr')
        hr_sum += hr
        ndcg_sum += ndcg
    avg_hr = hr_sum / user_count
    avg_ndcg = ndcg_sum / user_count
    for k, h, n in zip(self.topk, avg_hr, avg_ndcg):
        print('-' * 55)
        print("Epoch {0}: [HR@{1}] {2}".format(epoch, k, h))
        print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, k, n))
    print('=' * 55)
    return avg_hr[0], avg_ndcg[0]
def eval_one_epoch(self, epoch):
    """Debug-friendly evaluation: refreshes delta, logs a few weight
    entries, then reports averaged HR/MRR/nDCG@topK over all users."""
    # All three graph calls use the same evaluation-mode feed.
    feed = {
        self.ratings: self.train_array,
        self.output_mask: self.negative_output_mask,
        self.istraining: False,
        self.isnegsample: False,
        self.layer1_dropout_rate: 0
    }
    delta = self.session.run(self.update_delta, feed_dict=feed)
    layer2_w, layer2_w_org = self.session.run([self.w2, self.w2_org],
                                              feed_dict=feed)
    print("Evaluation Epoch {0}: [Delta] = {1} [W2]={2} [W2_ORG]={3}".
          format(epoch, delta[10, 0], layer2_w[10, 0],
                 layer2_w_org[10, 0]))
    # Predict from the uncorrupted training data.
    predictions = self.session.run(self.pred_y, feed_dict=feed)
    predictions = predictions.clip(min=0, max=1)
    # print("Prediction:{0}".format(predictions[0]))
    user_count = 0
    hr_sum = ndcg_sum = mrr_sum = 0
    # Build one ranking list per user and accumulate the metrics.
    for user, cand_items in self.ranking_dict.items():
        scores = predictions[user, :][np.array(cand_items)]
        hr, ndcg, mrr = evl.rankingMetrics(scores, cand_items, self.topK,
                                           self.test_dict[user])
        user_count += 1
        hr_sum += hr
        ndcg_sum += ndcg
        mrr_sum += mrr
    avg_hr = hr_sum / user_count
    avg_mrr = mrr_sum / user_count
    avg_ndcg = ndcg_sum / user_count
    print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(
        epoch, avg_hr, avg_mrr, self.topK, avg_ndcg))
    return avg_hr, avg_mrr, avg_ndcg
def eval_one_epoch_np(self, epoch):
    """Pure-NumPy evaluation of the learned factors P and Q; prints
    averaged HR and nDCG@topk over all users."""
    user_count = 0
    finite_mrr_count = 0
    hr_sum = ndcg_sum = mrr_sum = 0
    for uid in self.ranking_dict:
        # Score the user's candidate list with one dot product per item.
        rk = [np.dot(self.P[uid, :], self.Q[iid, :])
              for iid in self.ranking_dict[uid]]
        _, hr, ndcg, mrr = evl.evalTopK(rk, self.test_dict[uid],
                                        self.topk)
        user_count += 1
        hr_sum += hr
        ndcg_sum += ndcg
        # Skip infinite MRR values so the running total stays finite.
        if not np.isinf(mrr):
            finite_mrr_count += 1
            mrr_sum += mrr
    print("Epoch {0}: [HR] {1} and [nDCG@{2}] {3}".format(
        epoch, hr_sum / user_count, self.topk, ndcg_sum / user_count))
def eval_one_epoch(self, epoch):
    """Evaluate HR/MRR/nDCG@topK from one forward pass over the clean
    training matrix; returns (avg_hr, avg_mrr, avg_ndcg)."""
    # Evaluation-mode feed on the uncorrupted training data.
    feed = {
        self.ratings: self.train_array,
        self.output_mask: self.negative_output_mask,
        self.istraining: False,
        self.isnegsample: False,
        self.layer1_dropout_rate: 0
    }
    predictions = self.session.run(self.pred_y, feed_dict=feed)
    predictions = predictions.clip(min=0, max=1)
    user_count = 0
    hr_sum = ndcg_sum = mrr_sum = 0
    # One ranking list per user.
    for user, cand_items in self.ranking_dict.items():
        scores = predictions[user, :][np.array(cand_items)]
        hr, ndcg, mrr = evl.rankingMetrics(scores, cand_items, self.topK,
                                           self.test_dict[user])
        user_count += 1
        hr_sum += hr
        ndcg_sum += ndcg
        mrr_sum += mrr
    avg_hr = hr_sum / user_count
    avg_mrr = mrr_sum / user_count
    avg_ndcg = ndcg_sum / user_count
    print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(
        epoch, avg_hr, avg_mrr, self.topK, avg_ndcg))
    return avg_hr, avg_mrr, avg_ndcg
def eval_one_epoch(self, epoch):
    """Per-user ranking evaluation.

    Scores each user's candidate list (adversarially perturbed
    predictions when self.is_adv) and reports either Precision/Recall
    (when self.is_prec) or HR/nDCG at every cutoff in self.topK;
    returns the pair of metrics at the first cutoff.
    """
    user_count = 0
    metric_a = np.zeros(len(self.topK))  # precision or HR per cutoff
    metric_b = np.zeros(len(self.topK))  # recall or nDCG per cutoff
    # if self.robust_test:
    #     print('[Eps={0}] {1} Noise Level [Robust Test]'.format(self.eps, self.noise_type))
    #     self.session.run([self.update_P, self.update_Q], feed_dict={self.uid: uid, self.pos_iid: iid})
    # Choose the scoring tensor once; the feed is identical either way.
    score_tensor = self.pred_y_pos_adv if self.is_adv else self.pred_y_pos
    for user in self.ranking_dict:
        if len(self.test_dict[user]) == 0:
            continue  # no held-out items for this user
        cand_items = self.ranking_dict[user]
        user_count += 1
        scores = self.session.run(score_tensor,
                                  feed_dict={
                                      self.uid: [user] * len(cand_items),
                                      self.pos_iid: cand_items
                                  })
        if self.is_prec:
            precision, recall, _, _, _ = evl.rankingMetrics(
                scores, cand_items, self.topK, self.test_dict[user],
                mod='precision', is_map=False)
            metric_a += precision
            metric_b += recall
        else:
            hr, ndcg = evl.rankingMetrics(scores, cand_items, self.topK,
                                          self.test_dict[user], mod='hr')
            metric_a += hr
            metric_b += ndcg
    avg_a = metric_a / user_count
    avg_b = metric_b / user_count
    if self.is_prec:
        for k, p, r in zip(self.topK, avg_a, avg_b):
            print('-' * 55)
            print("Epoch {0}: [Precision@{1}] {2}".format(epoch, k, p))
            print("Epoch {0}: [Recall@{1}] {2}".format(epoch, k, r))
    else:
        for k, h, n in zip(self.topK, avg_a, avg_b):
            print('-' * 55)
            print("Epoch {0}: [HR@{1}] {2}".format(epoch, k, h))
            print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, k, n))
    print('=' * 55)
    return avg_a[0], avg_b[0]
# ItemPop baseline evaluation: rank each user's negative-sampled
# candidate list purely by item popularity and report HR/nDCG.
# (total_hr, total_ndcg, topK, item_pop_arr, num_user, dataset and date
# are defined earlier in the script, outside this span.)
_, ranking_dict, test_dict = mtl.negdict_mat(original_matrix,
                                             test_matrix,
                                             num_neg=199,
                                             mod='others',
                                             random_state=10)
for user in ranking_dict:
    if len(test_dict[user]) == 0:
        continue  # skip users with no held-out test items
    iid = ranking_dict[user]  # The ranking item ids for user u
    # Popularity scores serve directly as the ranking scores.
    rk = item_pop_arr[np.asarray(iid)]
    # NOTE(review): debug print inside the per-user loop — very noisy
    # for large user sets; consider removing.
    print(rk)
    hr, ndcg = evl.rankingMetrics(rk, iid, topK, test_dict[user],
                                  mod='hr')
    total_hr += hr
    total_ndcg += ndcg
# NOTE(review): averages divide by num_user even though users with empty
# test sets were skipped above — confirm this denominator is intended.
avg_hr, avg_ndcg = total_hr / num_user, total_ndcg / num_user
for i in range(len(topK)):
    print('-' * 55)
    print("[HR@{0}] {1}".format(topK[i], avg_hr[i]))
    print("[nDCG@{0}] {1}".format(topK[i], avg_ndcg[i]))
print('=' * 55)
# Ensure the result directory exists before any files are written.
save_path = "Result/%s/ItemPop/%s/" % (dataset, date)
if not os.path.exists(save_path):
    os.makedirs(save_path)
def eval_one_epoch(self, epoch):
    """One evaluation pass.

    Optionally applies the robustness perturbation (self.robust_test),
    predicts scores for every user in a single forward pass, and reports
    either Precision/Recall (self.is_prec) or HR/nDCG at each cutoff in
    self.topK; returns the pair of metrics at the first cutoff.
    """
    # Evaluation-mode feed; the uid placeholder only exists for
    # user-node models.
    feed_dict = {
        self.ratings: self.train_array,
        self.istraining: False,
        self.layer1_dropout_rate: 0
    }
    if self.is_user_node:
        feed_dict[self.uid] = range(self.num_user)
    if self.robust_test:
        print('[Pos={0}] {1} Noise Added [Robust Test]'.format(
            self.noise_pos, self.noise_type))
        print('[Eps={0}] {1} Noise Level [Robust Test]'.format(
            self.eps, self.noise_type))
        self.session.run(self.update_delta, feed_dict)
    # Predict from the uncorrupted training data.
    predictions = self.session.run(self.pred_y, feed_dict)
    # predictions = predictions.clip(min=0, max=1)
    user_count = 0
    metric_a = np.zeros(len(self.topK))  # precision or HR per cutoff
    metric_b = np.zeros(len(self.topK))  # recall or nDCG per cutoff
    # Rank each user's candidate items.
    for user in self.ranking_dict:
        if len(self.test_dict[user]) == 0:
            continue  # no held-out items for this user
        cand_items = self.ranking_dict[user]
        scores = predictions[user, np.array(cand_items)]
        user_count += 1
        if self.is_prec:
            precision, recall, _, _, _ = evl.rankingMetrics(
                scores, cand_items, self.topK, self.test_dict[user],
                mod='precision', is_map=False)
            metric_a += precision
            metric_b += recall
        else:
            hr, ndcg = evl.rankingMetrics(scores, cand_items, self.topK,
                                          self.test_dict[user], mod='hr')
            metric_a += hr
            metric_b += ndcg
    avg_a = metric_a / user_count
    avg_b = metric_b / user_count
    if self.is_prec:
        for k, p, r in zip(self.topK, avg_a, avg_b):
            print('-' * 55)
            print("Epoch {0}: [Precision@{1}] {2}".format(epoch, k, p))
            print("Epoch {0}: [Recall@{1}] {2}".format(epoch, k, r))
    else:
        for k, h, n in zip(self.topK, avg_a, avg_b):
            print('-' * 55)
            print("Epoch {0}: [HR@{1}] {2}".format(epoch, k, h))
            print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, k, n))
    print('=' * 55)
    return avg_a[0], avg_b[0]
def eval_one_epoch(self, epoch):
    """Evaluate HR/nDCG@topK; under robust_test, first refreshes the
    perturbation delta and logs a few weight entries."""
    if self.robust_test:
        # Both debug runs use the identical noise-enabled feed.
        noise_feed = {
            self.ratings: self.train_array,
            self.output_mask: self.negative_output_mask,
            self.istraining: False,
            self.isnegsample: False,
            self.add_hidden_noise: self.hidden_noise,
            self.add_weight_noise: self.weight_noise,
            self.layer1_dropout_rate: 0
        }
        delta = self.session.run(self.update_delta, feed_dict=noise_feed)
        layer2_w, layer2_w_org = self.session.run(
            [self.w2, self.w2_org], feed_dict=noise_feed)
        print("Evaluation Epoch {0}: [Delta] = {1} [W2]={2} [W2_ORG]={3}".
              format(epoch, delta[10, 0], layer2_w[10, 0],
                     layer2_w_org[10, 0]))
    # Predict from the uncorrupted training data (negative sampling as
    # configured by self.is_neg_sa).
    predictions = self.session.run(
        self.pred_y,
        feed_dict={
            self.ratings: self.train_array,
            self.output_mask: self.negative_output_mask,
            self.istraining: False,
            self.isnegsample: self.is_neg_sa,
            self.add_hidden_noise: self.hidden_noise,
            self.add_weight_noise: self.weight_noise,
            self.layer1_dropout_rate: 0
        })
    predictions = predictions.clip(min=0, max=1)
    # n_batches, total_prec, total_ap = 0, 0, 0
    user_count = 0
    hr_sum = ndcg_sum = 0
    # One ranking list per user.
    for user, cand_items in self.ranking_dict.items():
        scores = predictions[user, np.array(cand_items)]
        hr, ndcg = evl.rankingMetrics(scores, cand_items, self.topK,
                                      self.test_dict[user], mod='hr')
        user_count += 1
        hr_sum += hr
        ndcg_sum += ndcg
        # total_prec += precision
        # total_ap += ap
    avg_hr = hr_sum / user_count
    avg_ndcg = ndcg_sum / user_count
    print('=' * 50)
    print("Epoch {0}: [HR@{1}] {2}".format(epoch, self.topK, avg_hr))
    print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, self.topK,
                                             avg_ndcg))
    return avg_hr, avg_ndcg