def eval_one_epoch(self, epoch):
    # Input the uncorrupted training data
    pred_y = self.session.run(
        self.pred_y,
        feed_dict={
            self.ratings: self.train_array,
            # self.output_mask: self.negative_output_mask,
            self.uid: range(self.num_user),
            self.istraining: False,
            self.isnegsample: self.is_neg_sa,
            self.layer1_dropout_rate: 0
        })
    pred_y = pred_y.clip(min=0, max=1)

    n_batches = 0
    total_hr, total_ndcg = np.zeros(len(self.topK)), np.zeros(len(self.topK))

    # Loop for each user (generate the ranking lists for different users)
    for u in self.ranking_dict:
        iid = self.ranking_dict[u]  # The ranking item ids for user u
        rk = pred_y[u, np.array(iid)]  # The predicted item values for user u
        n_batches += 1
        hr, ndcg = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u], mod='hr')
        total_hr += hr
        total_ndcg += ndcg

    avg_hr, avg_ndcg = total_hr / n_batches, total_ndcg / n_batches
    for i in range(len(self.topK)):
        print('-' * 55)
        print("Epoch {0}: [HR@{1}] {2}".format(epoch, self.topK[i], avg_hr[i]))
        print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, self.topK[i], avg_ndcg[i]))
    print('=' * 55)
    return avg_hr[0], avg_ndcg[0]
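# For reference: a minimal, hypothetical sketch of what evl.rankingMetrics(rk, iid,
# topK, test_items, mod='hr') is assumed to compute for a single user -- HR@K and
# nDCG@K over a candidate list. This is NOT the repo's actual evl implementation;
# it only illustrates the assumed contract (scores rk aligned with item ids iid,
# one array entry per cutoff in topK).
def ranking_metrics_hr_sketch(rk, iid, topK, test_items):
    import numpy as np
    order = np.argsort(-np.asarray(rk))             # rank candidates by descending score
    ranked_items = np.asarray(iid)[order]
    hits = np.isin(ranked_items, list(test_items))  # boolean hit vector along the ranking
    hr, ndcg = np.zeros(len(topK)), np.zeros(len(topK))
    for i, k in enumerate(topK):
        topk_hits = hits[:k]
        hr[i] = 1.0 if topk_hits.any() else 0.0     # hit ratio: any relevant item in top-K
        dcg = np.sum(topk_hits / np.log2(np.arange(2, len(topk_hits) + 2)))
        ideal = np.sum(1.0 / np.log2(np.arange(2, min(k, len(test_items)) + 2)))
        ndcg[i] = dcg / ideal if ideal > 0 else 0.0
    return hr, ndcg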
def eval_one_epoch(self, epoch):
    # Input the uncorrupted training data
    pred_y = self.session.run(
        self.pred_y,
        feed_dict={
            self.ratings: self.train_array,
            # self.output_mask: self.negative_output_mask,
            self.uid: range(self.num_user),
            self.istraining: False,
            self.isnegsample: self.is_neg_sa,
            self.layer1_dropout_rate: 0
        })
    pred_y = pred_y.clip(min=0, max=1)

    n_batches, total_prec, total_recall, total_ap = 0, 0, 0, 0

    # Loop for each user (generate the ranking lists for different users)
    for u in self.ranking_dict:
        iid = self.ranking_dict[u]  # The ranking item ids for user u
        rk = pred_y[u, np.array(iid)]  # The predicted item values for user u
        prec, recall, ap = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u], mod='precision')
        n_batches += 1
        total_prec += prec
        total_recall += recall
        total_ap += ap

    avg_prec, avg_recall, avg_ap = total_prec / n_batches, total_recall / n_batches, total_ap / n_batches
    print("Epoch {0}: [Precision@{1}] {2}".format(epoch, self.topK, avg_prec))
    print("Epoch {0}: [Recall@{1}] {2}".format(epoch, self.topK, avg_recall))
    print("Epoch {0}: [MAP@{1}] {2}".format(epoch, self.topK, avg_ap))
    print("=" * 40)
    return avg_prec, avg_recall, avg_ap
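# For reference: a minimal, hypothetical sketch of the precision-mode metrics assumed
# above -- Precision@K, Recall@K, and AP for one user's candidate list. It is not the
# repo's evl.rankingMetrics; the exact return signature there may differ (e.g. the
# five-value return with is_map used elsewhere in this file).
def ranking_metrics_precision_sketch(rk, iid, topK, test_items):
    import numpy as np
    order = np.argsort(-np.asarray(rk))
    ranked_items = np.asarray(iid)[order]
    hits = np.isin(ranked_items, list(test_items)).astype(float)
    n_rel = max(len(test_items), 1)
    prec = np.array([hits[:k].sum() / k for k in topK])        # Precision@K
    recall = np.array([hits[:k].sum() / n_rel for k in topK])  # Recall@K
    # Average precision over the full candidate ranking
    cum_hits = np.cumsum(hits)
    precision_at_i = cum_hits / np.arange(1, len(hits) + 1)
    ap = np.sum(precision_at_i * hits) / n_rel
    return prec, recall, ap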
def eval_one_epoch(self, epoch):
    n_batches, total_hr, total_ndcg, total_mrr = 0, 0, 0, 0
    for u in self.ranking_dict:
        iid = self.ranking_dict[u]
        uid = [u] * len(iid)
        rk = self.session.run(self.pred_y, feed_dict={self.uid: uid, self.iid: iid})
        hr, ndcg, mrr = evl.rankingMetrics(rk, iid, self.topk, self.test_dict[u])
        n_batches += 1
        total_hr += hr
        total_ndcg += ndcg
        total_mrr += mrr
    print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(
        epoch, total_hr / n_batches, total_mrr / n_batches, self.topk, total_ndcg / n_batches))
def eval_one_epoch(self, epoch):
    n_batches, total_prec, total_recall, total_ap = 0, 0, 0, 0
    # n_batches, total_hr, total_ndcg, total_mrr = 0, 0, 0, 0
    for u in self.ranking_dict:
        if len(self.test_dict[u]) == 0:
            continue
        iid = self.ranking_dict[u]
        uid = [u] * len(iid)
        rk = self.session.run(self.pred_y, feed_dict={self.uid: uid, self.iid: iid})
        # hr, ndcg, mrr = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u], mod='hr')
        prec, recall, _, _, _ = evl.rankingMetrics(rk, iid, [self.topK],
                                                   self.test_dict[u],
                                                   mod='precision',
                                                   is_map=False)
        n_batches += 1
        # total_hr += hr
        # total_ndcg += ndcg
        # total_mrr += mrr
        total_prec += prec[0]
        total_recall += recall[0]

    # avg_hr, avg_mrr, avg_ndcg = total_hr / n_batches, total_mrr / n_batches, total_ndcg / n_batches
    # print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(epoch, avg_hr, avg_mrr, self.topK, avg_ndcg))
    # return avg_hr, avg_mrr, avg_ndcg
    # NOTE: MAP is not accumulated in this version (is_map=False), so avg_ap stays at 0.
    avg_prec, avg_recall, avg_ap = total_prec / n_batches, total_recall / n_batches, total_ap / n_batches
    print("Epoch {0}: [Precision@{1}] {2}".format(epoch, self.topK, avg_prec))
    print("Epoch {0}: [Recall@{1}] {2}".format(epoch, self.topK, avg_recall))
    print("Epoch {0}: [MAP@{1}] {2}".format(epoch, self.topK, avg_ap))
    print("=" * 40)
    return avg_prec, avg_recall, avg_ap
def eval_one_epoch(self, epoch):
    n_batches = 0
    total_hr, total_ndcg = np.zeros(len(self.topk)), np.zeros(len(self.topk))
    for u in self.ranking_dict:
        iid = self.ranking_dict[u]
        uid = [u] * len(iid)
        rk = self.session.run(self.pred_y, feed_dict={self.uid: uid, self.iid: iid})
        n_batches += 1
        hr, ndcg = evl.rankingMetrics(rk, iid, self.topk, self.test_dict[u], mod='hr')
        total_hr += hr
        total_ndcg += ndcg
    avg_hr, avg_ndcg = total_hr / n_batches, total_ndcg / n_batches
    for i in range(len(self.topk)):
        print('-' * 55)
        print("Epoch {0}: [HR@{1}] {2}".format(epoch, self.topk[i], avg_hr[i]))
        print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, self.topk[i], avg_ndcg[i]))
    print('=' * 55)
    return avg_hr[0], avg_ndcg[0]
def eval_one_epoch(self, epoch):
    delta = self.session.run(self.update_delta,
                             feed_dict={self.ratings: self.train_array,
                                        self.output_mask: self.negative_output_mask,
                                        self.istraining: False,
                                        self.isnegsample: False,
                                        self.layer1_dropout_rate: 0})
    layer2_w, layer2_w_org = self.session.run([self.w2, self.w2_org],
                                              feed_dict={self.ratings: self.train_array,
                                                         self.output_mask: self.negative_output_mask,
                                                         self.istraining: False,
                                                         self.isnegsample: False,
                                                         self.layer1_dropout_rate: 0})
    print("Evaluation Epoch {0}: [Delta] = {1} [W2]={2} [W2_ORG]={3}".format(
        epoch, delta[10, 0], layer2_w[10, 0], layer2_w_org[10, 0]))

    # Input the uncorrupted training data
    pred_y = self.session.run(self.pred_y,
                              feed_dict={self.ratings: self.train_array,
                                         self.output_mask: self.negative_output_mask,
                                         self.istraining: False,
                                         self.isnegsample: False,
                                         self.layer1_dropout_rate: 0})
    pred_y = pred_y.clip(min=0, max=1)
    # print("Prediction:{0}".format(pred_y[0]))

    n_batches, total_hr, total_ndcg, total_mrr = 0, 0, 0, 0
    # Loop for each user (generate the ranking lists for different users)
    for u in self.ranking_dict:
        iid = self.ranking_dict[u]  # The ranking item ids for user u
        rk = pred_y[u, :][np.array(iid)]  # The predicted item values for user u
        hr, ndcg, mrr = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u])
        n_batches += 1
        total_hr += hr
        total_ndcg += ndcg
        total_mrr += mrr
        # if u == 0:
        #     print(len(pred_y[u, :]), len(rk), len(iid), self.test_dict[u], hr)

    avg_hr, avg_mrr, avg_ndcg = total_hr / n_batches, total_mrr / n_batches, total_ndcg / n_batches
    print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(
        epoch, avg_hr, avg_mrr, self.topK, avg_ndcg))
    return avg_hr, avg_mrr, avg_ndcg
def eval_one_epoch(self, epoch):
    # Input the uncorrupted training data
    pred_y = self.session.run(self.pred_y,
                              feed_dict={self.ratings: self.train_array,
                                         self.output_mask: self.negative_output_mask,
                                         self.istraining: False,
                                         self.isnegsample: False,
                                         self.layer1_dropout_rate: 0})
    pred_y = pred_y.clip(min=0, max=1)

    n_batches, total_hr, total_ndcg, total_mrr = 0, 0, 0, 0
    # Loop for each user (generate the ranking lists for different users)
    for u in self.ranking_dict:
        iid = self.ranking_dict[u]  # The ranking item ids for user u
        rk = pred_y[u, :][np.array(iid)]  # The predicted item values for user u
        hr, ndcg, mrr = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u])
        n_batches += 1
        total_hr += hr
        total_ndcg += ndcg
        total_mrr += mrr

    avg_hr, avg_mrr, avg_ndcg = total_hr / n_batches, total_mrr / n_batches, total_ndcg / n_batches
    print("Epoch {0}: [HR] {1} and [MRR] {2} and [nDCG@{3}] {4}".format(
        epoch, avg_hr, avg_mrr, self.topK, avg_ndcg))
    return avg_hr, avg_mrr, avg_ndcg
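# For reference: a hedged sketch of the default metrics assumed in the two functions
# above -- scalar HR@K, nDCG@K and MRR for a single cutoff K and (typically) one held-out
# positive per user. Not the repo's actual evl.rankingMetrics; it only illustrates the
# assumed three-value return contract.
def ranking_metrics_mrr_sketch(rk, iid, k, test_items):
    import numpy as np
    order = np.argsort(-np.asarray(rk))
    ranked_items = np.asarray(iid)[order]
    hits = np.isin(ranked_items, list(test_items))
    hit_ranks = np.where(hits)[0] + 1                            # 1-based ranks of relevant items
    hr = 1.0 if (hit_ranks <= k).any() else 0.0                  # HR@K
    first = hit_ranks[hit_ranks <= k]
    ndcg = 1.0 / np.log2(first[0] + 1) if len(first) else 0.0    # nDCG@K with a single positive
    mrr = 1.0 / hit_ranks[0] if len(hit_ranks) else 0.0          # reciprocal rank of first hit
    return hr, ndcg, mrr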
def eval_one_epoch(self, epoch):
    n_batches = 0
    if self.is_prec:
        total_prec, total_recall = np.zeros(len(self.topK)), np.zeros(len(self.topK))
    else:
        total_hr, total_ndcg = np.zeros(len(self.topK)), np.zeros(len(self.topK))

    # if self.robust_test:
    #     print('[Eps={0}] {1} Noise Level [Robust Test]'.format(self.eps, self.noise_type))
    #     self.session.run([self.update_P, self.update_Q], feed_dict={self.uid: uid, self.pos_iid: iid})

    for u in self.ranking_dict:
        if len(self.test_dict[u]) == 0:
            continue
        iid = self.ranking_dict[u]
        uid = [u] * len(iid)
        n_batches += 1
        if self.is_adv:
            rk = self.session.run(self.pred_y_pos_adv, feed_dict={self.uid: uid, self.pos_iid: iid})
        else:
            rk = self.session.run(self.pred_y_pos, feed_dict={self.uid: uid, self.pos_iid: iid})

        if self.is_prec:
            precision, recall, _, _, _ = evl.rankingMetrics(rk, iid, self.topK,
                                                            self.test_dict[u],
                                                            mod='precision',
                                                            is_map=False)
            total_prec += precision
            total_recall += recall
        else:
            hr, ndcg = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u], mod='hr')
            total_hr += hr
            total_ndcg += ndcg

    if self.is_prec:
        avg_prec, avg_recall = total_prec / n_batches, total_recall / n_batches
        for i in range(len(self.topK)):
            print('-' * 55)
            print("Epoch {0}: [Precision@{1}] {2}".format(epoch, self.topK[i], avg_prec[i]))
            print("Epoch {0}: [Recall@{1}] {2}".format(epoch, self.topK[i], avg_recall[i]))
        print('=' * 55)
        return avg_prec[0], avg_recall[0]
    else:
        avg_hr, avg_ndcg = total_hr / n_batches, total_ndcg / n_batches
        for i in range(len(self.topK)):
            print('-' * 55)
            print("Epoch {0}: [HR@{1}] {2}".format(epoch, self.topK[i], avg_hr[i]))
            print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, self.topK[i], avg_ndcg[i]))
        print('=' * 55)
        return avg_hr[0], avg_ndcg[0]
_, ranking_dict, test_dict = mtl.negdict_mat(original_matrix, test_matrix,
                                             num_neg=199, mod='others', random_state=10)

total_hr, total_ndcg = np.zeros(len(topK)), np.zeros(len(topK))
for user in ranking_dict:
    if len(test_dict[user]) == 0:
        continue
    iid = ranking_dict[user]  # The ranking item ids for user u
    rk = item_pop_arr[np.asarray(iid)]  # Item popularity scores used as the ranking scores
    # print(rk)  # debug
    hr, ndcg = evl.rankingMetrics(rk, iid, topK, test_dict[user], mod='hr')
    total_hr += hr
    total_ndcg += ndcg

avg_hr, avg_ndcg = total_hr / num_user, total_ndcg / num_user
for i in range(len(topK)):
    print('-' * 55)
    print("[HR@{0}] {1}".format(topK[i], avg_hr[i]))
    print("[nDCG@{0}] {1}".format(topK[i], avg_ndcg[i]))
print('=' * 55)

save_path = "Result/%s/ItemPop/%s/" % (dataset, date)
if not os.path.exists(save_path):
    os.makedirs(save_path)
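# For reference: item_pop_arr above is assumed to hold one global popularity score per
# item, so the ItemPop baseline ranks every user's candidates by the same counts.
# A hypothetical way to build it from the (user x item) training interaction matrix
# (the actual construction in this repo may differ):
# item_pop_arr = np.asarray((original_matrix > 0).sum(axis=0)).flatten()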
def eval_one_epoch(self, epoch):
    if self.is_user_node:
        feed_dict = {
            self.ratings: self.train_array,
            self.uid: range(self.num_user),
            self.istraining: False,
            self.layer1_dropout_rate: 0
        }
    else:
        feed_dict = {
            self.ratings: self.train_array,
            self.istraining: False,
            self.layer1_dropout_rate: 0
        }

    if self.robust_test:
        print('[Pos={0}] {1} Noise Added [Robust Test]'.format(self.noise_pos, self.noise_type))
        print('[Eps={0}] {1} Noise Level [Robust Test]'.format(self.eps, self.noise_type))
        self.session.run(self.update_delta, feed_dict)

    # Input the uncorrupted training data
    pred_y = self.session.run(self.pred_y, feed_dict)
    # pred_y = pred_y.clip(min=0, max=1)

    n_batches = 0
    if self.is_prec:
        total_prec, total_recall = np.zeros(len(self.topK)), np.zeros(len(self.topK))
    else:
        total_hr, total_ndcg = np.zeros(len(self.topK)), np.zeros(len(self.topK))

    # Loop for each user (generate the ranking lists for different users)
    for u in self.ranking_dict:
        if len(self.test_dict[u]) == 0:
            continue
        iid = self.ranking_dict[u]  # The ranking item ids for user u
        rk = pred_y[u, np.array(iid)]  # The predicted item values for user u
        n_batches += 1
        if self.is_prec:
            precision, recall, _, _, _ = evl.rankingMetrics(rk, iid, self.topK,
                                                            self.test_dict[u],
                                                            mod='precision',
                                                            is_map=False)
            total_prec += precision
            total_recall += recall
        else:
            hr, ndcg = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u], mod='hr')
            total_hr += hr
            total_ndcg += ndcg

    if self.is_prec:
        avg_prec, avg_recall = total_prec / n_batches, total_recall / n_batches
        for i in range(len(self.topK)):
            print('-' * 55)
            print("Epoch {0}: [Precision@{1}] {2}".format(epoch, self.topK[i], avg_prec[i]))
            print("Epoch {0}: [Recall@{1}] {2}".format(epoch, self.topK[i], avg_recall[i]))
        print('=' * 55)
        return avg_prec[0], avg_recall[0]
    else:
        avg_hr, avg_ndcg = total_hr / n_batches, total_ndcg / n_batches
        for i in range(len(self.topK)):
            print('-' * 55)
            print("Epoch {0}: [HR@{1}] {2}".format(epoch, self.topK[i], avg_hr[i]))
            print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, self.topK[i], avg_ndcg[i]))
        print('=' * 55)
        return avg_hr[0], avg_ndcg[0]
def eval_one_epoch(self, epoch):
    if self.robust_test:
        delta = self.session.run(self.update_delta,
                                 feed_dict={
                                     self.ratings: self.train_array,
                                     self.output_mask: self.negative_output_mask,
                                     self.istraining: False,
                                     self.isnegsample: False,
                                     self.add_hidden_noise: self.hidden_noise,
                                     self.add_weight_noise: self.weight_noise,
                                     self.layer1_dropout_rate: 0
                                 })
        layer2_w, layer2_w_org = self.session.run(
            [self.w2, self.w2_org],
            feed_dict={
                self.ratings: self.train_array,
                self.output_mask: self.negative_output_mask,
                self.istraining: False,
                self.isnegsample: False,
                self.add_hidden_noise: self.hidden_noise,
                self.add_weight_noise: self.weight_noise,
                self.layer1_dropout_rate: 0
            })
        print("Evaluation Epoch {0}: [Delta] = {1} [W2]={2} [W2_ORG]={3}".format(
            epoch, delta[10, 0], layer2_w[10, 0], layer2_w_org[10, 0]))

    # Input the uncorrupted training data
    pred_y = self.session.run(self.pred_y,
                              feed_dict={
                                  self.ratings: self.train_array,
                                  self.output_mask: self.negative_output_mask,
                                  self.istraining: False,
                                  self.isnegsample: self.is_neg_sa,
                                  self.add_hidden_noise: self.hidden_noise,
                                  self.add_weight_noise: self.weight_noise,
                                  self.layer1_dropout_rate: 0
                              })
    pred_y = pred_y.clip(min=0, max=1)

    # n_batches, total_prec, total_ap = 0, 0, 0
    n_batches, total_hr, total_ndcg = 0, 0, 0
    # Loop for each user (generate the ranking lists for different users)
    for u in self.ranking_dict:
        iid = self.ranking_dict[u]  # The ranking item ids for user u
        rk = pred_y[u, np.array(iid)]  # The predicted item values for user u
        hr, ndcg = evl.rankingMetrics(rk, iid, self.topK, self.test_dict[u], mod='hr')
        n_batches += 1
        total_hr += hr
        total_ndcg += ndcg
        # total_prec += precision
        # total_ap += ap

    avg_hr, avg_ndcg = total_hr / n_batches, total_ndcg / n_batches
    print('=' * 50)
    print("Epoch {0}: [HR@{1}] {2}".format(epoch, self.topK, avg_hr))
    print("Epoch {0}: [nDCG@{1}] {2}".format(epoch, self.topK, avg_ndcg))
    return avg_hr, avg_ndcg