def ndcg(self, l, dis, label, feature, label_i=5):
    # l is the list of cutoffs to report; label_i selects which cutoff's NDCG is returned.
    label_index = l.index(label_i)
    if dis:
        res = ndcg_at_k(self.dis, label, feature, k=l, use_cuda=self.cuda)
    else:
        res = ndcg_at_k(self.gen, label, feature, k=l, use_cuda=self.cuda)
    for i in range(len(l)):
        print('ndcg@{}:{:.4f} '.format(l[i], res[i]))
    return res[label_index]
query_pos_test = ut.get_query_pos(workdir + '/test.txt')

param_best = cPickle.load(open(GAN_MODEL_BEST_FILE))
assert param_best is not None
generator_best = GEN(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, G_LEARNING_RATE,
                     temperature=TEMPERATURE, param=param_best)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.initialize_all_variables())

p_1_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=1)
p_3_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=3)
p_5_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=5)
p_10_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=10)

ndcg_1_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=1)
ndcg_3_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=3)
ndcg_5_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=5)
ndcg_10_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=10)

map_best = MAP(sess, generator_best, query_pos_test, query_pos_train, query_url_feature)
mrr_best = MRR(sess, generator_best, query_pos_test, query_pos_train, query_url_feature)

print("Best ", "p@1 ", p_1_best, "p@3 ", p_3_best, "p@5 ", p_5_best, "p@10 ", p_10_best)
print("Best ", "ndcg@1 ", ndcg_1_best, "ndcg@3 ", ndcg_3_best, "ndcg@5 ", ndcg_5_best, "ndcg@10 ", ndcg_10_best)
print("Best MAP ", map_best)
print("Best MRR ", mrr_best)
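# Hedged reference sketch (not the repo's ndcg_at_k): NDCG@k for a binary-relevance
# ranked list, the metric reported by the evaluation above. `rels` is the relevance
# (1/0) of the returned documents in rank order; all names here are illustrative only.
import numpy as np

def dcg_at_k_sketch(rels, k):
    rels = np.asarray(rels, dtype=float)[:k]
    if rels.size == 0:
        return 0.0
    discounts = np.log2(np.arange(2, rels.size + 2))  # log2(rank + 1)
    return float(np.sum(rels / discounts))

def ndcg_at_k_sketch(rels, k):
    idcg = dcg_at_k_sketch(sorted(rels, reverse=True), k)
    return dcg_at_k_sketch(rels, k) / idcg if idcg > 0 else 0.0

# Relevant documents at ranks 1 and 4 out of 5 retrieved:
print(ndcg_at_k_sketch([1, 0, 0, 1, 0], k=5))  # ~0.88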
def main():
    discriminator = DIS(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, D_LEARNING_RATE, param=None)
    generator = GEN(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, G_LEARNING_RATE,
                    temperature=TEMPERATURE, param=None)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.initialize_all_variables())

    print('start adversarial training')
    p_best_val = 0.0
    ndcg_best_val = 0.0

    for epoch in range(30):
        if epoch >= 0:
            # G generates negatives for D, then train D
            print('Training D ...')
            for d_epoch in range(100):
                if d_epoch % 30 == 0:
                    generate_for_d(sess, generator, DIS_TRAIN_FILE)
                    train_size = ut.file_len(DIS_TRAIN_FILE)

                index = 1
                while True:
                    if index > train_size:
                        break
                    if index + BATCH_SIZE <= train_size + 1:
                        input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, BATCH_SIZE)
                    else:
                        input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, train_size - index + 1)
                    index += BATCH_SIZE

                    pred_data = []
                    pred_data.extend(input_pos)
                    pred_data.extend(input_neg)
                    pred_data = np.asarray(pred_data)

                    pred_data_label = [1.0] * len(input_pos)
                    pred_data_label.extend([0.0] * len(input_neg))
                    pred_data_label = np.asarray(pred_data_label)

                    _ = sess.run(discriminator.d_updates,
                                 feed_dict={discriminator.pred_data: pred_data,
                                            discriminator.pred_data_label: pred_data_label})

        # Train G
        print('Training G ...')
        for g_epoch in range(30):
            for query in query_pos_train.keys():
                pos_list = query_pos_train[query]
                pos_set = set(pos_list)
                all_list = query_index_url[query]

                all_list_feature = [query_url_feature[query][url] for url in all_list]
                all_list_feature = np.asarray(all_list_feature)
                all_list_score = sess.run(generator.pred_score,
                                          {generator.pred_data: all_list_feature})

                # softmax over all candidate documents
                exp_rating = np.exp(all_list_score - np.max(all_list_score))
                prob = exp_rating / np.sum(exp_rating)

                # importance sampling: smooth the document distribution toward the positives
                prob_IS = prob * (1.0 - LAMBDA)
                for i in range(len(all_list)):
                    if all_list[i] in pos_set:
                        prob_IS[i] += (LAMBDA / (1.0 * len(pos_list)))

                choose_index = np.random.choice(np.arange(len(all_list)), [5 * len(pos_list)], p=prob_IS)
                choose_list = np.array(all_list)[choose_index]
                choose_feature = [query_url_feature[query][url] for url in choose_list]
                choose_IS = np.array(prob)[choose_index] / np.array(prob_IS)[choose_index]

                choose_index = np.asarray(choose_index)
                choose_feature = np.asarray(choose_feature)
                choose_IS = np.asarray(choose_IS)

                choose_reward = sess.run(discriminator.reward,
                                         feed_dict={discriminator.pred_data: choose_feature})

                _ = sess.run(generator.g_updates,
                             feed_dict={generator.pred_data: all_list_feature,
                                        generator.sample_index: choose_index,
                                        generator.reward: choose_reward,
                                        generator.important_sampling: choose_IS})

            p_5 = precision_at_k(sess, generator, query_pos_test, query_pos_train, query_url_feature, k=5)
            ndcg_5 = ndcg_at_k(sess, generator, query_pos_test, query_pos_train, query_url_feature, k=5)
            if p_5 > p_best_val:
                p_best_val = p_5
                ndcg_best_val = ndcg_5
                generator.save_model(sess, GAN_MODEL_BEST_FILE)
                print("Best:", "gen p@5 ", p_5, "gen ndcg@5 ", ndcg_5)
            elif p_5 == p_best_val:
                if ndcg_5 > ndcg_best_val:
                    ndcg_best_val = ndcg_5
                    generator.save_model(sess, GAN_MODEL_BEST_FILE)
                    print("Best:", "gen p@5 ", p_5, "gen ndcg@5 ", ndcg_5)

    sess.close()

    # reload the best generator and report the final metrics
    param_best = cPickle.load(open(GAN_MODEL_BEST_FILE))
    assert param_best is not None
    generator_best = GEN(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, G_LEARNING_RATE,
                         temperature=TEMPERATURE, param=param_best)
    sess = tf.Session(config=config)
    sess.run(tf.initialize_all_variables())

    p_1_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=1)
    p_3_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=3)
    p_5_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=5)
    p_10_best = precision_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=10)

    ndcg_1_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=1)
    ndcg_3_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=3)
    ndcg_5_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=5)
    ndcg_10_best = ndcg_at_k(sess, generator_best, query_pos_test, query_pos_train, query_url_feature, k=10)

    map_best = MAP(sess, generator_best, query_pos_test, query_pos_train, query_url_feature)
    mrr_best = MRR(sess, generator_best, query_pos_test, query_pos_train, query_url_feature)

    print("Best ", "p@1 ", p_1_best, "p@3 ", p_3_best, "p@5 ", p_5_best, "p@10 ", p_10_best)
    print("Best ", "ndcg@1 ", ndcg_1_best, "ndcg@3 ", ndcg_3_best, "ndcg@5 ", ndcg_5_best, "ndcg@10 ", ndcg_10_best)
    print("Best MAP ", map_best)
    print("Best MRR ", mrr_best)
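# Hedged sketch of the sampling scheme inside the generator step above: documents are
# drawn from the mixture prob_IS = (1 - LAMBDA) * softmax(score) + LAMBDA * uniform(positives),
# and each sample carries the weight prob / prob_IS so the policy-gradient update still
# estimates an expectation under the generator's own distribution. Scores, LAMBDA_DEMO and
# pos_idx below are made-up illustration values, not taken from the training run.
import numpy as np

LAMBDA_DEMO = 0.5
scores = np.array([2.0, 0.5, 1.0, -1.0])      # generator scores for four candidate docs
pos_idx = [0, 2]                              # docs 0 and 2 are labelled positive

exp_rating = np.exp(scores - np.max(scores))  # numerically stable softmax
prob = exp_rating / exp_rating.sum()          # generator distribution p

prob_IS = prob * (1.0 - LAMBDA_DEMO)          # smoothed proposal p_IS (still sums to 1)
for i in pos_idx:
    prob_IS[i] += LAMBDA_DEMO / len(pos_idx)

idx = np.random.choice(len(scores), size=10, p=prob_IS)
is_weight = prob[idx] / prob_IS[idx]          # importance-sampling correction
print(prob.round(3), prob_IS.round(3), is_weight.round(3))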
def main():
    discriminator = DIS(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, D_LEARNING_RATE, param=None)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.initialize_all_variables())

    print('start random negative sampling with log ranking discriminator')
    generate_uniform(DIS_TRAIN_FILE)
    train_size = ut.file_len(DIS_TRAIN_FILE)

    p_best_val = 0.0
    ndcg_best_val = 0.0

    for epoch in range(200):
        index = 1
        while True:
            if index > train_size:
                break
            if index + BATCH_SIZE <= train_size + 1:
                input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, BATCH_SIZE)
            else:
                input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, train_size - index + 1)
            index += BATCH_SIZE

            pred_data = []
            pred_data.extend(input_pos)
            pred_data.extend(input_neg)
            pred_data = np.asarray(pred_data)

            pred_data_label = [1.0] * len(input_pos)
            pred_data_label.extend([0.0] * len(input_neg))
            pred_data_label = np.asarray(pred_data_label)

            _ = sess.run(discriminator.d_updates,
                         feed_dict={discriminator.pred_data: pred_data,
                                    discriminator.pred_data_label: pred_data_label})

        p_5 = precision_at_k(sess, discriminator, query_pos_test, query_pos_train, query_url_feature, k=5)
        ndcg_5 = ndcg_at_k(sess, discriminator, query_pos_test, query_pos_train, query_url_feature, k=5)
        if p_5 > p_best_val:
            p_best_val = p_5
            ndcg_best_val = ndcg_5
            discriminator.save_model(sess, MLE_MODEL_BEST_FILE)
            print("Best: ", " p@5 ", p_5, "ndcg@5 ", ndcg_5)
        elif p_5 == p_best_val:
            if ndcg_5 > ndcg_best_val:
                ndcg_best_val = ndcg_5
                discriminator.save_model(sess, MLE_MODEL_BEST_FILE)
                print("Best: ", " p@5 ", p_5, "ndcg@5 ", ndcg_5)

    sess.close()

    # reload the best discriminator and report the final metrics
    param_best = cPickle.load(open(MLE_MODEL_BEST_FILE))
    assert param_best is not None
    discriminator_best = DIS(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, D_LEARNING_RATE, param=param_best)
    sess = tf.Session(config=config)
    sess.run(tf.initialize_all_variables())

    p_1_best = precision_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=1)
    p_3_best = precision_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=3)
    p_5_best = precision_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=5)
    p_10_best = precision_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=10)

    ndcg_1_best = ndcg_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=1)
    ndcg_3_best = ndcg_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=3)
    ndcg_5_best = ndcg_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=5)
    ndcg_10_best = ndcg_at_k(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature, k=10)

    map_best = MAP(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature)
    mrr_best = MRR(sess, discriminator_best, query_pos_test, query_pos_train, query_url_feature)

    print("Best ", "p@1 ", p_1_best, "p@3 ", p_3_best, "p@5 ", p_5_best, "p@10 ", p_10_best)
    print("Best ", "ndcg@1 ", ndcg_1_best, "ndcg@3 ", ndcg_3_best, "ndcg@5 ", ndcg_5_best, "ndcg@10 ", ndcg_10_best)
    print("Best MAP ", map_best)
    print("Best MRR ", mrr_best)
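# Hedged reference sketch (not the repo's precision_at_k): precision@k for a
# binary-relevance ranked list, the other metric tracked during training above.
import numpy as np

def precision_at_k_sketch(rels, k):
    rels = np.asarray(rels, dtype=float)[:k]
    return float(rels.sum() / k)

print(precision_at_k_sketch([1, 0, 0, 1, 0], k=5))  # 0.4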
def main(): print("load initial model ...") param_nn = cPickle.load(open(DIS_MODEL_FILE_NN)) assert param_nn is not None discriminator = DIS(FEATURE_SIZE, HIDDEN_SIZE, D_WEIGHT_DECAY, D_LEARNING_RATE, loss='log', param=param_nn) generator = GEN(FEATURE_SIZE, HIDDEN_SIZE, G_WEIGHT_DECAY, G_LEARNING_RATE, param=param_nn) config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) sess.run(tf.initialize_all_variables()) print('start adversarial training') p_best_val = 0.0 ndcg_best_val = 0.0 for epoch in range(30): if epoch > 0: # G generate negative for D, then train D print('Training D ...') generate_for_d(sess, generator, DIS_TRAIN_FILE) train_size = ut.file_len(DIS_TRAIN_FILE) for d_epoch in range(30): index = 1 while True: if index > train_size: break if index + BATCH_SIZE <= train_size + 1: input_pos, input_neg = ut.get_batch_data( DIS_TRAIN_FILE, index, BATCH_SIZE) else: input_pos, input_neg = ut.get_batch_data( DIS_TRAIN_FILE, index, train_size - index + 1) index += BATCH_SIZE _ = sess.run(discriminator.d_updates, feed_dict={ discriminator.pos_data: input_pos, discriminator.neg_data: input_neg }) p_5 = precision_at_k(sess, discriminator, query_pos_test, query_pos_train, query_url_feature, k=5) ndcg_5 = ndcg_at_k(sess, discriminator, query_pos_test, query_pos_train, query_url_feature, k=5) if p_5 > p_best_val: p_best_val = p_5 ndcg_best_val = ndcg_5 discriminator.save_model(sess, GAN_MODEL_BEST_FILE) print("Best: ", "dis p@5 ", p_5, "dis ndcg@5 ", ndcg_5) elif p_5 == p_best_val: if ndcg_5 > ndcg_best_val: ndcg_best_val = ndcg_5 discriminator.save_model(sess, GAN_MODEL_BEST_FILE) print("Best: ", "dis p@5 ", p_5, "dis ndcg@5 ", ndcg_5) # Train G print('Training G ...') for g_epoch in range(50): # 50 for query in query_pos_train.keys(): pos_list = query_pos_train[query] # candidate_list = list(set(query_url_feature[query].keys()) - set(pos_list)) candidate_list = list(query_url_feature[query].keys()) if len(candidate_list) <= 0: continue candidate_list_feature = [ query_url_feature[query][url] for url in candidate_list ] candidate_list_feature = np.asarray(candidate_list_feature) candidate_list_score = sess.run( generator.pred_score, {generator.pred_data: candidate_list_feature}) # softmax for all exp_rating = np.exp(candidate_list_score) prob = exp_rating / np.sum(exp_rating) neg_index = np.random.choice(np.arange(len(candidate_list)), size=[len(pos_list)], p=prob) neg_list = np.array(candidate_list)[neg_index] pos_list_feature = [ query_url_feature[query][url] for url in pos_list ] neg_list_feature = [ query_url_feature[query][url] for url in neg_list ] neg_index = np.asarray(neg_index) # every negative samples have a reward neg_reward = sess.run(discriminator.reward, feed_dict={ discriminator.pos_data: pos_list_feature, discriminator.neg_data: neg_list_feature }) # Method 1: softmax before gather _ = sess.run(generator.gan_updates, feed_dict={ generator.pred_data: candidate_list_feature, generator.sample_index: neg_index, generator.reward: neg_reward }) print('Best p@5: ', p_best_val, 'Best ndcg@5: ', ndcg_best_val)
def main():
    # build discriminator and generator
    discriminator = DIS(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, D_LEARNING_RATE)
    generator = GEN(FEATURE_SIZE, HIDDEN_SIZE, WEIGHT_DECAY, G_LEARNING_RATE, temperature=TEMPERATURE)

    print('start adversarial training')
    p_best_val = 0.0
    ndcg_best_val = 0.0

    for epoch in range(30):
        if epoch >= 0:
            # G generates negatives for D, then train D
            print('Training D ...')
            for d_epoch in range(100):
                if d_epoch % 30 == 0:
                    generate_for_d(generator, DIS_TRAIN_FILE)
                    train_size = ut.file_len(DIS_TRAIN_FILE)

                index = 1
                while True:
                    if index > train_size:
                        break
                    if index + BATCH_SIZE <= train_size + 1:
                        input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, BATCH_SIZE)
                    else:
                        input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, train_size - index + 1)
                    index += BATCH_SIZE

                    # prepare positive and negative data
                    pred_data = []
                    pred_data.extend(input_pos)
                    pred_data.extend(input_neg)
                    pred_data = np.asarray(pred_data)

                    # prepare positive and negative labels
                    pred_data_label = [1.0] * len(input_pos)
                    pred_data_label.extend([0.0] * len(input_neg))
                    pred_data_label = np.asarray(pred_data_label)

                    # train D
                    discriminator.train(pred_data, pred_data_label)

        # Train G
        print('Training G ...')
        for g_epoch in range(10):
            start_time = time.time()
            print('now G_epoch : ', str(g_epoch))

            for query in query_pos_train.keys():
                pos_list = query_pos_train[query]
                pos_set = set(pos_list)

                # all urls and their features for this query
                all_list = query_index_url[query]
                all_list_feature = [query_url_feature[query][url] for url in all_list]
                all_list_feature = np.asarray(all_list_feature)

                # G produces a probability for every url
                prob = generator.get_prob(all_list_feature[np.newaxis, :])
                prob = prob[0]
                prob = prob.reshape([-1])

                # importance sampling: smooth the document distribution toward the positives
                prob_IS = prob * (1.0 - LAMBDA)
                for i in range(len(all_list)):
                    if all_list[i] in pos_set:
                        prob_IS[i] += (LAMBDA / (1.0 * len(pos_list)))

                # G samples some urls (5 * number of positive docs)
                choose_index = np.random.choice(np.arange(len(all_list)), [5 * len(pos_list)], p=prob_IS)
                # chosen urls and their features
                choose_list = np.array(all_list)[choose_index]
                choose_feature = [query_url_feature[query][url] for url in choose_list]
                # prob / importance-sampling prob (loss => prob * reward * prob / importance-sampling prob)
                choose_IS = np.array(prob)[choose_index] / np.array(prob_IS)[choose_index]

                choose_index = np.asarray(choose_index)
                choose_feature = np.asarray(choose_feature)
                choose_IS = np.asarray(choose_IS)

                # get reward ((prob - 0.5) * 2)
                choose_reward = discriminator.get_preresult(choose_feature)

                # train G
                generator.train(choose_feature[np.newaxis, :],
                                choose_reward.reshape([-1])[np.newaxis, :],
                                choose_IS[np.newaxis, :])
            print("train end--- %s seconds ---" % (time.time() - start_time))

            p_5 = precision_at_k(generator, query_pos_test, query_pos_train, query_url_feature, k=5)
            ndcg_5 = ndcg_at_k(generator, query_pos_test, query_pos_train, query_url_feature, k=5)
            if p_5 > p_best_val:
                p_best_val = p_5
                ndcg_best_val = ndcg_5
                generator.save_model(GAN_MODEL_BEST_FILE)
                print("Best:", "gen p@5 ", p_5, "gen ndcg@5 ", ndcg_5)
            elif p_5 == p_best_val:
                if ndcg_5 > ndcg_best_val:
                    ndcg_best_val = ndcg_5
                    generator.save_model(GAN_MODEL_BEST_FILE)
                    print("Best:", "gen p@5 ", p_5, "gen ndcg@5 ", ndcg_5)
def main():
    p_best_val = 0.0
    ndcg_best_val = 0.0

    for epoch in range(30):
        if epoch >= 0:
            print('Training D ...')
            for d_epoch in range(100):
                if d_epoch % 30 == 0:
                    generate_for_d(DIS_TRAIN_FILE)
                    train_size = ut.file_len(DIS_TRAIN_FILE)

                index = 1
                while True:
                    if index > train_size:
                        break
                    if index + BATCH_SIZE <= train_size + 1:
                        input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, BATCH_SIZE)
                    else:
                        input_pos, input_neg = ut.get_batch_data(DIS_TRAIN_FILE, index, train_size - index + 1)
                    index += BATCH_SIZE

                    pred_data = []
                    pred_data.extend(input_pos)
                    pred_data.extend(input_neg)
                    pred_data = np.asarray(pred_data)

                    pred_data_label = [1.0] * len(input_pos)
                    pred_data_label.extend([0.0] * len(input_neg))
                    pred_data_label = np.asarray(pred_data_label)

                    # discriminator loss plus manual L2 regularization on its parameters
                    loss_d = discriminator(torch.tensor(pred_data), torch.tensor(pred_data_label)) \
                        + WEIGHT_DECAY * (criterion(D_w1) + criterion(D_w2)
                                          + criterion(D_b1) + criterion(D_b2))
                    optimizer_D.zero_grad()
                    loss_d.backward()
                    optimizer_D.step()

                print("\r[D Epoch %d/%d] [loss: %f]" % (d_epoch, 100, loss_d.item()))

        print('Training G ...')
        for g_epoch in range(30):
            num = 0
            for query in query_pos_train.keys():
                pos_list = query_pos_train[query]
                pos_set = set(pos_list)
                all_list = query_index_url[query]

                all_list_feature = [query_url_feature[query][url] for url in all_list]
                all_list_feature = np.asarray(all_list_feature)

                with torch.cuda.device(device[0]):
                    all_list_score = generator.module.pred_score(torch.tensor(all_list_feature).cuda())
                all_list_score = all_list_score.detach().cpu().numpy()

                # softmax over all candidate documents
                exp_rating = np.exp(all_list_score - np.max(all_list_score))
                prob = exp_rating / np.sum(exp_rating)

                # importance sampling: smooth the document distribution toward the positives
                prob_IS = prob * (1.0 - LAMBDA)
                for i in range(len(all_list)):
                    if all_list[i] in pos_set:
                        prob_IS[i] += (LAMBDA / (1.0 * len(pos_list)))

                choose_index = np.random.choice(np.arange(len(all_list)),
                                                [5 * len(pos_list)],
                                                p=prob_IS.reshape(-1, ))
                choose_list = np.array(all_list)[choose_index]
                choose_feature = [query_url_feature[query][url] for url in choose_list]
                choose_IS = np.array(prob)[choose_index] / np.array(prob_IS)[choose_index]

                choose_index = np.asarray(choose_index)
                choose_feature = np.asarray(choose_feature)
                choose_IS = np.asarray(choose_IS)

                with torch.cuda.device(device[0]):
                    choose_reward = discriminator.module.get_reward(torch.tensor(choose_feature).cuda())
                choose_reward.detach_()

                # generator loss plus manual L2 regularization on its parameters
                loss_g = generator(torch.tensor(all_list_feature).cuda(),
                                   torch.tensor(choose_index),
                                   choose_reward,
                                   torch.tensor(choose_IS)) \
                    + WEIGHT_DECAY * (criterion(G_w1) + criterion(G_w2)
                                      + criterion(G_b1) + criterion(G_b2))
                optimizer_G.zero_grad()
                loss_g.backward()
                optimizer_G.step()
                num += 1

            print("\r[G Epoch %d/%d] [loss: %f]" % (g_epoch, 30, loss_g.item()))

            p_5 = precision_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=5)
            ndcg_5 = ndcg_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=5)
            if p_5 > p_best_val:
                p_best_val = p_5
                ndcg_best_val = ndcg_5
                print("Best:", "gen p@5 ", p_5, "gen ndcg@5 ", ndcg_5)
            elif p_5 == p_best_val:
                if ndcg_5 > ndcg_best_val:
                    ndcg_best_val = ndcg_5
                    print("Best:", "gen p@5 ", p_5, "gen ndcg@5 ", ndcg_5)

        # validation (kept from the original, disabled)
        # p_5 = precision_at_k(val_loader, 5)
        # if p_5 > p_best_val:
        #     p_best_val = p_5
        #     print("Best:", "gen p@5 ", p_5)
        #     torch.save(recipe_emb.state_dict(), 'saved_models/recipe_emb_%d_%.3f.pth' % (epoch, p_5))
        #     param_num = 1
        #     for param in DG_param:
        #         torch.save(param, 'saved_models/param%d_%d_%.3f.pt' % (param_num, epoch, p_5))
        #         param_num += 1

    p_1_best = precision_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=1)
    p_3_best = precision_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=3)
    p_5_best = precision_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=5)
    p_10_best = precision_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=10)

    ndcg_1_best = ndcg_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=1)
    ndcg_3_best = ndcg_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=3)
    ndcg_5_best = ndcg_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=5)
    ndcg_10_best = ndcg_at_k(device, generator, query_pos_test, query_pos_train, query_url_feature, k=10)

    # map_best = MAP(sess, generator, query_pos_test, query_pos_train, query_url_feature)
    # mrr_best = MRR(sess, generator, query_pos_test, query_pos_train, query_url_feature)

    print("Best ", "p@1 ", p_1_best, "p@3 ", p_3_best, "p@5 ", p_5_best, "p@10 ", p_10_best)
    print("Best ", "ndcg@1 ", ndcg_1_best, "ndcg@3 ", ndcg_3_best, "ndcg@5 ", ndcg_5_best, "ndcg@10 ", ndcg_10_best)
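# Hedged sketch of the manual weight-decay terms added to loss_d and loss_g above,
# assuming `criterion` is an L2 penalty over a single parameter tensor (its definition
# is not shown here). `l2_penalty`, `w` and `b` are illustrative names only.
import torch

def l2_penalty(param):
    # sum of squared entries of one parameter tensor
    return param.pow(2).sum()

w = torch.randn(3, 4, requires_grad=True)
b = torch.zeros(4, requires_grad=True)
reg = 0.01 * (l2_penalty(w) + l2_penalty(b))  # plays the role of WEIGHT_DECAY * (criterion(w) + criterion(b))
print(reg.item())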