def evaluate_test(select_net_e, rank_net_e):
    """Evaluate a selection + ranking network pair on the test fold.

    Builds a list generator over the test-fold relation file for the
    current ``letor_config['fold']``, runs every batch through
    ``select_net_e`` and ``rank_net_e`` with gradients disabled, and
    averages the per-batch MAP scores.

    Args:
        select_net_e: selection network used for evaluation.
        rank_net_e: ranking network used for evaluation.

    Returns:
        float: mean average precision over all test batches
        (0.0 if the generator yields no batches).
    """
    list_gen = ListGenerator(
        rel_file=Letor07Path + '/relation.test.fold%d.txt' % (letor_config['fold']),
        config=letor_config)
    map_v = 0.0
    map_c = 0.0
    # Pure evaluation: disable autograd to save memory and time.
    with torch.no_grad():
        for X1, X1_len, X1_id, X2, X2_len, X2_id, Y, F in \
                list_gen.get_batch(data1=loader.query_data, data2=loader.doc_data):
            X1, X1_len, X2, X2_len, Y, F = to_device(
                X1, X1_len, X2, X2_len, Y, F, device=select_device)
            X1, X2, X1_len, X2_len, X2_pos = select_net_e(
                X1, X2, X1_len, X2_len, X1_id, X2_id)
            # BUG FIX: pass the networks given as arguments, not the
            # module-level `select_net` / `rank_net` globals — otherwise
            # the adaptor is driven by the training nets rather than the
            # nets actually under evaluation.
            X2, X2_len = utils.data_adaptor(
                X2, X2_len, select_net_e, rank_net_e, letor_config)
            pred = rank_net_e(X1, X2, X1_len, X2_len, X2_pos)
            map_o = utils.eval_MAP(pred.tolist(), Y.tolist())
            map_v += map_o
            map_c += 1.0
    # Guard against an empty test set (would raise ZeroDivisionError).
    if map_c > 0:
        map_v /= map_c
    print_log('[Test]', map_v)
    return map_v
# ---------------------------------------------------------------------------
# Training loop: forward each pair batch through the selection net, then take
# one optimisation step on the ranking net using its pairwise loss.
# NOTE(review): relies on module-level names defined elsewhere in this file
# (pair_gen, loader, select_net, rank_net, rank_optimizer, select_device,
# to_device, utils, letor_config).
# ---------------------------------------------------------------------------
rank_loss_list = []  # per-step loss history, kept for later inspection
start_t = time.time()
for i in range(1000):  # fixed number of training steps
    # One Step Forward: draw a pair batch and move it to the target device.
    X1, X1_len, X1_id, X2, X2_len, X2_id, Y, F = pair_gen.get_batch(
        data1=loader.query_data, data2=loader.doc_data)
    X1, X1_len, X2, X2_len, Y, F = to_device(X1, X1_len, X2, X2_len, Y, F,
                                             device=select_device)
    # Selection stage, then adapt its output to the rank net's expected input.
    X1, X2, X1_len, X2_len, X2_pos = select_net(X1, X2, X1_len, X2_len,
                                                X1_id, X2_id)
    X2, X2_len = utils.data_adaptor(X2, X2_len, select_net, rank_net,
                                    letor_config)
    output = rank_net(X1, X2, X1_len, X2_len, X2_pos)
    # Update Rank Net: pairwise loss, then a standard optimiser step.
    rank_loss = rank_net.pair_loss(output, Y)
    print('rank loss:', rank_loss.item())
    rank_loss_list.append(rank_loss.item())
    rank_optimizer.zero_grad()
    rank_loss.backward()
    rank_optimizer.step()
end_t = time.time()
print('Time Cost: %s s' % (end_t - start_t))
# Persist the full model objects (pickled modules).
# NOTE(review): torch.save on whole modules is fragile across code refactors;
# saving state_dict() is usually preferred — confirm downstream loaders first.
torch.save(select_net, "qcentric.model")
torch.save(rank_net, "deeprank.model")