Example #1
def test(model):
    # Relies on module-level globals from the training script
    # (args, chat_data, metrics, global_entity_list, model_name, test_out, test_results).
    # Restore the trained weights, then evaluate on the test split.
    model = load_model(model, model_name, args.gpu)
    print('\n\n-------------------------------------------')
    print('Testing')
    print('-------------------------------------------')
    test_iter = enumerate(chat_data.get_iter('test'))
    if not args.no_tqdm:
        test_iter = tqdm(test_iter)
        test_iter.set_description_str('Testing')
        test_iter.total = chat_data.n_test // chat_data.batch_size

    test_loss = 0.0
    extrema = []        # vector-extrema scores per batch
    gm = []             # greedy-matching scores per batch
    emb_avg_all = []    # embedding-average scores per batch
    predicted_s = []    # generated responses, per batch
    orig_s = []         # ground-truth responses, per batch
    f1_score = 0.0
    for it, mb in test_iter:
        # One mini-batch: query, query context, answer, their masks, KB triples,
        # KB mask, sentient flags, vocab mask and team info.
        q, q_c, a, q_m, a_m, kb, kb_m, sentient, v_m, teams = mb
        pred, loss = model.evaluate_batch(q, q_c, a, q_m, a_m, kb, kb_m,
                                          sentient)
        # Put predictions and references in the same layout, then decode to text.
        pred = pred.transpose(0, 1).contiguous()
        a = a.transpose(0, 1).contiguous()
        s_g = get_sentences(a, teams)      # gold responses
        s_p = get_sentences(pred, teams)   # predicted responses
        # Embedding-average, vector-extrema and greedy-matching scores for this batch.
        e_a, v_e, g_m = metrics.get_metrics_batch(s_g, s_p)
        f1_score += compute_f1(s_g, s_p, global_entity_list, teams)
        emb_avg_all.append(e_a)
        extrema.append(v_e)
        gm.append(g_m)
        predicted_s.append(s_p)
        orig_s.append(s_g)
        test_loss += loss.item()

    print("Vector extrema:" + str(np.average(extrema)))
    print("Greedy Matching:" + str(np.average(gm)))
    print("Embedding Average on Test:{:.6f}".format(np.average(emb_avg_all)))
    print('\n\n-------------------------------------------')
    print('-------------------------------------------')
    # Flatten the per-batch sentence lists before corpus-level scoring.
    predicted_s = [q for ques in predicted_s for q in ques]
    orig_s = [q for ques in orig_s for q in ques]
    moses_bleu = get_moses_multi_bleu(predicted_s, orig_s, lowercase=True)
    print("Moses BLEU: " + str(moses_bleu))
    # Average F1 over the number of batches seen; len() is not defined for the
    # plain enumerate iterator when tqdm is disabled.
    print("F1 score: ", f1_score / (it + 1))
    test_out['original_response'] = orig_s
    test_out['predicted_response'] = predicted_s
    print('Saving the test predictions......')
    test_out.to_csv(test_results, index=False)
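
Both examples restore weights through a project-level load_model helper that is not shown on this page. Below is a minimal, hypothetical sketch of what such a helper could look like for the PyTorch example; the save_dir default, the '<model_name>.pt' file-name convention, and the map_location handling are assumptions for illustration, not the project's actual implementation.

import os
import torch

def load_model(model, model_name, gpu, save_dir='saved_models'):
    # Hypothetical helper: load saved weights into an already-built model.
    # The '<save_dir>/<model_name>.pt' checkpoint path convention is assumed.
    ckpt = os.path.join(save_dir, model_name + '.pt')
    state_dict = torch.load(ckpt, map_location='cuda' if gpu else 'cpu')
    model.load_state_dict(state_dict)
    model.eval()  # inference mode: disables dropout etc.
    return model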
Example #2
    def __init__(self):
        super().__init__()
        # Dialogue history per user (user id -> list of turns).
        self.dialogue_hist = defaultdict(list)
        # Set random seed
        np.random.seed(args.randseed)
        torch.manual_seed(args.randseed)

        if args.gpu:
            torch.cuda.manual_seed(args.randseed)

        # Vocabulary: token -> index, stored as a pickled dict inside a .npy file.
        if os.path.isfile(args.stoi):
            self.stoi = np.load(args.stoi, allow_pickle=True).item()
        else:
            raise FileNotFoundError('Vocabulary file not found: {}'.format(args.stoi))

        # Inverse vocabulary: index -> token.
        self.itos = {v: k for k, v in self.stoi.items()}
        # Get data
        # chat_data = DialogBatcher(gpu=args.gpu)
        self.model = KGSentient(hidden_size=args.hidden_size,
                                max_r=args.resp_len,
                                gpu=args.gpu,
                                n_words=len(self.stoi) + 1,
                                emb_dim=args.words_dim,
                                kb_max_size=200,
                                b_size=args.batch_size,
                                lr=args.lr,
                                dropout=args.rnn_dropout,
                                emb_drop=args.emb_drop,
                                teacher_forcing_ratio=args.teacher_forcing,
                                sos_tok=self.stoi['<sos>'],
                                eos_tok=self.stoi['<eos>'],
                                itos=self.itos,
                                first_kg_token=self.stoi['o0'])
        if args.gpu:
            self.model = self.model.cuda()

        # Restore the trained weights and wrap the model in an interactive agent.
        self.model_name = 'Sentient_model2'
        self.model = load_model(self.model, self.model_name, gpu=args.gpu)
        self.bot = BotAgent(self.model, stoi=self.stoi, itos=self.itos)
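
The constructor recovers the vocabulary with np.load(...).item(), which unwraps a plain Python dict that was previously stored with np.save. A small self-contained illustration of that round-trip (the file name stoi.npy and the example tokens are hypothetical):

import numpy as np

stoi = {'<sos>': 1, '<eos>': 2, 'hello': 3}
np.save('stoi.npy', stoi)                                     # dict is stored as a 0-d object array
stoi_loaded = np.load('stoi.npy', allow_pickle=True).item()   # .item() unwraps the dict
itos = {v: k for k, v in stoi_loaded.items()}                 # inverse mapping, as in __init__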
def load_convnet():
    # load_model() is called with no arguments here, so it presumably refers to a
    # project-level loader; summary() prints the model architecture.
    model = load_model()
    model.summary()
    return model
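
If the intent of load_convnet is the standard Keras API rather than a project wrapper, a minimal equivalent could look like the sketch below; the tensorflow.keras import and the 'convnet.h5' checkpoint path are assumptions for illustration only.

from tensorflow.keras.models import load_model

def load_convnet(path='convnet.h5'):
    # Assumed Keras-style usage; the file path is hypothetical.
    model = load_model(path)
    model.summary()   # print the layer-by-layer architecture
    return model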