Code example #1
import csv
import math
import os
import time

import torch
from torch.autograd import Variable

# Pre-0.4 PyTorch (Variable API). Project helpers assumed in scope:
# get_batch, get_target_expl_batch, get_key_from_val, get_sentence_from_indices,
# remove_file, bleu_prediction, and the NLI_DIC_LABELS mapping.


def evaluate_snli_final(esnli_net, criterion_expl, dataset, data, expl_no_unk,
                        word_vec, word_index, batch_size, print_every,
                        current_run_dir):
    assert dataset in ['snli_dev', 'snli_test']
    print(dataset.upper())
    esnli_net.eval()

    correct = 0.
    correct_labels_expl = 0.
    cum_test_ppl = 0
    cum_test_n_words = 0

    headers = [
        "gold_label", "Premise", "Hypothesis", "pred_label", "pred_expl",
        "pred_lbl_decoder", "Expl_1", "Expl_2", "Expl_3"
    ]
    expl_csv = os.path.join(
        current_run_dir,
        time.strftime("%d:%m") + "_" + time.strftime("%H:%M:%S") + "_" +
        dataset + ".csv")
    remove_file(expl_csv)
    expl_f = open(expl_csv, "a")
    writer = csv.writer(expl_f)
    writer.writerow(headers)

    s1 = data['s1']
    s2 = data['s2']
    expl_1 = data['expl_1']
    expl_2 = data['expl_2']
    expl_3 = data['expl_3']
    label = data['label']
    label_expl = data['label_expl']

    for i in range(0, len(s1), batch_size):
        # prepare batch
        s1_batch, s1_len = get_batch(s1[i:i + batch_size], word_vec)
        s2_batch, s2_len = get_batch(s2[i:i + batch_size], word_vec)
        s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(
            s2_batch.cuda())
        tgt_label_batch = Variable(
            torch.LongTensor(label[i:i + batch_size])).cuda()
        tgt_label_expl_batch = label_expl[i:i + batch_size]

        # print example
        if i % print_every == 0:
            print("Final SNLI example from " + dataset)
            print("Sentence1:  ", ' '.join(s1[i]), " LENGHT: ", s1_len[0])
            print("Sentence2:  ", ' '.join(s2[i]), " LENGHT: ", s2_len[0])
            print("Gold label:  ", get_key_from_val(label[i], NLI_DIC_LABELS))

        out_lbl = [None] * 4  # classifier output from each of the 4 forward passes
        for index in range(1, 4):
            expl = data['expl_' + str(index)]  # direct lookup instead of eval()
            input_expl_batch, _ = get_batch(expl[i:i + batch_size], word_vec)
            input_expl_batch = Variable(input_expl_batch[:-1].cuda())
            if i % print_every == 0:
                print("Explanation " + str(index) + " :  ", ' '.join(expl[i]))
                print("Predicted label by decoder " + str(index) + " :  ",
                      expl[i][0])  # first token, not its spaced-out characters
            tgt_expl_batch, lens_tgt_expl = get_target_expl_batch(
                expl[i:i + batch_size], word_index)
            assert tgt_expl_batch.dim() == 2, "tgt_expl_batch.dim()=" + str(
                tgt_expl_batch.dim())
            tgt_expl_batch = Variable(tgt_expl_batch).cuda()
            if i % print_every == 0:
                print("Target expl " + str(index) + " :  ",
                      get_sentence_from_indices(word_index, tgt_expl_batch[:, 0]),
                      " LENGTH: ", lens_tgt_expl[0])

            # model forward; tgt_label stays None because in test mode the
            # model's own predicted labels are used
            out_expl, out_lbl[index - 1] = esnli_net((s1_batch, s1_len),
                                                     (s2_batch, s2_len),
                                                     input_expl_batch,
                                                     mode="teacher")
            # ppl
            loss_expl = criterion_expl(
                out_expl.view(out_expl.size(0) * out_expl.size(1), -1),
                tgt_expl_batch.view(
                    tgt_expl_batch.size(0) * tgt_expl_batch.size(1)))
            cum_test_n_words += lens_tgt_expl.sum()
            cum_test_ppl += loss_expl.data[0]
            answer_idx = torch.max(out_expl, 2)[1]
            if i % print_every == 0:
                print("Decoded explanation " + str(index) + " :  ",
                      get_sentence_from_indices(word_index, answer_idx[:, 0]))
                print("\n")

        # greedy decoding; only the first timestep (<s>) of input_expl_batch is used
        pred_expls, out_lbl[3] = esnli_net((s1_batch, s1_len),
                                           (s2_batch, s2_len),
                                           input_expl_batch,
                                           mode="forloop")
        if i % print_every == 0:
            print("Fully decoded explanation: ",
                  pred_expls[0].strip().split()[1:-1])
            print("Predicted label from decoder: ",
                  pred_expls[0].strip().split()[0])

        for b in range(len(pred_expls)):
            assert tgt_label_expl_batch[b] in [
                'entailment', 'neutral', 'contradiction'
            ]
            if len(pred_expls[b]) > 0:
                words = pred_expls[b].strip().split()
                assert words[0] in ['entailment', 'neutral',
                                    'contradiction'], words[0]
                if words[0] == tgt_label_expl_batch[b]:
                    correct_labels_expl += 1

        assert (torch.equal(out_lbl[0], out_lbl[1]))
        assert (torch.equal(out_lbl[1], out_lbl[2]))
        assert (torch.equal(out_lbl[2], out_lbl[3]))

        # accuracy
        pred = out_lbl[0].data.max(1)[1]
        if i % print_every == 0:
            print("Predicted label from classifier:  ",
                  get_key_from_val(pred[0], NLI_DIC_LABELS), "\n\n\n")
        correct += pred.long().eq(tgt_label_batch.data.long()).cpu().sum()

        # write csv row of predictions
        for j in range(len(pred_expls)):
            row = []
            row.append(get_key_from_val(label[i + j], NLI_DIC_LABELS))
            row.append(' '.join(s1[i + j][1:-1]))
            row.append(' '.join(s2[i + j][1:-1]))
            row.append(get_key_from_val(pred[j], NLI_DIC_LABELS))
            row.append(' '.join(pred_expls[j].strip().split()[1:-1]))
            assert pred_expls[j].strip().split()[0] in [
                'entailment', 'contradiction', 'neutral'
            ], pred_expls[j].strip().split()[0]
            row.append(pred_expls[j].strip().split()[0])
            # gold explanations without <unk> replacement
            row.append(expl_no_unk['expl_1'][i + j])
            row.append(expl_no_unk['expl_2'][i + j])
            row.append(expl_no_unk['expl_3'][i + j])
            writer.writerow(row)

    eval_acc = round(100 * correct / len(s1), 2)
    eval_acc_label_expl = round(100 * correct_labels_expl / len(s1), 2)
    eval_ppl = math.exp(cum_test_ppl / cum_test_n_words)

    expl_f.close()
    bleu_score = 100 * bleu_prediction(expl_csv, expl_no_unk)

    print(dataset.upper() + ' SNLI accuracy: ', eval_acc, 'bleu score: ',
          bleu_score, 'ppl: ', eval_ppl, 'eval_acc_label_expl: ',
          eval_acc_label_expl)
    return (eval_acc, round(bleu_score, 2), round(eval_ppl, 2),
            eval_acc_label_expl)
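
The perplexity returned above is corpus-level: each batch's cross-entropy is accumulated into cum_test_ppl, the target word counts into cum_test_n_words, and only the final ratio is exponentiated. A minimal, self-contained sketch of that bookkeeping, assuming criterion_expl is a sum-reduced cross-entropy with a padding index (all names, sizes, and values below are illustrative, not from the source):

import math

import torch
import torch.nn as nn

PAD = 0  # assumed padding index, ignored by both the loss and the word count
criterion = nn.CrossEntropyLoss(reduction='sum', ignore_index=PAD)

cum_loss, cum_words = 0.0, 0
for _ in range(3):  # stand-in for the evaluation batches
    logits = torch.randn(7, 4, 100)          # (seq_len, batch, vocab)
    targets = torch.randint(1, 100, (7, 4))  # (seq_len, batch) gold indices
    loss = criterion(logits.view(-1, logits.size(-1)), targets.view(-1))
    cum_loss += loss.item()
    cum_words += targets.ne(PAD).sum().item()

ppl = math.exp(cum_loss / cum_words)  # corpus-level perplexity
print(round(ppl, 2))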
Code example #2
    # Decoder forward pass (a method of the explanation-decoder module).
    # Assumes the imports from example #1, plus torch.nn as nn and
    # torch.nn.functional as F, and the project helpers assert_sizes,
    # array_all_true, and get_keys_from_vals.
    def forward(self, expl, s1_embed, s2_embed, mode, classif_lbl):
        # expl: Variable(seqlen x bsize x worddim)
        # s1/2_embed: Variable(bsize x sent_dim)

        assert mode in ['forloop', 'teacher'], mode

        batch_size = expl.size(1)
        assert_sizes(s1_embed, 2, [batch_size, self.sent_dim])
        assert_sizes(s2_embed, 2, [batch_size, self.sent_dim])
        assert_sizes(expl, 3, [expl.size(0), batch_size, self.word_emb_dim])

        context = torch.cat([s1_embed, s2_embed], 1).unsqueeze(0)
        if self.use_diff_prod_sent_embed:
            context = torch.cat([
                s1_embed, s2_embed,
                torch.abs(s1_embed - s2_embed), s1_embed * s2_embed
            ], 1).unsqueeze(0)
        if self.only_diff_prod:
            context = torch.cat(
                [torch.abs(s1_embed - s2_embed), s1_embed * s2_embed],
                1).unsqueeze(0)

        assert_sizes(
            context, 3,
            [1, batch_size, self.context_mutiply_coef * self.sent_dim])

        # init decoder
        context_init = torch.cat([s1_embed, s2_embed], 1).unsqueeze(0)
        if self.use_init:
            if 2 * self.sent_dim != self.dec_rnn_dim:
                init_0 = self.proj_init(
                    context_init.expand(self.n_layers_dec, batch_size,
                                        2 * self.sent_dim))
            else:
                init_0 = context_init
        else:
            init_0 = Variable(
                torch.zeros(self.n_layers_dec, batch_size,
                            self.dec_rnn_dim)).cuda()

        init_state = init_0
        if self.decoder_type == 'lstm':
            init_state = (init_0, init_0)

        self.decoder_rnn.flatten_parameters()

        if mode == "teacher":
            input_dec = torch.cat([
                expl,
                context.expand(expl.size(0), batch_size,
                               self.context_mutiply_coef * self.sent_dim)
            ], 2)
            # F.dropout respects self.training; a freshly constructed
            # nn.Dropout would stay in training mode and drop units at eval
            input_dec = self.proj_inp_dec(
                F.dropout(input_dec, self.dpout_dec, self.training))

            out, _ = self.decoder_rnn(input_dec, init_state)
            dp_out = F.dropout(out, self.dpout_dec, self.training)

            if not self.use_vocab_proj:
                return self.vocab_layer(dp_out)
            return self.vocab_layer(self.vocab_proj(dp_out))

        else:
            assert classif_lbl is not None
            assert_sizes(classif_lbl, 1, [batch_size])
            pred_expls = []
            finished = []
            for i in range(batch_size):
                pred_expls.append("")
                finished.append(False)

            dec_inp_t = torch.cat([expl[0, :, :].unsqueeze(0), context], 2)
            dec_inp_t = self.proj_inp_dec(dec_inp_t)

            ht = init_state
            t = 0
            while t < self.max_T_decoder and not array_all_true(finished):
                t += 1
                word_embed = torch.zeros(1, batch_size, self.word_emb_dim)
                assert_sizes(dec_inp_t, 3, [1, batch_size, self.inp_dec_dim])
                dec_out_t, ht = self.decoder_rnn(dec_inp_t, ht)
                assert_sizes(dec_out_t, 3, [1, batch_size, self.dec_rnn_dim])
                if self.use_vocab_proj:
                    out_t_proj = self.vocab_proj(dec_out_t)
                    out_t = self.vocab_layer(out_t_proj).data
                else:
                    out_t = self.vocab_layer(
                        dec_out_t
                    ).data  # TODO: Use torch.stack with variables instead
                assert_sizes(out_t, 3, [1, batch_size, self.n_vocab])
                i_t = torch.max(out_t, 2)[1]
                assert_sizes(i_t, 2, [1, batch_size])
                pred_words = get_keys_from_vals(
                    i_t, self.word_index
                )  # array of bs of words at current timestep
                assert len(pred_words) == batch_size, "pred_words " + str(
                    len(pred_words)) + " batch_size " + str(batch_size)
                for i in range(batch_size):
                    if pred_words[i] == '</s>':
                        finished[i] = True
                    if not finished[i]:
                        pred_expls[i] += " " + pred_words[i]
                    if t > 1:
                        # feed back the embedding of the word just predicted
                        word_embed[0, i] = torch.from_numpy(
                            self.word_vec[pred_words[i]])
                    else:
                        # put label predicted by classifier
                        classif_label = get_key_from_val(
                            classif_lbl[i], NLI_DIC_LABELS)
                        assert classif_label in [
                            'entailment', 'contradiction', 'neutral'
                        ], classif_label
                        word_embed[0, i] = torch.from_numpy(
                            self.word_vec[classif_label])
                word_embed = Variable(word_embed.cuda())
                assert_sizes(word_embed, 3, [1, batch_size, self.word_emb_dim])
                dec_inp_t = self.proj_inp_dec(
                    torch.cat([word_embed, context], 2))
            return pred_expls
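
The 'forloop' branch above is greedy decoding: one RNN step per timestep, an argmax over the vocabulary, per-example finished flags that stop word collection once '</s>' is produced, and the predicted word's embedding fed back as the next input (with the classifier's label forced at the first step). A stripped-down sketch of that loop, using toy stand-ins for decoder_rnn, vocab_layer, and the word_vec lookup (all modules and dimensions below are illustrative):

import torch
import torch.nn as nn

vocab, dim, hid, bs, EOS = 50, 8, 16, 3, 2
rnn = nn.GRU(dim, hid)               # stand-in for decoder_rnn
vocab_layer = nn.Linear(hid, vocab)  # stand-in for vocab_layer
embed = nn.Embedding(vocab, dim)     # stand-in for the word_vec lookup

inp = torch.zeros(1, bs, dim)        # <s> embedding for the whole batch
h = torch.zeros(1, bs, hid)
finished = [False] * bs
preds = [[] for _ in range(bs)]
for t in range(20):                  # max_T_decoder cap
    out, h = rnn(inp, h)             # one decoder step
    idx = vocab_layer(out).argmax(2) # (1, bs) greedy word indices
    for b in range(bs):
        if idx[0, b].item() == EOS:
            finished[b] = True       # stop collecting words for this example
        if not finished[b]:
            preds[b].append(idx[0, b].item())
    if all(finished):
        break
    inp = embed(idx)                 # feed predictions back as next input
print(preds)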
Code example #3
# Same imports and project helpers as example #1.
def eval_datasets_without_expl(esnli_net, which_set, data, word_vec,
                               word_emb_dim, batch_size, print_every,
                               current_run_dir):

    dict_labels = NLI_DIC_LABELS

    esnli_net.eval()
    correct = 0.
    correct_labels_expl = 0.

    s1 = data['s1']
    s2 = data['s2']
    label = data['label']
    label_expl = data['label_expl']

    headers = [
        "gold_label", "Premise", "Hypothesis", "pred_label", "pred_expl",
        "pred_lbl_decoder"
    ]
    expl_csv = os.path.join(
        current_run_dir,
        time.strftime("%d:%m") + "_" + time.strftime("%H:%M:%S") + "_" +
        which_set + ".csv")
    remove_file(expl_csv)
    expl_f = open(expl_csv, "a")
    writer = csv.writer(expl_f)
    writer.writerow(headers)

    for i in range(0, len(s1), batch_size):
        # prepare batch
        s1_batch, s1_len = get_batch(s1[i:i + batch_size], word_vec)
        s2_batch, s2_len = get_batch(s2[i:i + batch_size], word_vec)

        current_bs = s1_batch.size(1)
        assert_sizes(s1_batch, 3, [s1_batch.size(0), current_bs, word_emb_dim])
        assert_sizes(s2_batch, 3, [s2_batch.size(0), current_bs, word_emb_dim])

        s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(
            s2_batch.cuda())
        tgt_label_batch = Variable(
            torch.LongTensor(label[i:i + batch_size])).cuda()
        tgt_label_expl_batch = label_expl[i:i + batch_size]

        expl_t0 = Variable(
            torch.from_numpy(word_vec['<s>']).float().unsqueeze(0).expand(
                current_bs, word_emb_dim).unsqueeze(0)).cuda()
        assert_sizes(expl_t0, 3, [1, current_bs, word_emb_dim])

        # model forward
        pred_expls, out_lbl = esnli_net((s1_batch, s1_len), (s2_batch, s2_len),
                                        expl_t0,
                                        mode="forloop")
        assert len(pred_expls) == current_bs, "pred_expls: " + str(
            len(pred_expls)) + " current_bs: " + str(current_bs)

        for b in range(len(pred_expls)):
            assert tgt_label_expl_batch[b] in [
                'entailment', 'neutral', 'contradiction'
            ]
            if len(pred_expls[b]) > 0:
                words = pred_expls[b].strip().split(" ")
                if words[0] == tgt_label_expl_batch[b]:
                    correct_labels_expl += 1

        # accuracy
        pred = out_lbl.data.max(1)[1]
        correct += pred.long().eq(tgt_label_batch.data.long()).cpu().sum()

        # write csv row of predictions
        # Look up for the headers order
        for j in range(len(pred_expls)):
            row = []
            row.append(get_key_from_val(label[i + j], dict_labels))
            row.append(' '.join(s1[i + j][1:-1]))
            row.append(' '.join(s2[i + j][1:-1]))
            row.append(get_key_from_val(pred[j], dict_labels))
            words = pred_expls[j].strip().split()
            # slice tokens as in example #1; the original sliced characters
            row.append(' '.join(words[1:-1]))
            row.append(words[0] if words else "")  # label predicted by the decoder
            writer.writerow(row)

        # print example
        if i % print_every == 0:
            print(which_set.upper() + " example: ")
            print("Premise:  ", ' '.join(s1[i]), " LENGHT: ", s1_len[0])
            print("Hypothesis:  ", ' '.join(s2[i]), " LENGHT: ", s2_len[0])
            print("Gold label:  ", get_key_from_val(label[i], dict_labels))
            print("Predicted label:  ", get_key_from_val(pred[0], dict_labels))
            print("Predicted explanation:  ", pred_expls[0], "\n\n\n")

    eval_acc = round(100 * correct / len(s1), 2)
    eval_acc_label_expl = round(100 * correct_labels_expl / len(s1), 2)
    print(which_set.upper() + " no train ", eval_acc, '\n\n\n')
    expl_f.close()
    return eval_acc, eval_acc_label_expl
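
Unlike example #1, this evaluation primes the decoder with only the '<s>' embedding, broadcast across the batch into a (1, batch, dim) tensor. A minimal sketch of that construction, assuming word_vec maps tokens to NumPy float32 vectors as in the code (the random vector below is a placeholder):

import numpy as np
import torch

word_emb_dim, current_bs = 300, 4
word_vec = {'<s>': np.random.randn(word_emb_dim).astype(np.float32)}

expl_t0 = (torch.from_numpy(word_vec['<s>'])  # (dim,)
           .unsqueeze(0)                      # (1, dim)
           .expand(current_bs, word_emb_dim)  # (batch, dim), no data copy
           .unsqueeze(0))                     # (1, batch, dim)
assert expl_t0.shape == (1, current_bs, word_emb_dim)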