示例#1
0
def main():
    """Parse the input corpora, train a Model, and print per-split scores.

    Relies on module-level globals defined elsewhere in this file:
    parser, Model, train, using_GPU, and the configuration constants
    BATCH_SIZE, EMBEDDING_DIM, HIDDEN_DIM, NUM_LABELS, NUM_POLARITIES,
    DROPOUT_RATE.
    """
    train_data, dev_data, test_data, TEXT = parser.parse_input_files(
        BATCH_SIZE, EMBEDDING_DIM)

    word_to_ix = TEXT.vocab.stoi  # token -> index mapping
    ix_to_word = TEXT.vocab.itos  # index -> token mapping

    VOCAB_SIZE = len(word_to_ix)

    word_embeds = TEXT.vocab.vectors  # pretrained embedding matrix

    model = Model(NUM_LABELS, VOCAB_SIZE, EMBEDDING_DIM, HIDDEN_DIM,
                  word_embeds, NUM_POLARITIES, BATCH_SIZE, DROPOUT_RATE)

    # Move the model to the GPU if available
    if using_GPU:
        model = model.cuda()

    train_c, dev_c, test_c = train(train_data, dev_data, test_data, model,
                                   word_to_ix, ix_to_word, using_GPU)

    print(train_c)
    print(dev_c)
    print(test_c)
    # Removed: a 58-line dead triple-quoted string holding an older variant
    # of main() (it was an unused expression statement, never executed as
    # code); retrieve it from version control if ever needed.
示例#3
0
def main():
    """Parse the corpora, train a Model, and report per-epoch metrics
    plus the metrics at the best epoch."""
    parsed = parser.parse_input_files(
        BATCH_SIZE, EMBEDDING_DIM, using_GPU)
    train_split, dev_split, test_split, text_field, _docid_field = parsed

    word_to_ix = text_field.vocab.stoi
    ix_to_word = text_field.vocab.itos

    vocab_size = len(word_to_ix)

    pretrained_vectors = text_field.vocab.vectors

    model = Model(NUM_LABELS, vocab_size, EMBEDDING_DIM, HIDDEN_DIM,
                  pretrained_vectors, NUM_POLARITIES, BATCH_SIZE,
                  DROPOUT_RATE)

    # Use the GPU when one is available.
    if using_GPU:
        model = model.cuda()

    (train_c, dev_c, test_c,
     train_a, dev_a, test_a,
     losses, best_epoch) = train(train_split, dev_split, test_split,
                                 model,
                                 word_to_ix, ix_to_word,
                                 using_GPU)

    splits = (("Train", train_c, train_a),
              ("Dev", dev_c, dev_a),
              ("Test", test_c, test_a))

    # Full per-epoch histories for every split.
    for split_name, scores, accs in splits:
        print(split_name + " results: ")
        print(f"    {scores}")
        print(f"    {accs}")
    print("Losses: ")
    print(losses)

    # Metrics at the best epoch, with the mean score appended.
    print(f"Best epoch = {best_epoch}")
    for split_name, scores, accs in splits:
        best_scores = scores[best_epoch]
        print(split_name + " results: ")
        print(f"    {best_scores} {sum(best_scores) / len(best_scores)}")
        print(f"    {accs[best_epoch]}")
    '''                   
示例#4
0
def main():
    """Derive label-count ratios from the training split, evaluate a
    ratio-based baseline on train/dev/test, and print the metrics."""
    train_split, dev_split, test_split, TEXT = parser.parse_input_files(
        BATCH_SIZE, EMBEDDING_DIM, using_GPU=False)

    print()

    pos, neg, null = train(train_split)
    raw_nums = [neg, null, pos]
    print(raw_nums)
    total = sum(raw_nums)
    ratios = [neg / total, null / total, pos / total]
    print(ratios)
    train_accs, train_f1, train_p, train_r = evaluate(train_split,
                                                      ratios,
                                                      is_train=True)
    dev_accs, dev_f1, dev_p, dev_r = evaluate(dev_split, ratios)
    test_accs, test_f1, test_p, test_r = evaluate(test_split, ratios)

    print()
    # One metrics block per split.
    for split_name, accs, f1s, precision, recall in (
            ("Train", train_accs, train_f1, train_p, train_r),
            ("Dev", dev_accs, dev_f1, dev_p, dev_r),
            ("Test", test_accs, test_f1, test_p, test_r)):
        print(split_name + " results")
        print(f"    f1s = {f1s} {sum(f1s) / len(f1s)}")
        print(f"    pre = {precision}")
        print(f"    rec = {recall}")
        print(f"    acc = {accs}")

    # Hard-coded per-class scores — presumably recorded from an earlier
    # run for comparison; verify against the experiment log.
    train_ = [0.8059490084985835, 0.47965738758029974, 0.781630740393627]
    dev_ = [0.5325443786982249, 0.21212121212121213, 0.5454545454545455]
    test_ = [0.6818181818181819, 0.21428571428571427, 0.5692307692307693]

    print()
    for recorded in (train_, dev_, test_):
        print(sum(recorded) / len(recorded))
示例#5
0
def main():
    Xtrain, Xdev, ACLtest, TEXT, DOCID, POLARITY = parser.parse_input_files(BATCH_SIZE, EMBEDDING_DIM, using_GPU,
                                                                          filepath=datasets[set_name]["filepath"],
                                                                          train_name=datasets[set_name]["filenames"][0],
                                                                          dev_name=datasets[set_name]["filenames"][1],
                                                                          test_name=datasets[set_name]["filenames"][2],
                                                                          has_holdtarg=True)
    _, _, MPQAtest, TEXT1, _, _ = parser.parse_input_files(BATCH_SIZE, EMBEDDING_DIM, using_GPU,
                                                           filepath=datasets[set_name]["filepath"],
                                                           train_name=datasets[set_name]["filenames"][0],
                                                           dev_name=datasets[set_name]["filenames"][1],
                                                           test_name=datasets[set_name]["filenames"][3],
                                                           has_holdtarg=True)

    assert len(TEXT.vocab.stoi) == len(TEXT1.vocab.stoi)
    assert TEXT.vocab.vectors.equal(TEXT1.vocab.vectors)

    word_to_ix = TEXT.vocab.stoi
    ix_to_word = TEXT.vocab.itos
    ix_to_docid = DOCID.vocab.itos

    VOCAB_SIZE = len(word_to_ix)

    word_embeds = TEXT.vocab.vectors

    mode = None

    if MODEL == Model:  # if baseline...
        model = MODEL(NUM_LABELS, VOCAB_SIZE,
                      EMBEDDING_DIM, HIDDEN_DIM, word_embeds,
                      NUM_POLARITIES, BATCH_SIZE, DROPOUT_RATE)
    else:
        model = MODEL(NUM_LABELS, VOCAB_SIZE,
                      EMBEDDING_DIM, HIDDEN_DIM, word_embeds,
                      NUM_POLARITIES, BATCH_SIZE, DROPOUT_RATE,
                      max_co_occurs=MAX_CO_OCCURS,
                      ablations=ABLATIONS)



    print("num params = ")
    print(len(model.state_dict()))
    model = model.eval()

    for epoch in epochs:
        model.load_state_dict(torch.load(save_name(epoch)))

        # Move the model to the GPU if available
        if using_GPU:
            model = model.cuda()

        print("evaluating epoch " + str(epoch) + "...")
        train_score = dev_score = ACL_test_score = MPQA_test_score = None
        if MODEL == Model:
            train_score, train_acc, _ = evaluate(model, word_to_ix, ix_to_word, ix_to_docid, Xtrain, using_GPU)
            dev_score, dev_acc, _ = evaluate(model, word_to_ix, ix_to_word, ix_to_docid, Xdev, using_GPU)
            ACL_test_score, ACL_test_acc, _ = evaluate(model, word_to_ix, ix_to_word, ix_to_docid, ACLtest, using_GPU)
            MPQA_test_score, MPQA_test_acc, _ = evaluate(model, word_to_ix, ix_to_word, ix_to_docid, MPQAtest, using_GPU)
        else:
            train_score, train_acc = evaluate1(model, word_to_ix, ix_to_word, Xtrain, using_GPU)
            dev_score, dev_acc = evaluate1(model, word_to_ix, ix_to_word, Xdev, using_GPU)
            ACL_test_score, ACL_test_acc = evaluate1(model, word_to_ix, ix_to_word, ACLtest, using_GPU)
            MPQA_test_score, MPQA_test_acc = evaluate1(model, word_to_ix, ix_to_word, MPQAtest, using_GPU)

        print("    train f1 scores = " + str(train_score))
        print("    dev f1 scores = " + str(dev_score))
        print("    ACL test f1 scores = " + str(ACL_test_score))
        print("    MPQA test f1 scores = " + str(MPQA_test_score))
def main():
    """Run the span-attentive model over the dev set one example at a time
    and dump correct/incorrect predictions to JSON-lines files for error
    analysis.

    Relies on module-level globals defined elsewhere in this file:
    parser, datasets, set_name, using_GPU, Model1, epochs, torch, json
    and the configuration constants (BATCH_SIZE, EMBEDDING_DIM,
    NUM_LABELS, HIDDEN_DIM, NUM_POLARITIES, DROPOUT_RATE, MAX_CO_OCCURS).
    """
    # dev_batch_size=1: the loop below assumes one example per batch
    # (it indexes [0] and calls int() on per-batch tensors).
    _, dev_data, _, TEXT, DOCID, _ = parser.parse_input_files(
        BATCH_SIZE,
        EMBEDDING_DIM,
        using_GPU,
        filepath=datasets[set_name]["filepath"],
        train_name=datasets[set_name]["filenames"][0],
        dev_name=datasets[set_name]["filenames"][1],
        test_name=datasets[set_name]["filenames"][2],
        has_holdtarg=True,
        dev_batch_size=1)

    word_to_ix = TEXT.vocab.stoi  # token -> index mapping
    ix_to_word = TEXT.vocab.itos  # index -> token mapping

    print(len(ix_to_word))
    # NOTE(review): trivially true — compares the list with itself.
    assert ix_to_word == TEXT.vocab.itos
    print("wow!")

    VOCAB_SIZE = len(word_to_ix)

    word_embeds = TEXT.vocab.vectors  # pretrained embedding matrix
    ix_to_docid = DOCID.vocab.itos  # index -> document id
    # Disabled alternative: load the baseline Model checkpoint instead.
    '''
    model = Model(NUM_LABELS, VOCAB_SIZE,
                  EMBEDDING_DIM, HIDDEN_DIM, word_embeds,
                  NUM_POLARITIES, BATCH_SIZE, DROPOUT_RATE)
    model.load_state_dict(torch.load("./model_states/baseline_" + set_name + "_" + str(epochs) + ".pt"))
    '''
    model = Model1(NUM_LABELS,
                   VOCAB_SIZE,
                   EMBEDDING_DIM,
                   HIDDEN_DIM,
                   word_embeds,
                   NUM_POLARITIES,
                   BATCH_SIZE,
                   DROPOUT_RATE,
                   max_co_occurs=MAX_CO_OCCURS)
    model.load_state_dict(
        torch.load("./model_states/final/" + set_name +
                   "/span_attentive/adv_" + str(epochs) + ".pt"))
    # '''

    print("num params = ")
    print(len(model.state_dict()))
    model.eval()  # inference mode (disables dropout etc.)

    # Move the model to the GPU if available
    if using_GPU:
        model = model.cuda()

    print("num batches = " + str(len(dev_data)))
    counter = 0
    preds = []  # predicted label per example
    probs = []  # predicted class-probability list per example
    acts = []  # gold label per example
    texts = []  # entries the model got wrong
    right_texts = []  # entries the model got right
    for batch in dev_data:
        counter += 1
        # batch.text is a (token_ids, lengths) pair; presumably the
        # torchtext include_lengths convention — same for the holder and
        # target index pairs below. TODO confirm against the parser.
        (words,
         lengths), polarity, label = batch.text, batch.polarity, batch.label
        holder_targets = batch.holder_target
        (holders, holder_lengths) = batch.holder_index
        (targets, target_lengths) = batch.target_index
        co_occur_feature = batch.co_occurrences
        holder_rank, target_rank = batch.holder_rank, batch.target_rank
        sent_classify = batch.sent_classify

        docid = batch.docid
        # Step 1. Remember that Pytorch accumulates gradients.
        # We need to clear them out before each instance
        model.zero_grad()
        model.batch_size = len(label.data)  # set batch size
        # Step 3. Run our forward pass.
        '''
        log_probs, _ = model(words, polarity, holder_targets, lengths)
        '''
        log_probs = model(
            words,
            polarity,
            None,
            lengths,
            holders,
            targets,
            holder_lengths,
            target_lengths,
            co_occur_feature=co_occur_feature,
            holder_rank=holder_rank,
            target_rank=target_rank,
            sent_classify=sent_classify)  # log probs: batch_size x 3
        # '''
        # Argmax over the 3 classes.
        pred_label = log_probs.data.max(1)[
            1]  # torch.ones(len(log_probs), dtype=torch.long)
        '''
        pred_label = torch.ones(len(log_probs), dtype=torch.long)
        if using_GPU:
            pred_label = pred_label.cuda()
        pred_label[log_probs[:, 2] + 0.02 > log_probs[:, 0]] = 2  # classify more as positive
        pred_label[log_probs[:, 0] > log_probs[:, 2] + 0.02] = 0
        pred_label[log_probs[:, 1] > threshold[1]] = 1  # predict is 1 if even just > 10% certainty
        '''
        # NOTE(review): an argmax index is never -1, so this guard is always
        # true; presumably left over from the thresholding variant above.
        if int(pred_label) != -1:
            prob = torch.exp(log_probs)  # log-probabilities -> probabilities
            probs.append(prob[0].data.cpu().numpy().tolist())
            preds.append(int(pred_label))
            acts.append(int(label))
            entry = {
                "docid": DOCID.vocab.itos[int(docid)],
                "holders": holders[0].data.cpu().numpy().tolist(),
                "targets": targets[0].data.cpu().numpy().tolist(),
                "probabilities": probs[len(probs) - 1],
                "prediction": preds[len(preds) - 1],
                "actual": acts[len(acts) - 1]
            }
            # Route the entry by whether the prediction matched the gold.
            if int(pred_label) != int(label):
                texts.append(entry)
            else:
                right_texts.append(entry)
        # Progress logging every 100 examples.
        if counter % 100 == 0:
            print(counter)
            print(len(texts))

    # One JSON object per line (JSON-lines format).
    with open("./error_analysis/" + set_name + "/wrong_docs_dev_adv.json",
              "w") as wf:
        for line in texts:
            json.dump(line, wf)
            wf.write("\n")
    with open("./error_analysis/" + set_name + "/right_docs_dev_adv.json",
              "w") as wf:
        for line in right_texts:
            json.dump(line, wf)
            wf.write("\n")
示例#7
0
def main():
    """Build the model, optionally resume from a checkpoint, train it, and
    print per-epoch and best-epoch metrics.

    Returns (model, TEXT, POLARITY) so callers can reuse the trained
    model together with the vocabulary fields.
    """
    print(save_name("<epoch>"))

    corpus_path = datasets[set_name]["filepath"]
    filenames = datasets[set_name]["filenames"]
    Xtrain, Xdev, ACLtest, TEXT, DOCID, POLARITY = parser.parse_input_files(
        BATCH_SIZE,
        EMBEDDING_DIM,
        using_GPU,
        filepath=corpus_path,
        train_name=filenames[0],
        dev_name=filenames[1],
        test_name=filenames[2],
        has_holdtarg=False)
    MPQAtest = None
    if len(filenames) == 4:
        # A fourth filename means there is a second (MPQA) test set;
        # train/dev files are reused so the vocabulary matches.
        _, _, MPQAtest, _text1, _, _ = parser.parse_input_files(
            BATCH_SIZE,
            EMBEDDING_DIM,
            using_GPU,
            filepath=corpus_path,
            train_name=filenames[0],
            dev_name=filenames[1],
            test_name=filenames[3],
            has_holdtarg=False)

    word_to_ix = TEXT.vocab.stoi
    ix_to_word = TEXT.vocab.itos

    vocab_size = len(word_to_ix)

    pretrained_vectors = TEXT.vocab.vectors

    model = MODEL(NUM_LABELS,
                  vocab_size,
                  EMBEDDING_DIM,
                  HIDDEN_DIM,
                  pretrained_vectors,
                  NUM_POLARITIES,
                  BATCH_SIZE,
                  DROPOUT_RATE,
                  max_co_occurs=MAX_CO_OCCURS,
                  ablations=ABLATIONS)

    print("num params = ")
    print(len(model.state_dict()))

    # Resume from a saved checkpoint when not starting at epoch 0.
    if epochs[0] != 0:
        model.load_state_dict(torch.load(save_name(epochs[0])))

    # Move the model to the GPU if available.
    if using_GPU:
        model = model.cuda()

    (train_c, dev_c, test_c,
     train_a, dev_a, test_a,
     losses, best_epoch) = train(Xtrain, Xdev, ACLtest,
                                 model,
                                 word_to_ix, ix_to_word,
                                 using_GPU, Xtest2=MPQAtest)

    splits = (("Train", train_c, train_a),
              ("Dev", dev_c, dev_a),
              ("Test", test_c, test_a))

    # Full per-epoch histories for every split.
    for split_name, scores, accs in splits:
        print(split_name + " results: ")
        print(f"    {scores}")
        print(f"    {accs}")
    print("Losses: ")
    print(losses)

    print(f"Best epoch = {best_epoch}")
    # Histories are indexed from the starting epoch, so shift into range.
    best_epoch -= epochs[0]
    for split_name, scores, accs in splits:
        best_scores = scores[best_epoch]
        print(split_name + " results: ")
        print(f"    {best_scores} {sum(best_scores) / len(best_scores)}")
        print(f"    {accs[best_epoch]}")

    return model, TEXT, POLARITY