Example #1
        # Fragment of an enclosing evaluation loop: gather per-example
        # attention diagnostics (bias word, attention distribution, logits).
        attention_result = {}
        attention_result['labels'] = labels
        attention_result['input_toks'] = results['input_toks'][i]
        attention_result['bias_word'] = results['input_toks'][i][idx_biased]
        attention_result['attention_dist'] = results['attention_dist'][i][
            idx_biased]
        attention_result['tok_logits'] = batch_logits[i]
        attention_result['epoch_num'] = epoch
        attention_results.append(attention_result)


print('TRAINING...')
model.train()
for epoch in range(ARGS.epochs):
    print('STARTING EPOCH ', epoch)
    losses = tagging_utils.train_for_epoch(model, train_dataloader, loss_fn,
                                           optimizer)
    writer.add_scalar('train/loss', np.mean(losses), epoch + 1)

    # eval
    print('EVAL...')
    model.eval()
    results = tagging_utils.run_inference(model, eval_dataloader, loss_fn,
                                          tokenizer)
    print(np.mean(results['labeling_hits']))
    print(np.mean(results['tok_loss']))
    writer.add_scalar('eval/tok_loss', np.mean(results['tok_loss']), epoch + 1)
    writer.add_scalar('eval/tok_acc', np.mean(results['labeling_hits']),
                      epoch + 1)

    model.train()
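For orientation, here is a minimal sketch of what a per-epoch training helper in the style of `tagging_utils.train_for_epoch` could look like. The batch field names and the model's forward signature below are assumptions for illustration, not the project's actual implementation:

def train_for_epoch_sketch(model, dataloader, loss_fn, optimizer):
    # Hedged sketch: one optimization pass over the dataloader, returning the
    # per-batch losses that the caller averages for TensorBoard logging.
    losses = []
    for batch in dataloader:
        optimizer.zero_grad()
        input_ids, labels = batch['input_ids'], batch['labels']  # assumed batch fields
        logits = model(input_ids)                                # assumed forward signature
        loss = loss_fn(logits, labels)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    return losses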
Example #2
print('INITIAL EVAL...')
joint_model.eval()
hits, preds, golds, srcs = joint_utils.run_eval(
    joint_model, eval_dataloader, tok2id,
    ARGS.working_dir + '/results_initial.txt', ARGS.max_seq_len,
    ARGS.beam_width)
writer.add_scalar('eval/bleu', seq2seq_utils.get_bleu(preds, golds), 0)
writer.add_scalar('eval/true_hits', np.mean(hits), 0)

for epoch in range(ARGS.epochs):
    print('EPOCH ', epoch)
    print('TRAIN...')
    losses = joint_utils.train_for_epoch(joint_model,
                                         train_dataloader,
                                         joint_optimizer,
                                         debias_loss_fn,
                                         tagging_loss_fn,
                                         ignore_tagger=False,
                                         coverage=ARGS.coverage)
    writer.add_scalar('train/loss', np.mean(losses), epoch + 1)

    print('SAVING...')
    joint_model.save(ARGS.working_dir + '/model_%d.ckpt' % (epoch + 1))

    print('EVAL...')
    joint_model.eval()
    hits, preds, golds, srcs = joint_utils.run_eval(
        joint_model, eval_dataloader, tok2id,
        ARGS.working_dir + '/results_%d.txt' % (epoch + 1), ARGS.max_seq_len,
        ARGS.beam_width)
    writer.add_scalar('eval/bleu', seq2seq_utils.get_bleu(preds, golds),
                      epoch + 1)
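The BLEU figure logged above comes from `seq2seq_utils.get_bleu(preds, golds)`. As a point of reference only, a corpus-level BLEU over the same inputs could be computed with NLTK; whether the project tokenizes and scales the score the same way is an assumption:

from nltk.translate.bleu_score import corpus_bleu

def bleu_sketch(preds, golds):
    # preds and golds are assumed to be parallel lists of token lists; each
    # gold sequence is wrapped as a single-reference list for corpus_bleu.
    return corpus_bleu([[g] for g in golds], preds) * 100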
Example #3
optimizer = utils.build_optimizer(model, num_train_steps)

loss_fn, cross_entropy_loss = utils.build_loss_fn(vocab_size=len(tok2id))

writer = SummaryWriter(ARGS.working_dir)

# # # # # # # # # # # PRETRAINING (optional) # # # # # # # # # # # # # # # #
if ARGS.pretrain_data:
    print('PRETRAINING...')
    for epoch in range(ARGS.pretrain_epochs):
        model.train()
        losses = utils.train_for_epoch(
            model,
            pretrain_dataloader,
            tok2id,
            optimizer,
            cross_entropy_loss,
            ignore_enrich=not ARGS.use_pretrain_enrich)
        writer.add_scalar('pretrain/loss', np.mean(losses), epoch)

    print('SAVING DEBIASER...')
    torch.save(model.state_dict(), ARGS.working_dir + '/debiaser.ckpt')

# # # # # # # # # # # # TRAINING # # # # # # # # # # # # # #

for epoch in range(ARGS.epochs):
    print('EPOCH ', epoch)
    print('TRAIN...')
    model.train()
    # Arguments after `model` are assumed; they mirror the pretraining call above.
    losses = utils.train_for_epoch(model,
                                   train_dataloader,
                                   tok2id,
                                   optimizer,
                                   loss_fn)
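Example #3 builds its criterion with `utils.build_loss_fn(vocab_size=len(tok2id))` and unpacks both a combined loss function and a plain cross entropy. A plausible shape for such a helper, shown only for orientation (the padding id and the exact return contract are assumptions):

import torch.nn as nn

def build_loss_fn_sketch(vocab_size, padding_id=0):
    # Token-level cross entropy that skips padded positions (assumed padding id).
    cross_entropy_loss = nn.CrossEntropyLoss(ignore_index=padding_id)

    def loss_fn(logits, labels):
        # Flatten (batch, seq, vocab) logits and (batch, seq) labels for the criterion.
        return cross_entropy_loss(logits.reshape(-1, vocab_size), labels.reshape(-1))

    return loss_fn, cross_entropy_loss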