Example No. 1
Train model on Natural Language Inference task
"""
# Train/validate on the NLI task until early stopping fires or the epoch
# budget is exhausted, then evaluate once on the test split and save weights.
epoch = 1

# Optionally warm-start the encoder and/or the classifier from checkpoints.
if params.load_model:
    encoder_net.load_state_dict(
        torch.load(f'../encoder_{params.load_id}saved.pt'))

if params.load_classifier:
    nli_net.load_state_dict(
        torch.load(f'../classifier_{params.load_id}saved.pt'))

# Continue while within the epoch budget AND (early stopping has not
# triggered OR the minimum number of epochs has not yet been reached).
while epoch <= params.n_epochs and (not stop_training
                                    or epoch <= params.min_epoch):
    train_acc = trainepoch(epoch)

    (eval_acc, adv_models, bias_models, bias_loss_functions, bias_list,
     bias_optimizers) = evaluate(
         epoch, adv_models, bias_models, bias_loss_functions, bias_list,
         bias_optimizers, optim_params, 'valid')
    epoch += 1

print("Evaluating on test-set")

evaluate(0, adv_models, bias_models, bias_loss_functions, bias_list,
         bias_optimizers, optim_params, 'test', True)

# Persist the final encoder and classifier weights.
torch.save(encoder_net.state_dict(),
           f'../encoder_{params.save_id}saved.pt')
torch.save(nli_net.state_dict(),
           f'../classifier_{params.save_id}saved.pt')
Example No. 2
}

# model
encoder_types = ['BLSTMEncoder', 'BLSTMprojEncoder', 'BGRUlastEncoder',
                 'InnerAttentionMILAEncoder', 'InnerAttentionYANGEncoder',
                 'InnerAttentionNAACLEncoder', 'ConvNetEncoder', 'LSTMEncoder']
assert params.encoder_type in encoder_types, "encoder_type must be in " + \
                                             str(encoder_types)
nli_net = NLINet(config_nli_model)

# Optionally warm-start from a pre-trained model: copy every parameter tensor
# whose name and shape both match the freshly built network; tensors with a
# different shape (e.g. a different output layer) keep their fresh values.
if params.pre_trained_model:
    # FIX: original used the Python 2 `print` statement, a SyntaxError under
    # Python 3 (the rest of this snippet already calls print() as a function).
    print("Pre_trained_model: " + params.pre_trained_model)
    pre_trained_model = torch.load(params.pre_trained_model)

    nli_net_params = nli_net.state_dict()
    pre_trained_params = pre_trained_model.state_dict()
    # FIX: assert-message typo ("that" -> "than").
    assert nli_net_params.keys() == pre_trained_params.keys(), \
        "load model has different parameter state names than NLI_HYPOTHS_NET"
    for key, parameters in nli_net_params.items():
        if parameters.size() == pre_trained_params[key].size():
            nli_net_params[key] = pre_trained_params[key]
    nli_net.load_state_dict(nli_net_params)

print(nli_net)

# loss: uniform class weights; loss summed (not averaged) over the batch
weight = torch.FloatTensor(params.n_classes).fill_(1)
loss_fn = nn.CrossEntropyLoss(weight=weight)
# NOTE(review): `size_average` is deprecated and this attribute assignment is
# ignored by modern PyTorch — the supported equivalent is
# nn.CrossEntropyLoss(weight=weight, reduction='sum'). Kept as-is to avoid
# changing behavior under the (unknown) torch version this was written for.
loss_fn.size_average = False

# optimizer