Example #1
def __init__(self):
    # Restore the latest checkpoint of a trained NMT model and wrap it in an
    # inference graph and session (TensorFlow NMT tutorial style).
    hparams = load_hparams('/tmp/nmt_model')
    ckpt = tf.train.latest_checkpoint('/tmp/nmt_model')
    self.model = create_infer_model(Model, hparams)
    self.sess = tf.Session(graph=self.model.graph,
                           config=get_config_proto())
    with self.model.graph.as_default():
        self.loaded_infer_model = load_model(self.model.model, ckpt,
                                             self.sess, "infer")
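The constructor above only loads the model. A minimal sketch of a companion decode method, assuming the InferModel layout of the TensorFlow NMT tutorial (src_placeholder, batch_size_placeholder, iterator, and a decode() method); everything outside the constructor is an assumption, not the project's actual code:

# Hypothetical companion method: feed source sentences through the infer
# model's input placeholders, then run beam-search decoding on the batch.
def translate(self, sentences):
    self.sess.run(self.model.iterator.initializer,
                  feed_dict={self.model.src_placeholder: sentences,
                             self.model.batch_size_placeholder: len(sentences)})
    # decode() returns the decoded outputs for the batch (exact shape
    # depends on the beam settings in hparams).
    nmt_outputs, _ = self.loaded_infer_model.decode(self.sess)
    return nmt_outputs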
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-config", type=str)
    parser.add_argument("-nmt_dir", type=str)
    parser.add_argument("-model_type", type=str)
    parser.add_argument('-gpuid', default=[0], nargs='+', type=int)
    parser.add_argument("-valid_file", type=str)
    parser.add_argument("-train_file", type=str)
    parser.add_argument("-train_score", type=str, default=None)
    parser.add_argument("-src_vocab", type=str)
    parser.add_argument("-tgt_vocab", type=str)
    parser.add_argument("-rg_model", type=str, default=None)
    parser.add_argument("-tg_model", type=str, default=None)
    parser.add_argument("-critic_model", type=str, default=None)

    args = parser.parse_args()
    opt = utils.load_hparams(args.config)
    cuda.set_device(args.gpuid[0])
    if opt.random_seed > 0:
        random.seed(opt.random_seed)
        torch.manual_seed(opt.random_seed)
        np.random.seed(opt.random_seed)

    fields = dict()
    vocab_src = Vocab(args.src_vocab, noST=True)
    vocab_tgt = Vocab(args.tgt_vocab)

    fields['src'] = vocab_wrapper(vocab_src)
    fields['tgt'] = vocab_wrapper(vocab_tgt)

    mask_end = True
    train = Data_Loader(args.train_file,
                        opt.train_batch_size,
                        score=args.train_score,
                        mask_end=mask_end)
    valid = Data_Loader(args.valid_file,
                        opt.train_batch_size,
                        mask_end=mask_end)

    # Build model.
    model, critic, start_epoch_at = build_or_load_model(args, opt, fields)

    check_save_model_path(args, opt)

    # Build optimizer.
    optimR, lr_schedulerR, optimT, lr_schedulerT, optimC, lr_schedulerC = build_optims_and_schedulers(
        model, critic, opt)

    # use_cuda is a module-level flag in the source project.
    if use_cuda:
        model = model.cuda()
        if opt.use_critic:
            critic = critic.cuda()

    # Do training.
    train_model(opt, model, critic, train, valid, fields, optimR,
                lr_schedulerR, optimT, lr_schedulerT, optimC, lr_schedulerC,
                start_epoch_at)
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-config", type=str)
    parser.add_argument("-nmt_dir", type=str)
    parser.add_argument("-model_type", type=str)
    parser.add_argument('-gpuid', default=[0], nargs='+', type=int)
    parser.add_argument("-valid_file", type=str)
    parser.add_argument("-train_file", type=str)
    parser.add_argument("-train_score", type=str, default=None)
    parser.add_argument("-src_vocab", type=str)
    parser.add_argument("-tgt_vocab", type=str)
    parser.add_argument("-start_point", type=str, default=None)

    args = parser.parse_args()
    opt = utils.load_hparams(args.config)

    if opt.random_seed > 0:
        random.seed(opt.random_seed)
        torch.manual_seed(opt.random_seed)

    fields = dict()
    vocab_src = Vocab(args.src_vocab, noST=True)
    vocab_tgt = Vocab(args.tgt_vocab)
    fields['src'] = vocab_wrapper(vocab_src)
    fields['tgt'] = vocab_wrapper(vocab_tgt)

    train = Data_Loader(args.train_file,
                        opt.train_batch_size,
                        score=args.train_score,
                        mask_end=(args.model_type == "ev"))
    valid = Data_Loader(args.valid_file,
                        opt.train_batch_size,
                        mask_end=(args.model_type == "ev"))

    # Build model.

    model, start_epoch_at = build_or_load_model(args, opt, fields)
    check_save_model_path(args, opt)

    optimG, schedulerG, optimD, schedulerD = build_optims_and_lr_schedulers(
        model, opt)

    if use_cuda:
        model = model.cuda()

    # Do training.
    #pretrain_discriminators(opt, model, train, valid, fields, optimD, schedulerD, start_epoch_at)
    train_model(opt, model, train, valid, fields, optimG, schedulerG, optimD,
                schedulerD, start_epoch_at)
    print("DONE")
    x = 0
    while True:
        x = (x + 1) % 5
Example #4
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("-test_data", type=str)
    parser.add_argument("-test_out", type=str)
    parser.add_argument("-config", type=str)
    parser.add_argument("-model", type=str)
    parser.add_argument("-vocab", type=str)
    parser.add_argument("-dump_beam", default="", type=str)
    parser.add_argument('-gpuid', default=[], nargs='+', type=int)
    parser.add_argument("-beam_size", type=int)
    parser.add_argument("-decode_max_length", type=int)

    args = parser.parse_args()
    opt = utils.load_hparams(args.config)

    use_cuda = False
    device = None
    if args.gpuid:
        cuda.set_device(args.gpuid[0])
        device = torch.device('cuda', args.gpuid[0])
        use_cuda = True
    fields = nmt.IO.load_fields(torch.load(args.vocab))
    test_dataset = nmt.IO.InferDataset(data_path=args.test_data,
                                       fields=[('src', fields["src"])])

    test_data_iter = nmt.IO.OrderedIterator(dataset=test_dataset,
                                            device=device,
                                            batch_size=1,
                                            train=False,
                                            sort=False,
                                            sort_within_batch=True,
                                            shuffle=False)

    model = nmt.model_helper.create_base_model(opt, fields)

    print('Loading parameters ...')

    model.load_checkpoint(args.model)
    if use_cuda:
        model = model.cuda()

    translator = nmt.Translator(model=model,
                                fields=fields,
                                beam_size=args.beam_size,
                                n_best=1,
                                max_length=args.decode_max_length,
                                global_scorer=None,
                                cuda=use_cuda,
                                beam_trace=bool(args.dump_beam))

    translate_file(translator, test_data_iter, args.test_out, fields, use_cuda,
                   args.dump_beam)
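A hypothetical command line for this script; the flag names come from the parser above, while the script name and file paths are purely illustrative:

# python translate.py -config config.json -model model.pt \
#        -vocab data.vocab.pkl -test_data test.src -test_out test.out \
#        -beam_size 5 -decode_max_length 100 -gpuid 0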
Example #5
def create_or_load_hparams(out_dir,
                           default_hparams,
                           hparams_path,
                           save_hparams=True):
    """Create hparams or load hparams from out_dir."""
    hparams = utils.load_hparams(out_dir)
    if not hparams:
        hparams = default_hparams
        hparams = utils.maybe_parse_standard_hparams(hparams, hparams_path)
        hparams = extend_hparams(hparams)
    else:
        hparams = ensure_compatible_hparams(hparams, default_hparams,
                                            hparams_path)

    # Save HParams
    if save_hparams:
        utils.save_hparams(out_dir, hparams)
        for metric in hparams.metrics:
            utils.save_hparams(getattr(hparams, "best_" + metric + "_dir"),
                               hparams)

    # Print HParams
    utils.print_hparams(hparams)
    return hparams
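A minimal usage sketch for the function above. create_hparams and the flag names are assumptions modeled on the TensorFlow NMT driver; only create_or_load_hparams itself comes from the snippet:

# Hypothetical caller: build default hparams from command-line flags, then
# create fresh hparams or reuse the ones saved in the output directory.
default_hparams = create_hparams(flags)          # assumed helper
hparams = create_or_load_hparams(flags.out_dir, default_hparams,
                                 flags.hparams_path, save_hparams=True)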
Example #6
import nmt.IO
import argparse
import nmt.utils.misc_utils as utils
import torch
import json
parser = argparse.ArgumentParser()
parser.add_argument('-train_src', type=str)
parser.add_argument('-train_tgt', type=str)

parser.add_argument('-save_data', type=str)
parser.add_argument('-config', type=str)
args = parser.parse_args()

opt = utils.load_hparams(args.config)

if opt.random_seed > 0:
    torch.manual_seed(opt.random_seed)

fields = nmt.IO.get_fields()
print("Building Training...")
train = nmt.IO.NMTDataset(
    src_path=args.train_src,
    tgt_path=args.train_tgt,
    fields=[('src', fields["src"]),
            ('tgt', fields["tgt"])])
print("Building Vocab...")
nmt.IO.build_vocab(train, opt)

print("Saving fields")
# Use a context manager so the file is flushed and closed after torch.save.
with open(args.save_data + '.vocab.pkl', 'wb') as f:
    torch.save(nmt.IO.save_vocab(fields), f)
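The saved vocabulary can later be loaded back the way Example #4 does it; a one-line sketch, with the path mirroring the torch.save call above:

# Mirrors Example #4's loading code.
fields = nmt.IO.load_fields(torch.load(args.save_data + '.vocab.pkl'))

What follows appears to be a separate driver script with its own argument parser, covering both training and testing of a template generator.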
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-config", type=str)
    parser.add_argument("-nmt_dir", type=str)
    parser.add_argument('-gpuid', default=[0], nargs='+', type=int)
    parser.add_argument("-valid_file", type=str)
    parser.add_argument("-train_file", type=str)
    parser.add_argument("-test_file", type=str)
    parser.add_argument("-model", type=str)
    parser.add_argument("-src_vocab", type=str)
    parser.add_argument("-tgt_vocab", type=str)
    parser.add_argument("-mode", type=str)
    parser.add_argument("-out_file", type=str)
    parser.add_argument("-stop_words", type=str, default=None)
    parser.add_argument("-for_train", type=bool, default=True)
    args = parser.parse_args()
    opt = utils.load_hparams(args.config)

    if opt.random_seed > 0:
        random.seed(opt.random_seed)
        torch.manual_seed(opt.random_seed)

    fields = dict()
    vocab_src = Vocab(args.src_vocab, noST=True)
    vocab_tgt = Vocab(args.tgt_vocab)
    fields['src'] = vocab_wrapper(vocab_src)
    fields['tgt'] = vocab_wrapper(vocab_tgt)

    if args.mode == "test":
        model = nmt.model_helper.create_template_generator(opt, fields)
        if use_cuda:
            model = model.cuda()
        model.load_checkpoint(args.model)
        model.eval()
        test = Data_Loader(args.test_file,
                           opt.train_batch_size,
                           train=False,
                           mask_end=True,
                           stop_words=args.stop_words)
        fo = open(args.out_file, 'w')
        loss, acc, ntokens = 0., 0., 0.
        reserved, targeted, received = 0., 0., 0.
        for batch in test:
            I_word, I_word_length = batch.I
            D_word, D_word_length = batch.D
            target, _ = batch.mask
            target = target.float()
            ref_tgt_inputs, ref_tgt_lengths = batch.ref_tgt
            preds = model(I_word, I_word_length, D_word, D_word_length,
                          ref_tgt_inputs, ref_tgt_lengths)
            preds = preds.squeeze(2)
            # Zero out padded positions: the mask is passed as the per-element
            # weight of the BCE loss (old-style PyTorch API with size_average).
            mask = sequence_mask(ref_tgt_lengths).transpose(0, 1).float()
            loss += F.binary_cross_entropy(preds,
                                           target,
                                           weight=mask,
                                           size_average=False).data[0]
            ans = torch.ge(preds, 0.5).float()
            output_results(ans, batch, fo, vocab_tgt, args.for_train)
            acc += (torch.eq(ans, target).float().data * mask).sum()
            received += (ans.data * target.data * mask).sum()
            reserved += (ans.data * mask).sum()
            targeted += (target.data * mask).sum()
            ntokens += mask.sum()
        print("test_loss: ", loss / ntokens, "test_acc: ", acc / ntokens,
              "precision:", received / reserved, "recall: ",
              received / targeted, "leave percentage", targeted / ntokens)
        fo.close()
        return

    train = Data_Loader(args.train_file,
                        opt.train_batch_size,
                        mask_end=True,
                        stop_words=args.stop_words)
    valid = Data_Loader(args.valid_file,
                        opt.train_batch_size,
                        mask_end=True,
                        stop_words=args.stop_words)

    # Build model.

    model, start_epoch_at = build_or_load_model(args, opt, fields)
    check_save_model_path(args, opt)

    # Build optimizer.
    optim = build_optim(model, opt)
    lr_scheduler = build_lr_scheduler(optim.optimizer, opt)

    if use_cuda:
        model = model.cuda()

    # Do training.

    train_model(opt, model, train, valid, fields, optim, lr_scheduler,
                start_epoch_at)
    print("DONE")