Example #1
# show the args:
arg_str = "args:\n"
for w in sorted(args.__dict__.keys()):
    if w not in ("U", "V", "Freq"):
        arg_str += "{}:\t{}\n".format(w, args.__dict__[w])
logger.info(arg_str)
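
# A minimal sketch (not from the original source) of the same dump using vars()
# and a membership test, which avoids the string-identity pitfall fixed above:
def format_args(namespace, excluded=("U", "V", "Freq")):
    """Return a printable key:value listing of an argparse.Namespace."""
    return "args:\n" + "".join(
        "{}:\t{}\n".format(k, v)
        for k, v in sorted(vars(namespace).items())
        if k not in excluded)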

# ----------------------------------------------------------------------------------------------------------------- #
#
# Starting Meta-Learning for Low-Resource Neural Machine Translation
#
# ----------------------------------------------------------------------------------------------------------------- #

# optimizer
meta_opt = torch.optim.Adam(
    [p for p in model.get_parameters(
        type='meta' if not args.no_meta_training else 'full')
     if p.requires_grad],
    betas=(0.9, 0.98), eps=1e-9)
if args.meta_approx_2nd:
    sgd_opt = torch.optim.SGD(
        [p for p in model.get_parameters(type='meta0') if p.requires_grad],
        lr=args.approx_lr)
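
# Illustration only: `model.get_parameters(type=...)` is specific to this code
# base. With a plain torch.nn.Module, the analogous trainable-parameter filter is:
import torch

def trainable_adam(module, lr=1e-3):
    # Same pattern as above: optimize only tensors with requires_grad=True;
    # betas/eps follow the Transformer settings used in these snippets.
    return torch.optim.Adam(
        [p for p in module.parameters() if p.requires_grad],
        lr=lr, betas=(0.9, 0.98), eps=1e-9)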

# if resume training
if (args.load_from is not None) and (args.resume):
    with torch.cuda.device(args.gpu):  # important: sets the device that storage.cuda() below targets
        offset, opt_states = torch.load(
            args.models_dir + '/' + args.load_from + '.pt.states',
            map_location=lambda storage, loc: storage.cuda())
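
# The map_location lambda above re-homes every saved tensor onto the current
# CUDA device; a sketch of the equivalent with a device string, which modern
# PyTorch accepts directly:
import torch

def load_states(path):
    return torch.load(path, map_location='cuda')
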
Example #2
                data_name, export(loss_inner), self_opt.param_groups[0]['lr'],
                bs_inner)

            if use_prog_bar:
                progressbar.update(1)
                progressbar.set_description(info)

        if use_prog_bar:
            progressbar.close()
        return model.save_fast_weights()

    # ----- meta-validation ----- #
    dev_iters = iters
    weights = model.save_fast_weights()
    self_opt = torch.optim.Adam(
        [p for p in model.get_parameters(type=args.finetune_params)
         if p.requires_grad],
        betas=(0.9, 0.98), eps=1e-9)
    corpus_bleu = -1

    # training starts
    best = Best(max,
                'corpus_bleu',
                'i',
                model=model,
                opt=self_opt,
                path=args.model_name,
                gpu=args.gpu)
    dev_metrics = Metrics('dev', 'loss', 'gleu')
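
# `Best` and `Metrics` are helpers from this repository; a minimal sketch of the
# idea behind `Best` (assumed behavior, not the repo's API): keep the running
# maximum of a metric and report when a new best appears.
class BestTracker:
    def __init__(self):
        self.best, self.step = float('-inf'), None

    def accumulate(self, value, step):
        # Returns True when `value` improves on the best seen so far; a real
        # tracker would also checkpoint model/optimizer state at that point.
        if value > self.best:
            self.best, self.step = value, step
            return True
        return False
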
Example #3
        if use_prog_bar:
            progressbar.update(1)
            progressbar.set_description(info)

    if use_prog_bar:
        progressbar.close()
    return model.save_fast_weights()
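
# `save_fast_weights` is specific to this code base; a generic sketch of the same
# snapshot/restore idea for a plain nn.Module (assumed semantics):
import copy

def snapshot(module):
    # Deep-copied state_dict, so later inner-loop updates cannot mutate it.
    return copy.deepcopy(module.state_dict())

def restore(module, weights):
    module.load_state_dict(weights)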



# ----- meta-validation ----- #
dev_iters = iters
weights = model.save_fast_weights()

fast_weights = weights
self_opt = torch.optim.Adam(
    [p for p in model.get_parameters(type='fast') if p.requires_grad],
    betas=(0.9, 0.98), eps=1e-9)
corpus_bleu = -1

# training starts
best = Best(max, 'corpus_bleu', 'i', model=model, opt=self_opt, path=args.model_name, gpu=args.gpu)
dev_metrics = Metrics('dev', 'loss', 'gleu')

outputs_data = valid_model(args, model, dev_real, dev_metrics, print_out=True)
corpus_bleu0 = outputs_data['corpus_bleu']

if args.tensorboard and (not args.debug):
    writer.add_scalar('dev/BLEU_corpus_', outputs_data['corpus_bleu'], dev_iters)
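
# Assumption: `writer` is a TensorBoard summary writer created elsewhere; a
# minimal sketch of that setup (the log directory name is hypothetical):
from torch.utils.tensorboard import SummaryWriter

def make_writer(log_dir='runs/meta_nmt'):
    # add_scalar(tag, value, global_step), as used above.
    return SummaryWriter(log_dir=log_dir)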

for j in range(args.valid_epochs):
    args.logger.info("Fine-tuning epoch: {}".format(j))
    dev_metrics.reset()
Example #4
            bs_inner)

        if use_prog_bar:
            progressbar.update(1)
            progressbar.set_description(info)

    if use_prog_bar:
        progressbar.close()
    return model.save_fast_weights()


# ----- meta-validation ----- #
dev_iters = iters
weights = model.save_fast_weights()
self_opt = torch.optim.Adam(
    [p for p in model.get_parameters(type='fast') if p.requires_grad],
    betas=(0.9, 0.98),
    eps=1e-9)
corpus_bleu = -1

# training starts
best = Best(max,
            'corpus_bleu',
            'i',
            model=model,
            opt=self_opt,
            path=args.model_name,
            gpu=args.gpu)
dev_metrics = Metrics('dev', 'loss', 'gleu')

outputs_data = valid_model(args, model, dev_real, dev_metrics, print_out=True)
Example #5
# show the args:
arg_str = "args:\n"
for w in sorted(args.__dict__.keys()):
    if w not in ("U", "V", "Freq"):
        arg_str += "{}:\t{}\n".format(w, args.__dict__[w])
logger.info(arg_str)

# ----------------------------------------------------------------------------------------------------------------- #
#
# Starting Meta-Learning for Low-Resource Neural Machine Translation
#
# ----------------------------------------------------------------------------------------------------------------- #

# optimizer
meta_opt = torch.optim.Adam(
    [p for p in model.get_parameters(type='meta') if p.requires_grad],
    betas=(0.9, 0.98),
    eps=1e-9)

# if resume training
if (args.load_from is not None) and (args.resume):
    with torch.cuda.device(args.gpu):  # important: sets the device that storage.cuda() below targets
        offset, opt_states = torch.load(
            args.models_dir + '/' + args.load_from + '.pt.states',
            map_location=lambda storage, loc: storage.cuda())
        meta_opt.load_state_dict(opt_states)
else:
    offset = 0

# ---- updates ------ #
iters = offset
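
# Assumed counterpart of the resume block above (not shown in these snippets):
# the '.pt.states' file read earlier would be written during training roughly
# as follows.
def save_states(path, iters, optimizer):
    # Persist the global step together with the optimizer state as one tuple,
    # matching the `offset, opt_states = torch.load(...)` unpacking above.
    torch.save((iters, optimizer.state_dict()), path)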