Exemplo n.º 1
0
def load_test_model(opt, model_path=None):
    """Restore a checkpoint for inference.

    Loads the checkpoint (onto CPU storage), rebuilds the vocabulary
    fields and the model, switches everything to eval mode, and returns
    ``(fields, model, model_opt)``.
    """
    path = model_path if model_path is not None else opt.models[0]
    checkpoint = torch.load(
        path, map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)

    vocab = checkpoint['vocab']
    if not inputters.old_style_vocab(vocab):
        fields = vocab
    else:
        # Legacy checkpoints store a bare vocab; convert it to fields.
        fields = inputters.load_old_vocab(
            vocab, opt.data_type, dynamic_dict=model_opt.copy_attn)

    model = build_base_model(
        model_opt, fields, use_gpu(opt), checkpoint, opt.gpu)
    if opt.fp32:
        model.float()
    model.eval()
    model.generator.eval()
    # TODO(yida)
    if model_opt.pos_gen:
        model.pos_generator.eval()
    return fields, model, model_opt
Exemplo n.º 2
0
def load_test_multitask_model(opt, model_path=None):
    """Load a multitask checkpoint and return ``(fields, model, model_opt)``.

    The checkpoint is expected to carry the whole serialized model under
    ``'whole_model'`` and a (possibly per-language-pair) vocab under
    ``'vocab'``.

    Bug fix: ``model_opt`` was previously read (``model_opt.copy_attn``)
    before it was assigned, so any checkpoint with an old-style vocab
    raised ``NameError``. The options are now extracted right after the
    checkpoint is loaded.
    """
    if model_path is None:
        model_path = opt.models[0]
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    # Extract options first — they are needed by load_old_vocab below.
    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])

    model = checkpoint['whole_model']
    vocab = checkpoint['vocab']
    src_tgtpair = opt.src_lang + '-' + opt.tgt_lang
    # New-style checkpoints key the vocab by language pair.
    vocab = vocab if type(vocab) is dict else vocab[src_tgtpair]
    if inputters.old_style_vocab(vocab):
        fields = inputters.load_old_vocab(vocab,
                                          opt.data_type,
                                          dynamic_dict=model_opt.copy_attn)
    else:
        fields = vocab

    if opt.data_type == 'audio' and not (isinstance(
            checkpoint.get('vocab')[src_tgtpair]['src'], AudioSeqField)):
        # FIXME: hard-coded, machine-specific path — should come from opt.
        vocab_path = "/home/vazquezj/Documents/iwslt2019/_ready_to_train/onmt_ready/ENaudio_DEtext/data"
        fields = torch.load(vocab_path + '.vocab.pt')

    device = torch.device("cuda" if use_gpu(opt) else "cpu")
    model.to(device)

    model.eval()

    return fields, model, model_opt
Exemplo n.º 3
0
def load_test_model(opt, model_path=None):
    """Restore a checkpoint for translation and return (fields, model, model_opt).

    Also tags the target field with the keyphrase target format when the
    data type is ``"keyphrase"``.
    """
    ckpt_path = opt.models[0] if model_path is None else model_path
    checkpoint = torch.load(ckpt_path,
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)

    vocab = checkpoint['vocab']
    fields = (
        inputters.load_old_vocab(vocab, opt.data_type,
                                 dynamic_dict=model_opt.copy_attn)
        if inputters.old_style_vocab(vocab)
        else vocab
    )
    # @memray, to make tgt_field be aware of format of targets (multiple phrases)
    if opt.data_type == "keyphrase":
        fields["tgt"].type = opt.tgt_type

    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
                             opt.gpu)
    if opt.fp32:
        model.float()
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
Exemplo n.º 4
0
def load_test_model(opt, dummy_opt, model_path=None):
    """Load a checkpoint, back-fill missing options from ``dummy_opt``,
    and build the model (hierarchical variants use my_build_base_model).
    """
    if model_path is None:
        model_path = opt.models[0]
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    vocab = checkpoint['vocab']
    if not inputters.old_style_vocab(vocab):
        fields = vocab
    else:
        fields = inputters.load_fields_from_vocab(vocab, opt.data_type)

    model_opt = checkpoint['opt']

    # Fill in any option that was added to the code after this
    # checkpoint was trained.
    for key in dummy_opt:
        if key not in model_opt:
            model_opt.__dict__[key] = dummy_opt[key]

    # changed to my_build_base_model by wchen
    needs_custom_builder = ('hr' in model_opt.encoder_type
                            or 'hr' in model_opt.decoder_type
                            or 'CatSeqD' in model_opt.decoder_type)
    builder = my_build_base_model if needs_custom_builder else build_base_model
    model = builder(model_opt, fields, use_gpu(opt), checkpoint)

    model.eval()
    model.generator.eval()
    return fields, model, model_opt
Exemplo n.º 5
0
def load_test_model(opt, args):
    """Load the first model in ``opt.models`` and cast it to the precision
    requested by ``args.data_type`` ('fp32' or 'fp16').
    """
    checkpoint = torch.load(opt.models[0],
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)

    vocab = checkpoint['vocab']
    if not inputters.old_style_vocab(vocab):
        fields = vocab
    else:
        fields = inputters.load_old_vocab(
            vocab, opt.data_type, dynamic_dict=model_opt.copy_attn)

    model = build_base_model(model_opt, fields, use_gpu(opt), args, checkpoint,
                             opt.gpu)
    # Dispatch on the requested precision; anything else is a usage error.
    cast = {'fp32': model.float, 'fp16': model.half}.get(args.data_type)
    if cast is None:
        raise ValueError('wrong data_type argument {}'.format(args.data_type))
    cast()
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
Exemplo n.º 6
0
def ltm(opt, model_path=None):
    """Load a test model; with ``settings.RANDOM_WEIGHTS`` the checkpoint
    weights are discarded so the model is randomly initialized.
    """
    ckpt_file = model_path if model_path is not None else opt.models[0]
    checkpoint = torch.load(ckpt_file,
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)

    vocab = checkpoint["vocab"]
    if not inputters.old_style_vocab(vocab):
        fields = vocab
    else:
        fields = inputters.load_old_vocab(
            vocab, opt.data_type, dynamic_dict=model_opt.copy_attn)

    # This will randomly initialize
    if settings.RANDOM_WEIGHTS:
        checkpoint = None
    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
                             opt.gpu)
    if opt.fp32:
        model.float()
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
Exemplo n.º 7
0
def load_test_model(opt, model_path=None):
    """Load a test model; in ARAE mode the builder returns the base model
    plus GAN generator/discriminator, and a 3-tuple is returned in place
    of the single model.
    """
    if model_path is None:
        model_path = opt.models[0]
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)

    vocab = checkpoint['vocab']
    fields = (inputters.load_old_vocab(vocab, opt.data_type,
                                       dynamic_dict=model_opt.copy_attn)
              if inputters.old_style_vocab(vocab) else vocab)

    arae_model_path = opt.model_arae if opt.arae and checkpoint else None
    built = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
                             opt.gpu, arae_setting=opt.arae,
                             arae_model_path=arae_model_path)
    if opt.arae:
        # ARAE builds return (base model, GAN generator, GAN discriminator).
        model, gan_g, gan_d = built
        gan_g.eval()
        gan_d.eval()
    else:
        model = built

    if opt.fp32:
        model.float()
    model.eval()
    model.generator.eval()

    if opt.arae:
        model = model, gan_g, gan_d

    return fields, model, model_opt
Exemplo n.º 8
0
def load_test_model(opt, model_path=None):
    """Load a student/teacher checkpoint and return (fields, model, model_opt).

    Fixes over the previous version:
    - An explicit ``model_path`` string argument used to be indexed with
      ``[0]``, so the checkpoint was loaded from its *first character*;
      the default path is now taken as ``opt.models[0]`` and the loaded
      path is used as-is.
    - ``teacher_fields`` was only bound in the new-style-vocab branch,
      raising ``NameError`` at the ``fields_opt`` construction for
      old-style checkpoints; it is now bound on both paths.
    """
    if model_path is None:
        model_path = opt.models[0]
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)
    vocab = checkpoint['vocab']
    teacher_vocab = checkpoint['teacher_vocab']
    if inputters.old_style_vocab(vocab):
        fields = inputters.load_old_vocab(vocab,
                                          opt.data_type,
                                          dynamic_dict=model_opt.copy_attn)
        # NOTE(review): assumes the teacher vocab is old-style whenever the
        # student vocab is — confirm against the training-side save logic.
        teacher_fields = inputters.load_old_vocab(
            teacher_vocab, opt.data_type, dynamic_dict=model_opt.copy_attn)
    else:
        fields = vocab
        teacher_fields = teacher_vocab

    fields_opt = {'original': fields, 'teacher': teacher_fields}
    # setattr(fields,"true_tgt_vocab",true_tgt_field.vocab)

    model = build_base_model(model_opt, fields_opt, use_gpu(opt), checkpoint,
                             opt.gpu)
    if opt.fp32:
        model.float()
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
Exemplo n.º 9
0
def main():
    """Dump the trained model's source/target word embeddings to text files.

    Reads the checkpoint named by ``opt.model``, rebuilds the model, and
    writes ``src_embeddings.txt`` / ``tgt_embeddings.txt`` under
    ``opt.output_dir``. Fix: ``model_opt = checkpoint['opt']`` was
    assigned twice; the first (dead) assignment is removed.
    """
    dummy_parser = argparse.ArgumentParser(description='train.py')
    onmt.opts.model_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]
    opt = parser.parse_args()  # `parser` is expected to exist at module level
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    # Add in default model arguments, possibly added since training.
    checkpoint = torch.load(opt.model,
                            map_location=lambda storage, loc: storage)

    vocab = checkpoint['vocab']
    if inputters.old_style_vocab(vocab):
        fields = onmt.inputters.load_old_vocab(vocab)
    else:
        fields = vocab
    src_dict = fields['src'].base_field.vocab  # assumes src is text
    tgt_dict = fields['tgt'].base_field.vocab

    model_opt = checkpoint['opt']
    for arg in dummy_opt.__dict__:
        if arg not in model_opt:
            model_opt.__dict__[arg] = dummy_opt.__dict__[arg]

    model = onmt.model_builder.build_base_model(model_opt, fields,
                                                use_gpu(opt), opt.length_model,
                                                checkpoint)
    encoder = model.encoder
    decoder = model.decoder

    encoder_embeddings = encoder.embeddings.word_lut.weight.data.tolist()
    decoder_embeddings = decoder.embeddings.word_lut.weight.data.tolist()

    logger.info("Writing source embeddings")
    write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
                     encoder_embeddings)

    logger.info("Writing target embeddings")
    write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
                     decoder_embeddings)

    logger.info('... done.')
    logger.info('Converting model...')
Exemplo n.º 10
0
def load_test_model(opt, dummy_opt, model_path=None):
    """Load a checkpoint and build its model, filling in options that were
    added to the codebase after the checkpoint was written.
    """
    if model_path is None:
        model_path = opt.models[0]
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    model_opt = checkpoint['opt']
    vocab = checkpoint['vocab']
    if not inputters.old_style_vocab(vocab):
        fields = vocab
    else:
        fields = inputters.load_old_vocab(
            vocab, opt.data_type, dynamic_dict=model_opt.copy_attn)

    # Back-fill options missing from the stored namespace.
    for name in dummy_opt:
        if name not in model_opt:
            model_opt.__dict__[name] = dummy_opt[name]

    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
Exemplo n.º 11
0
def load_test_model(opt, model_path=None):
    """Load a checkpoint for inference and return (fields, model, model_opt)."""
    path = opt.models[0] if model_path is None else model_path
    checkpoint = torch.load(path,
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)

    vocab = checkpoint['vocab']
    is_legacy = inputters.old_style_vocab(vocab)
    fields = (inputters.load_old_vocab(vocab, opt.data_type,
                                       dynamic_dict=model_opt.copy_attn)
              if is_legacy else vocab)

    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
                             opt.gpu)
    if opt.fp32:
        model.float()
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
Exemplo n.º 12
0
    def __init__(self, model_dir):
        """Load the saved model under ``model_dir`` and build a Translator
        (plus, when the config enables online learning, an Optimizer and
        Trainer for incremental updates).

        Fix: the four translator options were previously fetched with bare
        ``except:`` clauses, which also swallow ``KeyboardInterrupt`` and
        ``SystemExit``; they now use ``getattr`` with the same defaults.

        :param model_dir: path to the model directory
        :raises ValueError: if ``model_dir`` is not a directory
        """
        # Model dir
        self._model_dir = os.path.abspath(model_dir)
        if not os.path.isdir(self._model_dir):
            msg = f"{model_dir} doesn't exists'"
            raise ValueError(msg)

        # Extended model
        self._extended_model = ExtendedModel(model_dir)

        # Config
        self._config = self._extended_model.config

        # Options
        self._opts = self._config.opts

        # Get the model options
        model_path = self._opts.models[0]
        checkpoint = torch.load(
            model_path, map_location=lambda storage, loc: storage
        )
        self._model_opts = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
        ArgumentParser.update_model_opts(self._model_opts)
        ArgumentParser.validate_model_opts(self._model_opts)

        # Extract vocabulary
        vocab = checkpoint["vocab"]
        if inputters.old_style_vocab(vocab):
            self._fields = inputters.load_old_vocab(
                vocab, "text", dynamic_dict=False
            )
        else:
            self._fields = vocab

        # Train_steps
        self._train_steps = self._model_opts.train_steps

        # Build openmmt model
        self._opennmt_model = build_base_model(
            self._model_opts,
            self._fields,
            use_gpu(self._opts),
            checkpoint,
            self._opts.gpu,
        )

        # Translator options, with defaults for configs that predate them.
        min_length = getattr(self._opts, "min_length", 0)
        max_length = getattr(self._opts, "max_length", 100)
        beam_size = getattr(self._opts, "beam_size", 5)
        replace_unk = getattr(self._opts, "replace_unk", 0)

        self._translator = Translator(
            self._opennmt_model,
            self._fields,
            TextDataReader(),
            TextDataReader(),
            gpu=self._opts.gpu,
            min_length=min_length,
            max_length=max_length,
            beam_size=beam_size,
            replace_unk=replace_unk,
            copy_attn=self._model_opts.copy_attn,
            global_scorer=GNMTGlobalScorer(0.0, -0.0, "none", "none"),
            seed=self.SEED,
        )

        online_learning = self._config.online_learning
        if online_learning:
            # Optim: plain SGD with a synthetic options namespace.
            optimizer_opt = type("", (), {})()
            optimizer_opt.optim = "sgd"
            optimizer_opt.learning_rate = self._opts.learning_rate
            optimizer_opt.train_from = ""
            optimizer_opt.adam_beta1 = 0
            optimizer_opt.adam_beta2 = 0
            optimizer_opt.model_dtype = "fp32"
            optimizer_opt.decay_method = "none"
            optimizer_opt.start_decay_steps = 100000
            optimizer_opt.learning_rate_decay = 1.0
            optimizer_opt.decay_steps = 100000
            optimizer_opt.max_grad_norm = 5
            self._optim = Optimizer.from_opt(
                self._opennmt_model, optimizer_opt, checkpoint=None
            )

            # Trainer: minimal single-process setup, no regularization.
            trainer_opt = type("", (), {})()
            trainer_opt.lambda_coverage = 0.0
            trainer_opt.copy_attn = False
            trainer_opt.label_smoothing = 0.0
            trainer_opt.truncated_decoder = 0
            trainer_opt.model_dtype = "fp32"
            trainer_opt.max_generator_batches = 32
            trainer_opt.normalization = "sents"
            trainer_opt.accum_count = [1]
            trainer_opt.accum_steps = [0]
            trainer_opt.world_size = 1
            trainer_opt.average_decay = 0
            trainer_opt.average_every = 1
            trainer_opt.dropout = 0
            trainer_opt.dropout_steps = (0,)
            trainer_opt.gpu_verbose_level = 0
            trainer_opt.early_stopping = 0
            trainer_opt.early_stopping_criteria = (None,)
            trainer_opt.tensorboard = False
            trainer_opt.report_every = 50
            trainer_opt.gpu_ranks = []
            if self._opts.gpu != -1:
                trainer_opt.gpu_ranks = [self._opts.gpu]

            self._trainer = build_trainer(
                trainer_opt,
                self._opts.gpu,
                self._opennmt_model,
                self._fields,
                self._optim,
            )
        else:
            self._trainer = None
Exemplo n.º 13
0
    def __init__(self, model_dir):
        """Restore a saved model from ``model_dir`` and wire up a
        translator plus an (always-built) trainer for it.

        :param model_dir: path to the model directory
        :raises ValueError: if ``model_dir`` is not a directory
        """
        # Resolve and validate the model directory.
        self._model_dir = os.path.abspath(model_dir)
        if not os.path.isdir(self._model_dir):
            msg = f"{model_dir} doesn't exists'"
            raise ValueError(msg)

        # Extended model and its configuration / options.
        self._extended_model = ExtendedModel(model_dir)
        self._config = self._extended_model.config
        self._opts = self._config.opts

        # Load the checkpoint (onto CPU storage) and its model options.
        ckpt_path = self._opts.models[0]
        ckpt = torch.load(ckpt_path,
                          map_location=lambda storage, loc: storage)
        self._model_opts = ArgumentParser.ckpt_model_opts(ckpt['opt'])
        ArgumentParser.update_model_opts(self._model_opts)
        ArgumentParser.validate_model_opts(self._model_opts)

        # Number of steps the checkpoint was trained for.
        self._train_steps = self._model_opts.train_steps

        # Vocabulary: convert legacy vocabs into fields.
        saved_vocab = ckpt['vocab']
        if not inputters.old_style_vocab(saved_vocab):
            self._fields = saved_vocab
        else:
            self._fields = inputters.load_old_vocab(
                saved_vocab,
                self._opts.data_type,
                dynamic_dict=self._model_opts.copy_attn)

        # Rebuild the model from the checkpoint.
        self._model = build_base_model(self._model_opts, self._fields,
                                       use_gpu(self._opts), ckpt,
                                       self._opts.gpu)
        if self._opts.fp32:
            self._model.float()

        # Translator with a global scorer derived from the options.
        beam_scorer = GNMTGlobalScorer.from_opt(self._opts)
        self.translator = OnmtxTranslator.from_opt(
            self._model,
            self._fields,
            self._opts,
            self._model_opts,
            global_scorer=beam_scorer,
            out_file=None,
            report_score=False,
            logger=None,
        )

        # Optimizer restored from the checkpoint, then the trainer.
        self._optim = Optimizer.from_opt(self._model,
                                         self._opts,
                                         checkpoint=ckpt)
        device_id = -1  # TODO Handle GPU
        self.trainer = build_trainer(self._opts, device_id, self._model,
                                     self._fields, self._optim)