def nmtmodel_forward(self, opt, source_l=3, bsize=1):
        """
        Creates an NMTModel with a custom opt namespace.
        Forwards a test batch and checks the output size.

        Args:
            opt: Namespace with options
            source_l: length of the input sequence
            bsize: batch size
        """
        word_dict = self.get_vocab()
        feature_dicts = []

        embeddings = make_embeddings(opt, word_dict, feature_dicts)
        enc = make_encoder(opt, embeddings)

        embeddings = make_embeddings(opt,
                                     word_dict,
                                     feature_dicts,
                                     for_encoder=False)
        dec = make_decoder(opt, embeddings)

        model = onmt.Models.NMTModel(enc, dec)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)
        outputs, attn, _ = model(test_src, test_tgt, test_length)
        outputsize = torch.zeros(source_l - 1, bsize, opt.rnn_size)
        # Make sure that output has the correct size and type
        self.assertEqual(outputs.size(), outputsize.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
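A note on the opt argument these tests take: only a handful of attributes are read (rnn_size, enc_layers, brnn, dropout, src_word_vec_size, encoder_type, decoder_type, ...). A minimal sketch of such a namespace, with illustrative values only; the real tests build it from OpenNMT-py's full option parser:

    from argparse import Namespace

    # Hypothetical minimal option set covering the attributes the tests read.
    opt = Namespace(rnn_size=10, enc_layers=2, dec_layers=2,
                    brnn=False, dropout=0.3,
                    src_word_vec_size=5, tgt_word_vec_size=5,
                    encoder_type='rnn', decoder_type='rnn')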
Example 2
    def nmtmodel_forward(self, opt, source_l=3, bsize=1):
        """
        Creates an NMTModel with a custom opt namespace.
        Forwards a test batch and checks the output size.

        Args:
            opt: Namespace with options
            source_l: length of the input sequence
            bsize: batch size
        """
        word_dict = self.get_vocab()
        feature_dicts = []

        embeddings = make_embeddings(opt, word_dict, feature_dicts)
        enc = make_encoder(opt, embeddings)

        embeddings = make_embeddings(opt, word_dict, feature_dicts,
                                     for_encoder=False)
        dec = make_decoder(opt, embeddings)

        model = onmt.Models.NMTModel(enc, dec)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)
        outputs, attn, _ = model(test_src,
                                 test_tgt,
                                 test_length)
        outputsize = torch.zeros(source_l - 1, bsize, opt.rnn_size)
        # Make sure that output has the correct size and type
        self.assertEqual(outputs.size(), outputsize.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
Example 3
    def nmtmodel_forward(self, opt, source_l=3, bsize=1):
        """
        Creates an NMTModel with a custom opt namespace.
        Forwards a test batch and checks the output size.

        Args:
            opt: Namespace with options
            source_l: length of the input sequence
            bsize: batch size
        """
        word_dict = self.get_vocab()
        feature_dicts = []

        embeddings_enc = make_embeddings(opt, word_dict, feature_dicts)

        embeddings_dec = make_embeddings(opt,
                                         word_dict,
                                         feature_dicts,
                                         for_encoder=False)

        generator = DdpgOffPolicy.QueryGenerator(opt, embeddings_dec,
                                                 len(word_dict))

        model = onmt.Models.RL_Model(opt, embeddings_enc, embeddings_dec,
                                     generator)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)
        ys, values_fit, values_optim = model(test_src, test_tgt, test_length)
        outputsize = torch.zeros(source_l - 1, bsize, opt.rnn_size)
        # Make sure that output has the correct size and type
        print(values_fit)
Example 4
def main():
    opt = parser.parse_args()
    checkpoint = torch.load(opt.model)
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    model_opt = checkpoint['opt']
    src_dict = checkpoint['dicts']['src']
    tgt_dict = checkpoint['dicts']['tgt']
    feature_dicts = []

    embeddings = make_embeddings(model_opt, src_dict, feature_dicts)
    encoder = make_encoder(model_opt, embeddings)

    embeddings = make_embeddings(model_opt,
                                 tgt_dict,
                                 feature_dicts,
                                 for_encoder=False)
    decoder = make_decoder(model_opt, embeddings)

    encoder_embeddings = encoder.word_lut.weight.data.tolist()
    decoder_embeddings = decoder.word_lut.weight.data.tolist()

    print("Writing source embeddings")
    write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
                     encoder_embeddings)

    print("Writing target embeddings")
    write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
                     decoder_embeddings)

    print('... done.')
    print('Converting model...')
Example 5
    def encoder_forward(self, opt, source_l=3, bsize=1):
        '''
        Tests if the encoder works as expected

        args:
            opt: set of options
            source_l: Length of generated input sentence
            bsize: Batchsize of generated input
        '''
        word_dict = self.get_vocab()
        feature_dicts = []
        embeddings = make_embeddings(opt, word_dict, feature_dicts)
        enc = make_encoder(opt, embeddings)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)

        hidden_t, outputs = enc(test_src, test_length)

        # Initialize vectors to compare size with
        test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.rnn_size)
        test_out = torch.zeros(source_l, bsize, opt.rnn_size)

        # Ensure correct sizes and types
        self.assertEqual(test_hid.size(), hidden_t[0].size())
        self.assertEqual(test_hid.size(), hidden_t[1].size())
        self.assertEqual(test_out.size(), outputs.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
Example 6
    def imagemodel_forward(self, opt, tgt_l=2, bsize=1, h=15, w=17):
        """
        Creates an image-to-text NMTModel with a custom opt namespace.
        Forwards a test batch and checks the output size.

        Args:
            opt: Namespace with options
            tgt_l: length of the target sequence
            bsize: batch size
            h: height of the generated test image
            w: width of the generated test image
        """
        if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':
            return

        word_dict = self.get_vocab()
        feature_dicts = []

        enc = ImageEncoder(opt.enc_layers, opt.brnn, opt.rnn_size, opt.dropout)

        embeddings = make_embeddings(opt,
                                     word_dict,
                                     feature_dicts,
                                     for_encoder=False)
        dec = make_decoder(opt, embeddings)

        model = onmt.Models.NMTModel(enc, dec)

        test_src, test_tgt, test_length = self.get_batch_image(h=h,
                                                               w=w,
                                                               bsize=bsize,
                                                               tgt_l=tgt_l)
        outputs, attn, _ = model(test_src, test_tgt, test_length)
        outputsize = torch.zeros(tgt_l - 1, bsize, opt.rnn_size)
        # Make sure that output has the correct size and type
        self.assertEqual(outputs.size(), outputsize.size())
        self.assertEqual(type(outputs), torch.Tensor)
Example 7
    def embeddings_forward(self, opt, sourceL=3, bsize=1):
        '''
        Tests if the embeddings work as expected.

        args:
            opt: set of options
            sourceL: Length of generated input sentence
            bsize: Batchsize of generated input
        '''
        vocab = self.get_vocab()
        feats_padding_idx = []
        emb = make_embeddings(opt,
                              vocab.stoi[onmt.IO.PAD_WORD],
                              feats_padding_idx,
                              len(vocab),
                              for_encoder=True)
        test_src, _, __ = self.get_batch(sourceL=sourceL, bsize=bsize)
        if opt.decoder_type == 'transformer':
            input = torch.cat([test_src, test_src], 0)
            res = emb(input)
            compare_to = torch.zeros(sourceL * 2, bsize, opt.src_word_vec_size)
        else:
            res = emb(test_src)
            compare_to = torch.zeros(sourceL, bsize, opt.src_word_vec_size)

        self.assertEqual(res.size(), compare_to.size())
Example 8
    def encoder_forward(self, opt, source_l=3, bsize=1):
        '''
        Tests if the encoder works as expected

        args:
            opt: set of options
            source_l: Length of generated input sentence
            bsize: Batchsize of generated input
        '''
        word_dict = self.get_vocab()
        feature_dicts = []
        embeddings = make_embeddings(opt, word_dict, feature_dicts)
        enc = make_encoder(opt, embeddings)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)

        hidden_t, outputs = enc(test_src, test_length)

        # Initialize vectors to compare size with
        test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.rnn_size)
        test_out = torch.zeros(source_l, bsize, opt.rnn_size)

        # Ensure correct sizes and types
        self.assertEqual(test_hid.size(), hidden_t[0].size())
        self.assertEqual(test_hid.size(), hidden_t[1].size())
        self.assertEqual(test_out.size(), outputs.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
Example 9
def main():
    opt = parser.parse_args()
    checkpoint = torch.load(opt.model,
                            map_location=lambda storage, loc: storage)
    #checkpoint = torch.load('../200k-model_acc_47.70_ppl_20.36_e13.pt',map_location=lambda storage, loc: storage)
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    fields = onmt.IO.load_fields(checkpoint['vocab'])
    src_dict = fields["src"].vocab
    tgt_dict = fields["tgt"].vocab
    feature_dicts = onmt.IO.collect_feature_dicts(fields, 'src')

    model_opt = checkpoint['opt']
    # src_dict = checkpoint['dicts']['src']
    # tgt_dict = checkpoint['dicts']['tgt']
    # feature_dicts = []

    embeddings = make_embeddings(model_opt, src_dict, feature_dicts)
    encoder = make_encoder(model_opt, embeddings)

    embeddings = make_embeddings(model_opt,
                                 tgt_dict,
                                 feature_dicts,
                                 for_encoder=False)
    decoder = make_decoder(model_opt, embeddings)

    encoder_embeddings = encoder.embeddings.word_lut.weight.data.tolist()
    decoder_embeddings = decoder.embeddings.word_lut.weight.data.tolist()

    print("Writing source embeddings")
    write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
                     encoder_embeddings)

    print("Writing target embeddings")
    write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
                     decoder_embeddings)

    print('... done.')
    print('Converting model...')
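Examples 4 and 9 call a write_embeddings helper that is not shown in these listings. A minimal sketch of what it might look like, assuming a torchtext-style vocab with an itos list (the actual helper ships with OpenNMT-py's embedding-extraction tool):

    def write_embeddings(filename, vocab, embeddings):
        # Write one "<token> <v1> <v2> ..." line per vocabulary entry.
        with open(filename, 'w', encoding='utf-8') as f:
            for i in range(min(len(embeddings), len(vocab.itos))):
                vec = ' '.join('%.5f' % v for v in embeddings[i])
                f.write('%s %s\n' % (vocab.itos[i], vec))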
Example 10
    def ntmmodel_forward(self, opt, sourceL=3, bsize=1):
        """
        Creates an NMTModel with a custom opt namespace.
        Forwards a test batch and checks the output size.

        Args:
            opt: Namespace with options
            sourceL: length of the input sequence
            bsize: batch size
        """
        vocab = self.get_vocab()
        word_padding_idx = vocab.stoi[onmt.IO.PAD_WORD]
        feats_padding_idx = []

        embeddings = make_embeddings(opt,
                                     word_padding_idx,
                                     feats_padding_idx,
                                     len(vocab),
                                     for_encoder=True)
        enc = make_encoder(opt, embeddings)

        embeddings = make_embeddings(opt,
                                     word_padding_idx,
                                     feats_padding_idx,
                                     len(vocab),
                                     for_encoder=False)
        dec = make_decoder(opt, embeddings)

        model = onmt.Models.NMTModel(enc, dec)

        test_src, test_tgt, test_length = self.get_batch(sourceL=sourceL,
                                                         bsize=bsize)
        outputs, attn, _ = model(test_src, test_tgt, test_length)
        outputsize = torch.zeros(sourceL - 1, bsize, opt.rnn_size)
        # Make sure that output has the correct size and type
        self.assertEqual(outputs.size(), outputsize.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
Example 11
    def audiomodel_forward(self, opt, tgt_l=2, bsize=1, t=37):
        """
        Creates a speech-to-text NMTModel with a custom opt namespace.
        Forwards a test batch and checks the output size.

        Args:
            opt: Namespace with options
            tgt_l: length of the target sequence
            bsize: batch size
            t: number of time steps in the generated audio input
        """
        if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':
            return

        word_dict = self.get_vocab()
        feature_dicts = []

        enc = AudioEncoder(opt.enc_layers,
                           opt.brnn,
                           opt.rnn_size,
                           opt.dropout,
                           opt.sample_rate,
                           opt.window_size)

        embeddings = make_embeddings(opt, word_dict, feature_dicts,
                                     for_encoder=False)
        dec = make_decoder(opt, embeddings)

        model = onmt.Models.NMTModel(enc, dec)

        test_src, test_tgt, test_length = self.get_batch_audio(
                                                  bsize=bsize,
                                                  sample_rate=opt.sample_rate,
                                                  window_size=opt.window_size,
                                                  t=t, tgt_l=tgt_l)
        outputs, attn, _ = model(test_src,
                                 test_tgt,
                                 test_length)
        outputsize = torch.zeros(tgt_l - 1, bsize, opt.rnn_size)
        # Make sure that output has the correct size and type
        self.assertEqual(outputs.size(), outputsize.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
Example 12
    def audiomodel_forward(self, opt, tgt_l=2, bsize=1, t=37):
        """
        Creates a speech-to-text NMTModel with a custom opt namespace.
        Forwards a test batch and checks the output size.

        Args:
            opt: Namespace with options
            tgt_l: length of the target sequence
            bsize: batch size
            t: number of time steps in the generated audio input
        """
        if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':
            return

        word_dict = self.get_vocab()
        feature_dicts = []

        enc = AudioEncoder(opt.enc_layers, opt.brnn, opt.rnn_size, opt.dropout,
                           opt.sample_rate, opt.window_size)

        embeddings = make_embeddings(opt,
                                     word_dict,
                                     feature_dicts,
                                     for_encoder=False)
        dec = make_decoder(opt, embeddings)

        model = onmt.Models.NMTModel(enc, dec)

        test_src, test_tgt, test_length = self.get_batch_audio(
            bsize=bsize,
            sample_rate=opt.sample_rate,
            window_size=opt.window_size,
            t=t,
            tgt_l=tgt_l)
        outputs, attn, _ = model(test_src, test_tgt, test_length)
        outputsize = torch.zeros(tgt_l - 1, bsize, opt.rnn_size)
        # Make sure that output has the correct size and type
        self.assertEqual(outputs.size(), outputsize.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
Example 13
    def embeddings_forward(self, opt, sourceL=3, bsize=1):
        '''
        Tests if the embeddings work as expected.

        args:
            opt: set of options
            sourceL: Length of generated input sentence
            bsize: Batchsize of generated input
        '''
        word_dict = self.get_vocab()
        feature_dicts = []
        emb = make_embeddings(opt, word_dict, feature_dicts)
        test_src, _, __ = self.get_batch(sourceL=sourceL, bsize=bsize)
        if opt.decoder_type == 'transformer':
            input = torch.cat([test_src, test_src], 0)
            res = emb(input)
            compare_to = torch.zeros(sourceL * 2, bsize, opt.src_word_vec_size)
        else:
            res = emb(test_src)
            compare_to = torch.zeros(sourceL, bsize, opt.src_word_vec_size)

        self.assertEqual(res.size(), compare_to.size())
Example 14
    def embeddings_forward(self, opt, source_l=3, bsize=1):
        '''
        Tests if the embeddings work as expected.

        args:
            opt: set of options
            source_l: Length of generated input sentence
            bsize: Batchsize of generated input
        '''
        word_dict = self.get_vocab()
        feature_dicts = []
        emb = make_embeddings(opt, word_dict, feature_dicts)
        test_src, _, __ = self.get_batch(source_l=source_l,
                                         bsize=bsize)
        if opt.decoder_type == 'transformer':
            input = torch.cat([test_src, test_src], 0)
            res = emb(input)
            compare_to = torch.zeros(source_l * 2, bsize,
                                     opt.src_word_vec_size)
        else:
            res = emb(test_src)
            compare_to = torch.zeros(source_l, bsize, opt.src_word_vec_size)

        self.assertEqual(res.size(), compare_to.size())
Example 15
def make_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu(bool): whether to use gpu.
        checkpoint: the model generated by the train phase, or a resumed snapshot
                    model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        ("Unsupported model type %s" % (model_opt.model_type))

    # Make encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = onmt.io.collect_feature_vocabs(fields, 'src')
        src_embeddings = make_embeddings(model_opt, src_dict, feature_dicts)
        encoder = make_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        encoder = ImageEncoder(model_opt.enc_layers, model_opt.brnn,
                               model_opt.rnn_size, model_opt.dropout)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.enc_layers, model_opt.brnn,
                               model_opt.rnn_size, model_opt.dropout,
                               model_opt.sample_rate, model_opt.window_size)

    # Make decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = make_embeddings(model_opt,
                                     tgt_dict,
                                     feature_dicts,
                                     for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')

        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = make_decoder(model_opt, tgt_embeddings)

    # Make NMTModel(= encoder + decoder).

    model = NMTTemplateModel(encoder, decoder, model_opt.rnn_size)
    model.model_type = model_opt.model_type

    # Make Generator.
    if not model_opt.copy_attn:
        generator = nn.Sequential(
            nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)),
            nn.LogSoftmax())
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.rnn_size, fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        print('Loading model parameters.')
        model.load_state_dict(checkpoint['model'])
        generator.load_state_dict(checkpoint['generator'])
    else:
        if model_opt.param_init != 0.0:
            print('Initializing model parameters.')
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator

    # Make the whole model leverage GPU if indicated to do so.
    if gpu:
        model.cuda()
    else:
        model.cpu()

    return model
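A minimal usage sketch for make_base_model, following the checkpoint-loading pattern of Example 9 (the file name and gpu flag are illustrative, and the field-loading call assumes an onmt.io API of the same vintage as the function above):

    checkpoint = torch.load('model.pt',
                            map_location=lambda storage, loc: storage)
    fields = onmt.io.load_fields_from_vocab(checkpoint['vocab'])
    model = make_base_model(checkpoint['opt'], fields,
                            gpu=False, checkpoint=checkpoint)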
Example 16
    src.build_vocab([])
    vocab = src.vocab

    source_l = 3
    bsize = 64

    test_src = Variable(torch.ones(source_l, bsize, 1)).long()
    test_tgt = Variable(torch.ones(source_l, bsize, 1)).long()
    test_length = torch.ones(bsize).fill_(source_l).long()
    batch = test_src, test_tgt, test_length

    word_dict = vocab
    feature_dicts = []

    embeddings_enc = make_embeddings(opt, word_dict, feature_dicts)
    embeddings_dec = make_embeddings(opt, word_dict, feature_dicts,
                                     for_encoder=False)

    generator = DdpgOffPolicy.QueryGenerator(opt,
                                             embeddings_dec,
                                             len(word_dict))

    model = onmt.Models.RL_Model(opt, embeddings_enc, embeddings_dec, generator)

    test_src, test_tgt, test_length = batch

    ys, values_fit, values_optim = model(test_src,
                                         test_tgt,
                                         test_length,
                                         train_mode=True)
Example 17
model_opt = parser.parse_args()

dataset = next(lazily_load_dataset("train"))
print(dataset.examples[0].__dict__)
print(dataset)

data_type = dataset.data_type  # data_type: GCN
# Load fields generated from preprocess phase.
fields = load_fields(dataset, data_type, None)  # checkpoint = None
print(type(fields))
print(fields)

src_dict = fields["src"].vocab
feature_dicts = onmt.io.collect_feature_vocabs(fields, 'src')
src_embeddings = make_embeddings(model_opt, src_dict, feature_dicts)

tgt_dict = fields["tgt"].vocab
feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = make_embeddings(model_opt,
                                 tgt_dict,
                                 feature_dicts,
                                 for_encoder=False)

make_model(model_opt, src_embeddings, tgt_embeddings)

# gcn_num_inputs=256, gcn_num_labels=5, gcn_num_layers=2, gcn_num_units=256, gcn_out_arcs=True, gcn_residual='residual', gcn_use_gates=False, gcn_use_glus=False

# python3 train.py -data data/${model_id}_exp -save_model data/${save_model_name} -encoder_type ${encoder} -encoder2_type ${encoder2} -layers 1 -gcn_num_layers 2 -gcn_num_labels 5 -gcn_residual residual -word_vec_size ${emb_size} -rnn_size ${hidden_size} -gcn_num_inputs ${hidden_size} -gcn_num_units ${hidden_size} -epochs 20 -optim adam -learning_rate 0.001 -learning_rate_decay 0.7 -seed 1 -gpuid 0 -start_checkpoint_at 15 -gcn_in_arcs -gcn_out_arcs -copy_attn -brnn -use_dgl

# model = make_model()  # D: gcn features must be passed here