def audiomodel_forward(self, opt, tgt_l=7, bsize=3, t=37):
    """
    Creates a speech-to-text NMTModel with a custom opt function.
    Forwards a test batch and checks the output size.

    Args:
        opt: Namespace with options
        tgt_l: length of the target sequence
        bsize: batch size
        t: length of the input audio sequence
    """
    if opt.encoder_type == "transformer" or opt.encoder_type == "cnn":
        return

    word_dict = self.get_vocab()
    feature_dicts = []

    enc = AudioEncoder(
        opt.rnn_type,
        opt.enc_layers,
        opt.dec_layers,
        opt.brnn,
        opt.enc_rnn_size,
        opt.dec_rnn_size,
        opt.audio_enc_pooling,
        opt.dropout,
        opt.sample_rate,
        opt.window_size,
    )

    embeddings = build_embeddings(opt, word_dict, feature_dicts,
                                  for_encoder=False)
    dec = build_decoder(opt, embeddings)

    model = onmt.models.model.NMTModel(enc, dec)

    test_src, test_tgt, test_length = self.get_batch_audio(
        bsize=bsize,
        sample_rate=opt.sample_rate,
        window_size=opt.window_size,
        t=t,
        tgt_l=tgt_l,
    )
    outputs, attn = model(test_src, test_tgt, test_length)
    outputsize = torch.zeros(tgt_l - 1, bsize, opt.dec_rnn_size)
    # Make sure that the output has the correct size and type.
    self.assertEqual(outputs.size(), outputsize.size())
    self.assertEqual(type(outputs), torch.Tensor)
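# --- Usage sketch (not from the original source) ---------------------------
# A hypothetical option set covering every attribute audiomodel_forward
# reads; the values below are illustrative, not taken from any real config,
# and get_vocab()/get_batch_audio() are assumed to be provided by the
# surrounding test case.
from argparse import Namespace

opt = Namespace(
    encoder_type="rnn", rnn_type="LSTM",
    enc_layers=2, dec_layers=2, brnn=True,
    enc_rnn_size=256, dec_rnn_size=256,
    audio_enc_pooling="1", dropout=0.1,
    sample_rate=16000, window_size=0.02,
)
# Inside the test case the check would then run as:
#     self.audiomodel_forward(opt)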
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        "Unsupported model type %s" % model_opt.model_type

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.rnn_size,
                               model_opt.dropout)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.rnn_size,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = build_decoder(model_opt, tgt_embeddings)

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")
    model = onmt.models.NMTModel(encoder, decoder)
    model.model_type = model_opt.model_type

    # Build Generator.
    if not model_opt.copy_attn:
        generator = nn.Sequential(
            nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)),
            nn.LogSoftmax(dim=-1))
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.rnn_size, fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        model.load_state_dict(checkpoint['model'])
        generator.load_state_dict(checkpoint['generator'])
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    if model_opt.share_embeddings:
        assert model.encoder.embeddings.word_lut.weight \
            is model.decoder.embeddings.word_lut.weight

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
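# --- Usage sketch (assumptions noted) ---------------------------------------
# How this builder is typically driven when restoring a trained model. The
# checkpoint keys ('opt', 'vocab', 'model', 'generator') match what this
# function reads; load_fields_from_vocab is the helper name from OpenNMT-py
# of this era, but its exact signature varies across versions, so treat it as
# an assumption. The checkpoint path is illustrative.
import torch
from onmt import inputters
from onmt.model_builder import build_base_model  # module path as in upstream

checkpoint = torch.load("model_step_10000.pt",
                        map_location=lambda storage, loc: storage)
model_opt = checkpoint["opt"]
fields = inputters.load_fields_from_vocab(checkpoint["vocab"])  # assumed API
model = build_base_model(model_opt, fields, gpu=False, checkpoint=checkpoint)
model.eval()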
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        "Unsupported model type %s" % model_opt.model_type

    # for backward compatibility
    if model_opt.rnn_size != -1:
        model_opt.enc_rnn_size = model_opt.rnn_size
        model_opt.dec_rnn_size = model_opt.rnn_size

    # Build encoder.
    if model_opt.model_type == "text":
        src_fields = [f for n, f in fields['src']]
        src_emb = build_embeddings(model_opt, src_fields[0], src_fields[1:])
        encoder = build_encoder(model_opt, src_emb)
    elif model_opt.model_type == "img":
        # why is build_encoder not used here?
        # why is the model_opt.__dict__ check necessary?
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.rnn_type,
                               model_opt.enc_layers,
                               model_opt.dec_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dec_rnn_size,
                               model_opt.audio_enc_pooling,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    tgt_fields = [f for n, f in fields['tgt']]
    tgt_emb = build_embeddings(model_opt, tgt_fields[0], tgt_fields[1:],
                               for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        assert src_fields[0].vocab == tgt_fields[0].vocab, \
            "preprocess with -share_vocab if you use share_embeddings"
        tgt_emb.word_lut.weight = src_emb.word_lut.weight

    decoder = build_decoder(model_opt, tgt_emb)

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")
    model = onmt.models.NMTModel(encoder, decoder)

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.dec_rnn_size,
                      len(fields["tgt"][0][1].vocab)),
            gen_func)
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        vocab_size = len(fields["tgt"][0][1].vocab)
        pad_idx = fields["tgt"][0][1].vocab.stoi[fields["tgt"][0][1].pad_token]
        generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        # This preserves backward compatibility for models using a custom
        # LayerNorm implementation (a_2/b_2 parameter names).
        def fix_key(s):
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
                       r'\1.layer_norm\2.bias', s)
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
                       r'\1.layer_norm\2.weight', s)
            return s

        checkpoint['model'] = {
            fix_key(k): v for k, v in checkpoint['model'].items()
        }
        # end of patch for backward compatibility

        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    model.generator = generator
    model.to(device)

    return model
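# --- Sanity check for the fix_key remapping above (example keys made up) ----
# The two regexes rename old-style custom-LayerNorm parameters (a_2 -> weight,
# b_2 -> bias) while leaving the optional _N suffix on layer_norm intact.
import re

def _fix_key_demo(s):
    s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2', r'\1.layer_norm\2.bias', s)
    s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2', r'\1.layer_norm\2.weight', s)
    return s

assert _fix_key_demo('encoder.transformer.0.layer_norm.a_2') == \
    'encoder.transformer.0.layer_norm.weight'
assert _fix_key_demo('decoder.transformer_layers.1.layer_norm_2.b_2') == \
    'decoder.transformer_layers.1.layer_norm_2.bias'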
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        "Unsupported model type %s" % model_opt.model_type

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        # We added an additional encoder here: TransformerEncoderLM.
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.rnn_type,
                               model_opt.enc_layers,
                               model_opt.dec_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dec_rnn_size,
                               model_opt.audio_enc_pooling,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = build_decoder(model_opt, tgt_embeddings)

    lm_aux = model_opt.encoder_type == "transformerAuxLTR"

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")
    # With lm_aux, the model will return additional outputs.
    model = onmt.models.NMTModel(encoder, decoder, lm_aux=lm_aux)

    # Build Generator.
    # The generator is just a map from hidden states to words in the vocab;
    # note that we use a shared embedding between encoder and decoder, plus
    # a shared embedding between decoder input and output (src to tgt).
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
            gen_func)
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.dec_rnn_size, fields["tgt"].vocab)

    # Build Source Generator (copy attention is not considered right now).
    if lm_aux:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        # The source vocab does not have <s> </s>, but share_vocab might be
        # different...
        src_generator = nn.Sequential(
            nn.Linear(model_opt.enc_rnn_size, len(fields["src"].vocab)),
            gen_func)
        # This ties the source generator to the same weights, so encoder and
        # decoder share the same generator.
        if model_opt.share_decoder_embeddings:
            src_generator[0].weight = src_embeddings.word_lut.weight

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)
        # if lm_aux:
        #     src_generator.load_state_dict(checkpoint['src_generator'],
        #                                   strict=False)
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            if lm_aux:
                for p in src_generator.parameters():
                    p.data.uniform_(-model_opt.param_init,
                                    model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            if lm_aux:
                for p in src_generator.parameters():
                    if p.dim() > 1:
                        xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    if lm_aux:
        model.src_generator = src_generator
    model.to(device)

    return model
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        "Unsupported model type %s" % model_opt.model_type

    # for backward compatibility
    if model_opt.rnn_size != -1:
        model_opt.enc_rnn_size = model_opt.rnn_size
        model_opt.dec_rnn_size = model_opt.rnn_size
        if model_opt.model_type == 'text' and \
                model_opt.enc_rnn_size != model_opt.dec_rnn_size:
            raise AssertionError("""We do not support different encoder and
                                 decoder rnn sizes for translation now.""")

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.rnn_type,
                               model_opt.enc_layers,
                               model_opt.dec_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dec_rnn_size,
                               model_opt.audio_enc_pooling,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    tgt_encoder = build_encoder(model_opt, tgt_embeddings)

    if model_opt.interactive_attn:
        co_attention = CoAttention(model_opt.rnn_type,
                                   model_opt.brnn,
                                   model_opt.enc_layers,
                                   model_opt.enc_rnn_size,
                                   model_opt.dropout)

    decoder = build_decoder(model_opt, tgt_embeddings)

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
            gen_func)
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.dec_rnn_size, fields["tgt"].vocab)

    if model_opt.interactive_attn and model_opt.back_translation:
        model = onmt.models.NMTModel(encoder, tgt_encoder, decoder,
                                     co_attention=co_attention,
                                     generator=generator)
    elif model_opt.interactive_attn:
        model = onmt.models.NMTModel(encoder, tgt_encoder, decoder,
                                     co_attention=co_attention)
    elif model_opt.back_translation:
        model = onmt.models.NMTModel(encoder, tgt_encoder, decoder,
                                     generator=generator)
    else:
        model = onmt.models.NMTModel(encoder, tgt_encoder, decoder)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        # This preserves backward compatibility for models using a custom
        # LayerNorm implementation (a_2/b_2 parameter names).
        def fix_key(s):
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
                       r'\1.layer_norm\2.bias', s)
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
                       r'\1.layer_norm\2.weight', s)
            return s

        checkpoint['model'] = \
            {fix_key(k): v for (k, v) in checkpoint['model'].items()}
        # end of patch for backward compatibility

        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.src_encoder, 'embeddings'):
            model.src_encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.tgt_encoder, 'embeddings'):
            model.tgt_encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
def build_base_model(model_opt, fields, gpu, length_model, length_penalty_a,
                     length_penalty_b, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        length_model: which target-length model is in use; 'oracle',
            'fixed_ratio', and 'lstm' enable the length-tuned generator below.
        length_penalty_a, length_penalty_b: coefficients a and b of the
            length-penalty term (see tune_out_prob below).
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        "Unsupported model type %s" % model_opt.model_type

    # for backward compatibility
    if model_opt.rnn_size != -1:
        model_opt.enc_rnn_size = model_opt.rnn_size
        model_opt.dec_rnn_size = model_opt.rnn_size
        if model_opt.model_type == 'text' and \
                model_opt.enc_rnn_size != model_opt.dec_rnn_size:
            raise AssertionError("""We do not support different encoder and
                                 decoder rnn sizes for translation now.""")

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.rnn_type,
                               model_opt.enc_layers,
                               model_opt.dec_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dec_rnn_size,
                               model_opt.audio_enc_pooling,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = build_decoder(model_opt, tgt_embeddings)

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")
    model = onmt.models.NMTModel(encoder, decoder)

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)

        # generator = nn.Sequential(
        #     nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
        #     gen_func
        # )

        # MMM
        class tune_out_prob(nn.Module):
            """Adds a length-model penalty to the </s> log-probability."""

            def __init__(self):
                super(tune_out_prob, self).__init__()
                self.t_lens = None
                self.eos_ind = None
                self.batch_max_len = None
                self.word_index = None
                self.tgt_vocab_size = None
                self.validation = False

            def length_model_loss(self, scale, value, a, b):
                # return -(value / scale) ** 2 - scale.log()
                # return -((value / scale) ** 2) / 2 - (2.5066 * scale).log()
                return -a * (value / scale) ** 2 + b  # * abs(scale)
                # return -((value / scale) ** 2) * scale + scale
                # return -(value / scale) * 4 + scale

            def forward(self, x):
                y = x.clone()
                # mask = np.ones(x.size())
                # for i in range(self.t_lens.size(-1)):
                #     y[i * self.batch_size + self.t_lens[i], self.eos_ind] = \
                #         y[i * self.batch_size + self.t_lens[i],
                #           self.eos_ind].clone() + math.log(0.9)
                if self.training or self.validation:  # training phase
                    y = y.view(self.batch_max_len, -1, self.tgt_vocab_size)
                    # eos_list = [(i * self.batch_max_len +
                    #              self.t_lens.data.cpu().numpy()[i])
                    #             for i in range(self.t_lens.size(-1))]
                    # other_list = list(set(range(x.size(0))) - set(eos_list))
                    # y[other_list, self.eos_ind] = -100
                    # y[eos_list, self.eos_ind] = 0
                    for wi in range(self.batch_max_len):
                        delta_p = (self.t_lens - wi - 1).float()
                        delta_p[delta_p < 0] = 0.05 * delta_p[delta_p < 0]
                        scale = (self.t_lens.float()).sqrt() / 2.0
                        penalties = self.length_model_loss(
                            scale, delta_p, length_penalty_a, length_penalty_b)
                        # penalties[penalties > 0] = 0
                        y[wi, :, self.eos_ind] += penalties
                    y = y.view(-1, self.tgt_vocab_size)
                    # mask[eos_list, self.eos_ind] = +2
                    # mask[other_list, self.eos_ind] = -2
                else:  # translation phase
                    if len(x.size()) == 3:
                        # x of shape [tgt_len, batch_size, vocab] is a full
                        # sentence.
                        # for i in range(len(self.t_lens)):
                        #     other_list = list(
                        #         set(range(x.size(0))) -
                        #         set([self.t_lens.data.cpu().numpy()[i]]))
                        #     # mask[other_list, i, self.eos_ind] = -2
                        #     y[other_list, i, self.eos_ind] = -100
                        #     if self.t_lens[i] < x.size(0):
                        #         # mask[self.t_lens[i], i, self.eos_ind] = +2
                        #         y[self.t_lens[i], i, self.eos_ind] = 0
                        pass
                    else:
                        # x of shape [(batch_size x beam_size), vocab] is only
                        # for one step.
                        beam_size = x.size(0) // self.t_lens.numel()
                        wi = self.word_index
                        delta_p = (self.t_lens - wi - 2).float()
                        delta_p[delta_p < 0] = 0.005 * delta_p[delta_p < 0]
                        delta_p = delta_p.unsqueeze(1).expand(
                            self.t_lens.numel(), beam_size).flatten()
                        scale = (self.t_lens.float()).sqrt() / 2.0
                        scale = scale.unsqueeze(1).expand(
                            self.t_lens.numel(), beam_size).flatten()
                        penalties = self.length_model_loss(
                            scale, delta_p, length_penalty_a, length_penalty_b)
                        # penalties[penalties > 0] = 0
                        y[:, self.eos_ind] += penalties
                        # y[eos_list ^ 1, self.eos_ind] = -100
                return y
                # An earlier variant built a mask instead:
                # mask = torch.tensor(mask, dtype=x.dtype).to(device)
                # x = x + mask
                # return x
                # Another earlier variant tuned the probabilities directly:
                # y = x.clone()
                # # 1. since y is the output of log_softmax, apply exponential
                # #    to convert it to probabilistic form
                # y = torch.exp(y)
                # # 2. tune probabilities
                # eos_list = [(i * self.batch_max_len +
                #              self.t_lens.data.cpu().numpy()[i])
                #             for i in range(self.t_lens.size(-1))]
                # other_list = list(set(range(y.size(0))) - set(eos_list))
                # z = y.clone()
                # # 2.1. tune probabilities for eos positions
                # z[eos_list, self.eos_ind] = 1
                # z[eos_list, 0:self.eos_ind] = 0
                # z[eos_list, self.eos_ind + 1:-1] = 0
                # # 2.2. tune probabilities for non-eos positions
                # p_val = z[other_list, self.eos_ind] / \
                #     (self.tgt_vocab_size - 1)
                # z[other_list, self.eos_ind] = 0
                # non_eos_inds = list(set(range(self.tgt_vocab_size)) -
                #                     set([self.eos_ind]))
                # for i in range(len(other_list)):
                #     z[other_list[i], non_eos_inds] = \
                #         y[other_list[i], non_eos_inds] + p_val[i]
                # # 3. convert y back to log-probability form
                # z = torch.log(z)
                # return z
        # MMM

        if length_model in ('oracle', 'fixed_ratio', 'lstm'):
            generator = nn.Sequential(
                nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
                gen_func,
                tune_out_prob())
        else:
            generator = nn.Sequential(
                nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
                gen_func)

        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.dec_rnn_size, fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        # This preserves backward compatibility for models using a custom
        # LayerNorm implementation (a_2/b_2 parameter names).
        def fix_key(s):
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
                       r'\1.layer_norm\2.bias', s)
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
                       r'\1.layer_norm\2.weight', s)
            return s

        checkpoint['model'] = \
            {fix_key(k): v for (k, v) in checkpoint['model'].items()}
        # end of patch for backward compatibility

        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
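# --- Standalone illustration of the EOS length penalty (values illustrative)
# tune_out_prob adds penalty = -a * (delta / scale)**2 + b to the </s> logit,
# with scale = sqrt(target_len) / 2, so log P(</s>) is boosted most when the
# decoder step is near the oracle target length and falls off quadratically.
# a and b stand in for length_penalty_a / length_penalty_b.
import torch

a, b = 1.0, 0.0
t_lens = torch.tensor([10.0])        # oracle target length
scale = t_lens.sqrt() / 2.0          # ~1.58 for length 10

for wi in [4, 9, 14]:                # decoding steps
    delta = (t_lens - wi - 1).float()
    delta[delta < 0] = 0.05 * delta[delta < 0]   # soft penalty past the oracle
    print(wi, (-a * (delta / scale) ** 2 + b).item())
# Prints -10.0 at step 4, 0.0 at step 9 (the oracle length minus one), and
# only a much milder penalty past it.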
def build_base_model(model_opt, fields, gpu, FeatureValues, FeatureTensors,
                     FeatureTypes, FeaturesList, FeatureNames, FTInfos,
                     FeatureTypesNames, SimulationLanguages, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        FeatureValues, FeatureTensors, FeatureTypes, FeaturesList,
            FeatureNames, FTInfos, FeatureTypesNames, SimulationLanguages:
            WALS info.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        "Unsupported model type %s" % model_opt.model_type

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.rnn_size,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = build_decoder(model_opt, tgt_embeddings)

    # WALS
    print('Building embeddings for each WALS feature and MLP models for '
          'each feature type...')

    embeddings_list, embeddings_keys, mlp_list, mlp_keys = [], [], [], []

    for FeatureType in FeatureTypes:
        list_features = FeatureType[1]
        for Feature in list_features:
            # 192 embedding structures, one for each feature.
            globals()['embedding_%s' % Feature] = build_feature_embeddings(
                gpu, FeatureTensors, FeaturesList, FeatureNames, Feature)
            embeddings_keys.append(Feature)
            embeddings_list.append(globals()['embedding_%s' % Feature])
        # 11 MLPs, one for each feature type.
        globals()['mlp_%s' % FeatureType[0]] = build_mlp_feature_type(
            model_opt, FTInfos, FeatureTypesNames, FeatureType[0])
        mlp_keys.append(FeatureType[0])
        mlp_list.append(globals()['mlp_%s' % FeatureType[0]])

    embeddings_dic_keys = dict(zip(embeddings_keys, embeddings_list))
    EmbeddingFeatures = nn.ModuleDict(embeddings_dic_keys)
    mlp_dic_keys = dict(zip(mlp_keys, mlp_list))

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")

    if model_opt.wals_model == 'EncInitHidden_Target':
        MLP2RNNHiddenSize_Target = build_mlp2rnnhiddensize_target(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = EncoderInitialization(model_opt.wals_model, encoder, decoder,
                                      MLP2RNNHiddenSize_Target,
                                      EmbeddingFeatures, FeatureValues,
                                      FeatureTypes, SimulationLanguages,
                                      model_opt)
        print("Model created: uses WALS features from the target language "
              "to initialize the encoder's hidden state.")
    elif model_opt.wals_model == 'EncInitHidden_Both':
        MLP2RNNHiddenSize_Both = build_mlp2rnnhiddensize_both(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = EncoderInitialization(model_opt.wals_model, encoder, decoder,
                                      MLP2RNNHiddenSize_Both,
                                      EmbeddingFeatures, FeatureValues,
                                      FeatureTypes, SimulationLanguages,
                                      model_opt)
        print("Model created: uses WALS features from the source and target "
              "languages to initialize the encoder's hidden state.")
    elif model_opt.wals_model == 'DecInitHidden_Target':
        MLP2RNNHiddenSize_Target = build_mlp2rnnhiddensize_target(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = DecoderInitialization(model_opt.wals_model, encoder, decoder,
                                      MLP2RNNHiddenSize_Target,
                                      EmbeddingFeatures, FeatureValues,
                                      FeatureTypes, SimulationLanguages,
                                      model_opt)
        print("Model created: adds WALS features from the target language "
              "to the encoder's output to initialize the decoder's hidden "
              "state.")
    elif model_opt.wals_model == 'DecInitHidden_Both':
        MLP2RNNHiddenSize_Both = build_mlp2rnnhiddensize_both(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = DecoderInitialization(model_opt.wals_model, encoder, decoder,
                                      MLP2RNNHiddenSize_Both,
                                      EmbeddingFeatures, FeatureValues,
                                      FeatureTypes, SimulationLanguages,
                                      model_opt)
        print("Model created: adds WALS features from the source and target "
              "languages to the encoder's output to initialize the decoder's "
              "hidden state.")
    elif model_opt.wals_model == 'WalstoSource_Target':
        MLP2WALSHiddenSize_Target = build_mlp2walshiddensize_target(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = CombineWalsSourceWords(model_opt.wals_model, encoder, decoder,
                                       MLP2WALSHiddenSize_Target,
                                       EmbeddingFeatures, FeatureValues,
                                       FeatureTypes, SimulationLanguages,
                                       model_opt)
        print("Model created: concatenates WALS features from the target "
              "language to the source word embeddings.")
    elif model_opt.wals_model == 'WalstoSource_Both':
        MLP2WALSHiddenSize_Both = build_mlp2walshiddensize_both(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = CombineWalsSourceWords(model_opt.wals_model, encoder, decoder,
                                       MLP2WALSHiddenSize_Both,
                                       EmbeddingFeatures, FeatureValues,
                                       FeatureTypes, SimulationLanguages,
                                       model_opt)
        print("Model created: concatenates WALS features from the source and "
              "target languages to the source word embeddings.")
    elif model_opt.wals_model == 'WalstoTarget_Target':
        MLP2WALSHiddenSize_Target = build_mlp2walshiddensize_target(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = CombineWalsTargetWords(model_opt.wals_model, encoder, decoder,
                                       MLP2WALSHiddenSize_Target,
                                       EmbeddingFeatures, FeatureValues,
                                       FeatureTypes, SimulationLanguages,
                                       model_opt)
        print("Model created: concatenates WALS features from the target "
              "language to the target word embeddings.")
    elif model_opt.wals_model == 'WalstoTarget_Both':
        MLP2WALSHiddenSize_Both = build_mlp2walshiddensize_both(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = CombineWalsTargetWords(model_opt.wals_model, encoder, decoder,
                                       MLP2WALSHiddenSize_Both,
                                       EmbeddingFeatures, FeatureValues,
                                       FeatureTypes, SimulationLanguages,
                                       model_opt)
        print("Model created: concatenates WALS features from the source and "
              "target languages to the target word embeddings.")
    elif model_opt.wals_model == 'WalsDoublyAttentive_Target':
        MLPFeatureTypes = nn.ModuleDict(mlp_dic_keys)
        MLP_AttentionTarget = build_doublyattentive_target(model_opt)
        print('Embeddings for WALS features and MLP models are built!')
        model = WalsDoublyAttention(model_opt.wals_model, encoder, decoder,
                                    MLP_AttentionTarget, MLPFeatureTypes,
                                    EmbeddingFeatures, FeatureValues,
                                    FeatureTypes, SimulationLanguages,
                                    model_opt)
        print("Model created: the WALS features from the target language are "
              "incorporated as an additional attention mechanism.")
    elif model_opt.wals_model == 'WalsDoublyAttentive_Both':
        MLPFeatureTypes = nn.ModuleDict(mlp_dic_keys)
        MLP_AttentionBoth = build_doublyattentive_both(model_opt)
        print('Embeddings for WALS features and MLP models are built!')
        model = WalsDoublyAttention(model_opt.wals_model, encoder, decoder,
                                    MLP_AttentionBoth, MLPFeatureTypes,
                                    EmbeddingFeatures, FeatureValues,
                                    FeatureTypes, SimulationLanguages,
                                    model_opt)
        print("Model created: the WALS features from the source and target "
              "languages are incorporated as an additional attention "
              "mechanism.")
    elif model_opt.wals_model == 'WalstoDecHidden_Target':
        MLP2WALSHiddenSize_Target = build_mlp2walshiddensize_target(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = WalstoDecHidden(model_opt.wals_model, encoder, decoder,
                                MLP2WALSHiddenSize_Target, EmbeddingFeatures,
                                FeatureValues, FeatureTypes,
                                SimulationLanguages, model_opt)
        print("Model created: concatenates WALS features from the target "
              "language to the decoder hidden state.")
    elif model_opt.wals_model == 'WalstoDecHidden_Both':
        MLP2WALSHiddenSize_Both = build_mlp2walshiddensize_both(
            model_opt, FTInfos)
        print('Embeddings for WALS features and MLP models are built!')
        model = WalstoDecHidden(model_opt.wals_model, encoder, decoder,
                                MLP2WALSHiddenSize_Both, EmbeddingFeatures,
                                FeatureValues, FeatureTypes,
                                SimulationLanguages, model_opt)
        print("Model created: concatenates WALS features from the source and "
              "target languages to the decoder hidden state.")
    else:
        raise Exception("WALS model type not yet implemented: %s"
                        % model_opt.wals_model)

    model.model_type = model_opt.model_type

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)),
            gen_func)
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.rnn_size, fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        model.load_state_dict(checkpoint['model'])
        generator.load_state_dict(checkpoint['generator'])
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio", "vector"], \
        "Unsupported model type %s" % model_opt.model_type

    use_src_directly_for_dec = False

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.rnn_type,
                               model_opt.enc_layers,
                               model_opt.dec_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dec_rnn_size,
                               model_opt.audio_enc_pooling,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)
    elif model_opt.model_type == "vector":
        use_src_directly_for_dec = True
        if not hasattr(fields["src"], 'vocab'):
            fields["src"].vocab = fields["tgt"].vocab
        src_dict = fields["src"].vocab
        # self.word_lut.weight.requires_grad = False
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        tgt_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        if model_opt.encoder_type == "rnn" or model_opt.encoder_type == "brnn":
            encoder = RNNEncoder(model_opt.rnn_type,
                                 model_opt.brnn,
                                 model_opt.enc_layers,
                                 model_opt.enc_rnn_size,
                                 model_opt.dropout,
                                 None,
                                 model_opt.bridge)
            tgt_embeddings = None
        elif model_opt.decoder_type == "cnn":
            use_src_directly_for_dec = False
            encoder = CNNEncoder(model_opt.enc_layers,
                                 model_opt.enc_rnn_size,
                                 model_opt.cnn_kernel_width,
                                 model_opt.dropout,
                                 None)
            tgt_embeddings = None
        else:
            encoder = None

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    if model_opt.model_type != "vector":
        tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                          feature_dicts, for_encoder=False)
    # else:
    #     tgt_embeddings = None

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = build_decoder(model_opt, tgt_embeddings)

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")
    if model_opt.decoder_type.startswith("vecdif"):
        model = onmt.models.VecModel(
            encoder, decoder,
            use_src_directly_for_dec=use_src_directly_for_dec)
    else:
        model = onmt.models.NMTModel(
            encoder, decoder,
            use_src_directly_for_dec=use_src_directly_for_dec)

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        elif model_opt.generator_function == "sigmoid":
            gen_func = nn.Sigmoid()
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        if model_opt.model_type == "vector":
            if model_opt.generator_function == "none":
                # if model_opt.final_vec_size != model_opt.dec_rnn_size:
                #     generator = nn.Sequential(
                #         nn.Linear(model_opt.dec_rnn_size,
                #                   model_opt.final_vec_size))
                # else:
                generator = None
            else:
                generator = nn.Sequential(
                    nn.Linear(model_opt.dec_rnn_size,
                              model_opt.final_vec_size),
                    gen_func)
        else:
            generator = nn.Sequential(
                nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
                gen_func)
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.dec_rnn_size, fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        model.load_state_dict(checkpoint['model'], strict=False)
        if generator is not None:
            generator.load_state_dict(checkpoint['generator'], strict=False)
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            if generator is not None:
                for p in generator.parameters():
                    p.data.uniform_(-model_opt.param_init,
                                    model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            if generator is not None:
                for p in generator.parameters():
                    if p.dim() > 1:
                        xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings') \
                and model_opt.model_type != "vector":
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings') \
                and model_opt.model_type != "vector":
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        "Unsupported model type %s" % model_opt.model_type

    # for backward compatibility
    if model_opt.rnn_size != -1:
        model_opt.enc_rnn_size = model_opt.rnn_size
        model_opt.dec_rnn_size = model_opt.rnn_size
        if model_opt.model_type == 'text' and \
                model_opt.enc_rnn_size != model_opt.dec_rnn_size:
            raise AssertionError("""We do not support different encoder and
                                 decoder rnn sizes for translation now.""")

    # Build encoder.
    logger.info('Building encoder......')
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.rnn_type,
                               model_opt.enc_layers,
                               model_opt.dec_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dec_rnn_size,
                               model_opt.audio_enc_pooling,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    logger.info('Building decoder......')
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        logger.info('** Sharing word embedding matrix between src/tgt')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
    elif src_dict == tgt_dict:
        logger.info('WARNING: NOT SHARING WORD EMBEDDINGS FOR TIED VOCAB???')
        exit(0)

    decoder = build_decoder(model_opt, tgt_embeddings)

    # Build NMTModel(= encoder + decoder).
    logger.info('Building NMTModel......')
    device = torch.device("cuda" if gpu else "cpu")
    model = onmt.models.NMTModel(encoder, decoder)

    # Build Generator.
    logger.info('Building generator......')
    # (standard generator)
    # Given the final hidden state (after attention) at the t-th decoding
    # step, return
    #     s_t = log(softmax(W h_t + b))
    # where W is optionally tied to the decoder word embedding matrix.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
            gen_func
        )
        if model_opt.share_decoder_embeddings:
            logger.info('** Sharing generator softmax with tgt word embedding')
            generator[0].weight = decoder.embeddings.word_lut.weight
        else:
            logger.info('WARNING: NOT SHARING GENERATOR SOFTMAX WITH TGT WORD '
                        'EMBEDDING MATRIX - IS THERE A GOOD REASON?')
    else:
        generator = CopyGenerator(model_opt.dec_rnn_size, fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        logger.info('Loading model states from checkpoint......')
        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)
    else:
        logger.info('Initializing parameters......')
        if not model_opt.param_init_glorot:
            logger.info('WARNING: NOT USING XAVIER INITIALIZATION? WILL JUST '
                        'USE UNIF(+- %.2f)' % model_opt.param_init)
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            if model_opt.pre_word_vecs_enc:
                logger.info('** Using pretrained encoder word embeddings')
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            if model_opt.pre_word_vecs_dec:
                logger.info('** Using pretrained decoder word embeddings')
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed
            snapshot model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in [
        "text",
        "img",
        "audio",
    ], "Unsupported model type %s" % (model_opt.model_type)

    # for backward compatibility
    if model_opt.rnn_size != -1:
        model_opt.enc_rnn_size = model_opt.rnn_size
        model_opt.dec_rnn_size = model_opt.rnn_size
        if (model_opt.model_type == "text"
                and model_opt.enc_rnn_size != model_opt.dec_rnn_size):
            raise AssertionError("""We do not support different encoder and
                                 decoder rnn sizes for translation now.""")

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, "src")
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        if "image_channel_size" not in model_opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = model_opt.image_channel_size
        encoder = ImageEncoder(
            model_opt.enc_layers,
            model_opt.brnn,
            model_opt.enc_rnn_size,
            model_opt.dropout,
            image_channel_size,
        )
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(
            model_opt.rnn_type,
            model_opt.enc_layers,
            model_opt.dec_layers,
            model_opt.brnn,
            model_opt.enc_rnn_size,
            model_opt.dec_rnn_size,
            model_opt.audio_enc_pooling,
            model_opt.dropout,
            model_opt.sample_rate,
            model_opt.window_size,
        )

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, "tgt")
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError("The `-share_vocab` should be set during "
                                 "preprocess if you use share_embeddings!")
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = build_decoder(model_opt, tgt_embeddings)

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")
    model = onmt.models.NMTModel(encoder, decoder)

    # The generator function only matters at translation time, so it is not
    # necessary to create it here anymore. At translation time, the model's
    # model_opt will still have a value for generator_function or loss_alpha.
    # This is sufficient to figure out what function to use at translation
    # time.
    generator = nn.Sequential(
        nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)))
    if model_opt.share_decoder_embeddings:
        generator[0].weight = decoder.embeddings.word_lut.weight

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        # This preserves backward compatibility for models using a custom
        # LayerNorm implementation (a_2/b_2 parameter names).
        def fix_key(s):
            s = re.sub(r"(.*)\.layer_norm((_\d+)?)\.b_2",
                       r"\1.layer_norm\2.bias", s)
            s = re.sub(r"(.*)\.layer_norm((_\d+)?)\.a_2",
                       r"\1.layer_norm\2.weight", s)
            return s

        checkpoint["model"] = {
            fix_key(k): v for (k, v) in checkpoint["model"].items()
        }
        # end of patch for backward compatibility

        model.load_state_dict(checkpoint["model"], strict=False)
        generator.load_state_dict(checkpoint["generator"], strict=False)
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, "embeddings"):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, "embeddings"):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
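# --- Translation-time normalization sketch (made-up sizes) ------------------
# Because the generator in this last variant is a bare nn.Linear with no
# LogSoftmax, callers must normalize the scores themselves at translation
# time; model_opt.generator_function / loss_alpha tells them which function
# to apply. A minimal sketch:
import torch
import torch.nn.functional as F

dec_rnn_size, vocab_size = 8, 11
generator = torch.nn.Sequential(torch.nn.Linear(dec_rnn_size, vocab_size))

dec_out = torch.randn(5, 2, dec_rnn_size)   # [tgt_len, batch, hidden]
logits = generator(dec_out)                 # unnormalized vocabulary scores
log_probs = F.log_softmax(logits, dim=-1)   # what nn.LogSoftmax would add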