def _build(self, batch_size):
    src_time_dim = 4
    vocab_size = 7

    emb = Embeddings(embedding_dim=self.emb_size,
                     vocab_size=vocab_size,
                     padding_idx=self.pad_index)

    encoder = RecurrentEncoder(emb_size=self.emb_size,
                               num_layers=self.num_layers,
                               hidden_size=self.encoder_hidden_size,
                               bidirectional=True)

    decoder = RecurrentDecoder(hidden_size=self.hidden_size,
                               encoder=encoder,
                               attention="bahdanau",
                               emb_size=self.emb_size,
                               vocab_size=self.vocab_size,
                               num_layers=self.num_layers,
                               init_hidden="bridge",
                               input_feeding=True)

    encoder_output = torch.rand(size=(batch_size, src_time_dim,
                                      encoder.output_size))

    for p in decoder.parameters():
        torch.nn.init.uniform_(p, -0.5, 0.5)

    src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
    encoder_hidden = torch.rand(size=(batch_size, encoder.output_size))

    return src_mask, emb, decoder, encoder_output, encoder_hidden
def setUp(self):
    self.emb_size = 10
    self.num_layers = 3
    self.hidden_size = 6
    self.encoder_hidden_size = 3
    self.vocab_size = 5
    seed = 42
    torch.manual_seed(seed)

    bidi_encoder = RecurrentEncoder(emb_size=self.emb_size,
                                    num_layers=self.num_layers,
                                    hidden_size=self.encoder_hidden_size,
                                    bidirectional=True)
    uni_encoder = RecurrentEncoder(emb_size=self.emb_size,
                                   num_layers=self.num_layers,
                                   hidden_size=self.encoder_hidden_size * 2,
                                   bidirectional=False)
    self.encoders = [uni_encoder, bidi_encoder]
def setUp(self):
    self.addTypeEqualityFunc(
        torch.Tensor,
        lambda x, y, msg: self.failureException(msg)
        if not torch.equal(x, y) else True)

    self.emb_size = 10
    self.num_layers = 3
    self.hidden_size = 7
    self.vocab_size = 5
    seed = 42
    torch.manual_seed(seed)

    bidi_encoder = RecurrentEncoder(emb_size=self.emb_size,
                                    num_layers=self.num_layers,
                                    hidden_size=self.hidden_size,
                                    bidirectional=True)
    uni_encoder = RecurrentEncoder(emb_size=self.emb_size,
                                   num_layers=self.num_layers,
                                   hidden_size=self.hidden_size,
                                   bidirectional=False)
    self.encoders = [uni_encoder, bidi_encoder]
def test_recurrent_encoder_size(self):
    for bidirectional in [True, False]:
        directional_factor = 2 if bidirectional else 1
        encoder = RecurrentEncoder(hidden_size=self.hidden_size,
                                   emb_size=self.emb_size,
                                   num_layers=self.num_layers,
                                   bidirectional=bidirectional)
        self.assertEqual(encoder.rnn.hidden_size, self.hidden_size)
        # output size is affected by bidirectionality
        self.assertEqual(encoder.output_size,
                         self.hidden_size * directional_factor)
        self.assertEqual(encoder.rnn.bidirectional, bidirectional)
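# Quick numeric illustration (a sketch only, reusing the RecurrentEncoder
# constructor arguments that appear in the test above): a bidirectional RNN
# concatenates forward and backward states, so output_size is 2 * hidden_size.
enc_bi = RecurrentEncoder(hidden_size=8, emb_size=4, num_layers=1,
                          bidirectional=True)
assert enc_bi.output_size == 16   # 2 * hidden_size
enc_uni = RecurrentEncoder(hidden_size=8, emb_size=4, num_layers=1,
                           bidirectional=False)
assert enc_uni.output_size == 8   # hidden_size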
def build_model(cfg: dict = None,
                src_vocab: Vocabulary = None,
                trg_vocab: Vocabulary = None) -> Model:
    """
    Build and initialize the model according to the configuration.

    :param cfg: dictionary configuration containing model specifications
    :param src_vocab: source vocabulary
    :param trg_vocab: target vocabulary
    :return: built and initialized model
    """
    src_padding_idx = src_vocab.stoi[PAD_TOKEN]
    trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]

    src_embed = Embeddings(**cfg["encoder"]["embeddings"],
                           vocab_size=len(src_vocab),
                           padding_idx=src_padding_idx)

    if cfg.get("tied_embeddings", False):
        if src_vocab.itos == trg_vocab.itos:
            # share embeddings for src and trg
            trg_embed = src_embed
        else:
            raise ConfigurationError(
                "Embedding cannot be tied since vocabularies differ.")
    else:
        trg_embed = Embeddings(**cfg["decoder"]["embeddings"],
                               vocab_size=len(trg_vocab),
                               padding_idx=trg_padding_idx)

    encoder = RecurrentEncoder(**cfg["encoder"],
                               emb_size=src_embed.embedding_dim)
    decoder = RecurrentDecoder(**cfg["decoder"],
                               encoder=encoder,
                               vocab_size=len(trg_vocab),
                               emb_size=trg_embed.embedding_dim)

    model = Model(encoder=encoder, decoder=decoder,
                  src_embed=src_embed, trg_embed=trg_embed,
                  src_vocab=src_vocab, trg_vocab=trg_vocab)

    # custom initialization of model parameters
    initialize_model(model, cfg, src_padding_idx, trg_padding_idx)

    return model
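# Illustrative only: a minimal configuration dict that the recurrent-only
# build_model above could consume. The keys inside "encoder" / "decoder" are
# assumptions inferred from keyword arguments visible elsewhere in this file
# (they reach RecurrentEncoder / RecurrentDecoder via **cfg[...]); adjust to
# the actual schema of your configs.
example_cfg = {
    "tied_embeddings": False,
    "encoder": {
        "embeddings": {"embedding_dim": 16},
        "hidden_size": 32,
        "num_layers": 1,
        "bidirectional": True,
    },
    "decoder": {
        "embeddings": {"embedding_dim": 16},
        "hidden_size": 32,
        "num_layers": 1,
        "attention": "bahdanau",
        "init_hidden": "bridge",
        "input_feeding": True,
    },
}
# model = build_model(example_cfg, src_vocab=src_vocab, trg_vocab=trg_vocab)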
def test_recurrent_forward(self):
    time_dim = 4
    batch_size = 2
    bidirectional = True
    directions = 2 if bidirectional else 1
    encoder = RecurrentEncoder(emb_size=self.emb_size,
                               num_layers=self.num_layers,
                               hidden_size=self.hidden_size,
                               bidirectional=bidirectional)
    x = torch.rand(size=(batch_size, time_dim, self.emb_size))
    # no padding, no mask
    x_length = torch.Tensor([time_dim] * batch_size).int()
    mask = torch.ones_like(x)

    output, hidden, _ = encoder(embed_src=x, src_length=x_length, mask=mask)

    self.assertEqual(output.shape, torch.Size(
        [batch_size, time_dim, directions * self.hidden_size]))
    self.assertEqual(hidden.shape, torch.Size(
        [batch_size, directions * self.hidden_size]))

    hidden_target = torch.Tensor(
        [[0.1323, 0.0125, 0.2900, -0.0725, -0.0102, -0.4405, 0.1226,
          -0.3333, -0.3186, -0.2411, 0.1790, 0.1281, 0.0739, -0.0536],
         [0.1431, 0.0085, 0.2828, -0.0933, -0.0139, -0.4525, 0.0946,
          -0.3279, -0.3001, -0.2223, 0.2023, 0.0708, 0.0131, -0.0124]])
    # expected output: batch_size x time_dim x (directions * hidden_size)
    output_target = torch.Tensor(
        [[[0.0041, 0.0324, 0.0846, -0.0056, 0.0353, -0.2528, 0.0289,
           -0.3333, -0.3186, -0.2411, 0.1790, 0.1281, 0.0739, -0.0536],
          [0.0159, 0.0248, 0.1496, -0.0176, 0.0457, -0.3839, 0.0780,
           -0.3137, -0.2731, -0.2310, 0.1866, 0.0758, 0.0366, -0.0069],
          [0.0656, 0.0168, 0.2182, -0.0391, 0.0214, -0.4389, 0.1100,
           -0.2625, -0.1970, -0.2249, 0.1374, 0.0337, 0.0139, 0.0284],
          [0.1323, 0.0125, 0.2900, -0.0725, -0.0102, -0.4405, 0.1226,
           -0.1649, -0.1023, -0.1823, 0.0712, 0.0039, -0.0228, 0.0444]],
         [[0.0296, 0.0254, 0.1007, -0.0225, 0.0207, -0.2612, 0.0061,
           -0.3279, -0.3001, -0.2223, 0.2023, 0.0708, 0.0131, -0.0124],
          [0.0306, 0.0096, 0.1566, -0.0386, 0.0387, -0.3958, 0.0556,
           -0.3034, -0.2701, -0.2165, 0.2061, 0.0364, -0.0012, 0.0184],
          [0.0842, 0.0075, 0.2181, -0.0696, 0.0121, -0.4389, 0.0874,
           -0.2432, -0.1979, -0.2168, 0.1519, 0.0066, -0.0080, 0.0485],
          [0.1431, 0.0085, 0.2828, -0.0933, -0.0139, -0.4525, 0.0946,
           -0.1608, -0.1140, -0.1646, 0.0796, -0.0202, -0.0207, 0.0379]]])

    self.assertTensorAlmostEqual(hidden_target, hidden)
    self.assertTensorAlmostEqual(output_target, output)
def build_model(cfg: dict = None,
                src_vocab: Vocabulary = None,
                trg_vocab: Vocabulary = None):
    src_padding_idx = src_vocab.stoi[PAD_TOKEN]
    trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]

    src_embed = Embeddings(**cfg["encoder"]["embeddings"],
                           vocab_size=len(src_vocab),
                           padding_idx=src_padding_idx)

    if cfg.get("tied_embeddings", False) \
            and src_vocab.itos == trg_vocab.itos:
        # share embeddings for src and trg
        trg_embed = src_embed
    else:
        trg_embed = Embeddings(**cfg["decoder"]["embeddings"],
                               vocab_size=len(trg_vocab),
                               padding_idx=trg_padding_idx)

    encoder = RecurrentEncoder(**cfg["encoder"],
                               emb_size=src_embed.embedding_dim)
    decoder = RecurrentDecoder(**cfg["decoder"],
                               encoder=encoder,
                               vocab_size=len(trg_vocab),
                               emb_size=trg_embed.embedding_dim)

    model = Model(encoder=encoder, decoder=decoder,
                  src_embed=src_embed, trg_embed=trg_embed,
                  src_vocab=src_vocab, trg_vocab=trg_vocab)

    # custom initialization of model parameters
    initialize_model(model, cfg, src_padding_idx, trg_padding_idx)

    return model
def test_recurrent_freeze(self):
    encoder = RecurrentEncoder(freeze=True)
    for _, p in encoder.named_parameters():
        self.assertFalse(p.requires_grad)
def test_recurrent_input_dropout(self):
    drop_prob = 0.5
    encoder = RecurrentEncoder(dropout=drop_prob)
    input_tensor = torch.Tensor([2, 3, 1, -1])
    encoder.train()
    dropped = encoder.rnn_input_dropout(input=input_tensor)

    # eval switches off dropout
    encoder.eval()
    no_drop = encoder.rnn_input_dropout(input=input_tensor)

    # when dropout is applied, the surviving values are scaled by
    # 1 / (1 - drop_prob); for drop_prob = 0.5 this equals 1 / drop_prob,
    # so multiplying the dropped tensor by drop_prob recovers the kept values
    self.assertGreaterEqual((no_drop - (drop_prob * dropped)).abs().sum(), 0)

    drop_prob = 1.0
    encoder = RecurrentEncoder(dropout=drop_prob)
    all_dropped = encoder.rnn_input_dropout(input=input_tensor)
    self.assertEqual(all_dropped.sum(), 0)

    encoder.eval()
    none_dropped = encoder.rnn_input_dropout(input=input_tensor)
    self.assertTensorEqual(no_drop, none_dropped)
    self.assertTensorEqual((no_drop - all_dropped), no_drop)
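# Worked example of the scaling the test above relies on (this uses standard
# torch.nn.Dropout "inverted dropout" semantics, not project-specific code):
# in training mode the surviving entries are multiplied by 1 / (1 - p); with
# p = 0.5 that is a factor of 2, so drop_prob * dropped recovers the original
# values. In eval mode the input passes through unchanged.
dropout = torch.nn.Dropout(p=0.5)
dropout.train()
x = torch.tensor([2.0, 3.0, 1.0, -1.0])
print(dropout(x))   # kept entries equal 2 * x, dropped entries are 0
dropout.eval()
print(dropout(x))   # identical to x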
def test_recurrent_encoder_type(self):
    valid_rnn_types = {"gru": GRU, "lstm": LSTM}
    for name, obj in valid_rnn_types.items():
        encoder = RecurrentEncoder(rnn_type=name)
        self.assertEqual(type(encoder.rnn), obj)
def build_model(cfg: dict = None,
                src_vocab: Vocabulary = None,
                trg_vocab: Vocabulary = None) -> Model:
    """
    Build and initialize the model according to the configuration.

    :param cfg: dictionary configuration containing model specifications
    :param src_vocab: source vocabulary
    :param trg_vocab: target vocabulary
    :return: built and initialized model
    """
    src_padding_idx = src_vocab.stoi[PAD_TOKEN]
    trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]

    # TODO if continue-us
    src_embed = PretrainedEmbeddings(src_vocab, trg_vocab,
                                     **cfg["encoder"]["embeddings"],
                                     vocab_size=len(src_vocab),
                                     padding_idx=src_padding_idx)

    # this ties source and target embeddings
    # for softmax layer tying, see further below
    if cfg.get("tied_embeddings", False):
        if src_vocab.itos == trg_vocab.itos:
            # share embeddings for src and trg
            trg_embed = src_embed
        else:
            raise ConfigurationError(
                "Embedding cannot be tied since vocabularies differ.")
    else:
        # separate (non-tied) target embeddings
        trg_embed = PretrainedEmbeddings(src_vocab, trg_vocab,
                                         **cfg["decoder"]["embeddings"],
                                         vocab_size=len(trg_vocab),
                                         padding_idx=trg_padding_idx)

    # build encoder
    enc_dropout = cfg["encoder"].get("dropout", 0.)
    enc_emb_dropout = cfg["encoder"]["embeddings"].get("dropout", enc_dropout)
    if cfg["encoder"].get("type", "recurrent") == "transformer":
        assert cfg["encoder"]["embeddings"]["embedding_dim"] == \
               cfg["encoder"]["hidden_size"], \
               "for transformer, emb_size must be hidden_size"
        encoder = TransformerEncoder(**cfg["encoder"],
                                     emb_size=src_embed.embedding_dim,
                                     emb_dropout=enc_emb_dropout)
    else:
        encoder = RecurrentEncoder(**cfg["encoder"],
                                   emb_size=src_embed.embedding_dim,
                                   emb_dropout=enc_emb_dropout)

    # build decoder
    dec_dropout = cfg["decoder"].get("dropout", 0.)
    dec_emb_dropout = cfg["decoder"]["embeddings"].get("dropout", dec_dropout)
    if cfg["decoder"].get("type", "recurrent") == "transformer":
        decoder = TransformerDecoder(**cfg["decoder"],
                                     encoder=encoder,
                                     vocab_size=len(trg_vocab),
                                     emb_size=trg_embed.embedding_dim,
                                     emb_dropout=dec_emb_dropout)
    else:
        decoder = RecurrentDecoder(**cfg["decoder"],
                                   encoder=encoder,
                                   vocab_size=len(trg_vocab),
                                   emb_size=trg_embed.embedding_dim,
                                   emb_dropout=dec_emb_dropout)

    model = Model(encoder=encoder, decoder=decoder,
                  src_embed=src_embed, trg_embed=trg_embed,
                  src_vocab=src_vocab, trg_vocab=trg_vocab)

    # tie softmax layer with trg embeddings (currently disabled)
    """
    if cfg.get("tied_softmax", False):
        if trg_embed.lut.weight.shape == \
                model.decoder.output_layer.weight.shape:
            # (also) share trg embeddings and softmax layer:
            model.decoder.output_layer.weight = trg_embed.lut.weight
        else:
            raise ConfigurationError(
                "For tied_softmax, the decoder embedding_dim and decoder "
                "hidden_size must be the same. "
                "The decoder must be a Transformer. "
                f"shapes: output_layer.weight: "
                f"{model.decoder.output_layer.weight.shape}; "
                f"target_embed.lut.weight: {trg_embed.lut.weight.shape}")
    """

    # custom initialization of model parameters
    initialize_model(model, cfg, src_padding_idx, trg_padding_idx)

    return model
def build_model(cfg: dict = None,
                src_vocab: Vocabulary = None,
                trg_vocab: Vocabulary = None,
                trv_vocab: Vocabulary = None,
                canonizer=None) -> Model:
    """
    Build and initialize the model according to the configuration.

    :param cfg: dictionary configuration containing model specifications
    :param src_vocab: source vocabulary
    :param trg_vocab: target vocabulary
    :param trv_vocab: kb true value lookup vocabulary
    :return: built and initialized model
    """
    src_padding_idx = src_vocab.stoi[PAD_TOKEN]
    trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]

    if "embedding_files" in cfg.keys():  # init from pretrained
        assert not cfg.get("tied_embeddings", False), \
            "TODO implement tied embeddings along with pretrained initialization"
        raise NotImplementedError(
            "TODO implement kbsrc embed loading for embedding files")

        weight_tensors = []
        for weight_file in cfg["embedding_files"]:
            with open(weight_file, "r") as f:
                weight = []
                for line in f.readlines():
                    line = line.split()
                    line = [float(x) for x in line]
                    weight.append(line)

            weight = FloatTensor(weight)
            weight_tensors.append(weight)

        # set source embeddings to pretrained embeddings
        src_embed = Embeddings(
            int(weight_tensors[0][0].shape[0]),
            False,  # TODO transformer: change to True
            len(weight_tensors[0]),
        )
        src_embed.lut.weight.data = weight_tensors[0]

        # set target embeddings to pretrained embeddings
        trg_embed = Embeddings(
            int(weight_tensors[1][0].shape[0]),
            False,  # TODO transformer: change to True
            len(weight_tensors[1]),
        )
        trg_embed.lut.weight.data = weight_tensors[1]
    else:
        src_embed = Embeddings(**cfg["encoder"]["embeddings"],
                               vocab_size=len(src_vocab),
                               padding_idx=src_padding_idx)

        if cfg.get("kb_embed_separate", False):
            kbsrc_embed = Embeddings(**cfg["encoder"]["embeddings"],
                                     vocab_size=len(src_vocab),
                                     padding_idx=src_padding_idx)
        else:
            kbsrc_embed = src_embed

        # this ties source and target embeddings
        # for softmax layer tying, see further below
        if cfg.get("tied_embeddings", False):
            if src_vocab.itos == trg_vocab.itos:
                # share embeddings for src and trg
                trg_embed = src_embed
            else:
                raise ConfigurationError(
                    "Embedding cannot be tied since vocabularies differ.")
        else:
            # Latest TODO: init embeddings with
            # vocab_size = len(trg_vocab joined with kb_vocab)
            trg_embed = Embeddings(**cfg["decoder"]["embeddings"],
                                   vocab_size=len(trg_vocab),
                                   padding_idx=trg_padding_idx)

    # build encoder
    enc_dropout = cfg["encoder"].get("dropout", 0.)
    enc_emb_dropout = cfg["encoder"]["embeddings"].get("dropout", enc_dropout)
    if cfg["encoder"].get("type", "recurrent") == "transformer":
        assert cfg["encoder"]["embeddings"]["embedding_dim"] == \
               cfg["encoder"]["hidden_size"], \
               "for transformer, emb_size must be hidden_size"
        encoder = TransformerEncoder(**cfg["encoder"],
                                     emb_size=src_embed.embedding_dim,
                                     emb_dropout=enc_emb_dropout)
    else:
        encoder = RecurrentEncoder(**cfg["encoder"],
                                   emb_size=src_embed.embedding_dim,
                                   emb_dropout=enc_emb_dropout)

    # retrieve kb task info
    kb_task = bool(cfg.get("kb", False))
    # k: number of kvr attention layers in the decoder (Eric et al.; default: 1)
    k_hops = int(cfg.get("k_hops", 1))
    same_module_for_all_hops = bool(cfg.get("same_module_for_all_hops", False))
    do_postproc = bool(cfg.get("do_postproc", True))
    copy_from_source = bool(cfg.get("copy_from_source", True))
    canonization_func = None if canonizer is None else \
        canonizer(copy_from_source=copy_from_source)
    kb_input_feeding = bool(cfg.get("kb_input_feeding", True))
    kb_feed_rnn = bool(cfg.get("kb_feed_rnn", True))
    kb_multihead_feed = bool(cfg.get("kb_multihead_feed", False))
    posEncKBkeys = cfg.get("posEncdKBkeys", False)
    tfstyletf = cfg.get("tfstyletf", True)
    infeedkb = bool(cfg.get("infeedkb", False))
    outfeedkb = bool(cfg.get("outfeedkb", False))
    add_kb_biases_to_output = bool(cfg.get("add_kb_biases_to_output", True))
    kb_max_dims = cfg.get("kb_max_dims", (16, 32))  # should be tuple
    double_decoder = cfg.get("double_decoder", False)
    # actually use separate linear layers, tying only the main one
    tied_side_softmax = cfg.get("tied_side_softmax", False)
    # doesn't need to be true for 1 hop (=> big performance save),
    # needs to be true for >= 2 hops
    do_pad_kb_keys = cfg.get("pad_kb_keys", True)

    if hasattr(kb_max_dims, "__iter__"):
        kb_max_dims = tuple(kb_max_dims)
    else:
        assert type(kb_max_dims) == int, kb_max_dims
        kb_max_dims = (kb_max_dims,)

    assert cfg["decoder"]["hidden_size"]
    dec_dropout = cfg["decoder"].get("dropout", 0.)
    dec_emb_dropout = cfg["decoder"]["embeddings"].get("dropout", dec_dropout)
    if cfg["decoder"].get("type", "recurrent") == "transformer":
        if tfstyletf:
            decoder = TransformerDecoder(
                **cfg["decoder"],
                encoder=encoder,
                vocab_size=len(trg_vocab),
                emb_size=trg_embed.embedding_dim,
                emb_dropout=dec_emb_dropout,
                kb_task=kb_task,
                kb_key_emb_size=kbsrc_embed.embedding_dim,
                feed_kb_hidden=kb_input_feeding,
                infeedkb=infeedkb,
                outfeedkb=outfeedkb,
                double_decoder=double_decoder)
        else:
            decoder = TransformerKBrnnDecoder(
                **cfg["decoder"],
                encoder=encoder,
                vocab_size=len(trg_vocab),
                emb_size=trg_embed.embedding_dim,
                emb_dropout=dec_emb_dropout,
                kb_task=kb_task,
                k_hops=k_hops,
                kb_max=kb_max_dims,
                same_module_for_all_hops=same_module_for_all_hops,
                kb_key_emb_size=kbsrc_embed.embedding_dim,
                kb_input_feeding=kb_input_feeding,
                kb_feed_rnn=kb_feed_rnn,
                kb_multihead_feed=kb_multihead_feed)
    else:
        if not kb_task:
            decoder = RecurrentDecoder(**cfg["decoder"],
                                       encoder=encoder,
                                       vocab_size=len(trg_vocab),
                                       emb_size=trg_embed.embedding_dim,
                                       emb_dropout=dec_emb_dropout)
        else:
            decoder = KeyValRetRNNDecoder(
                **cfg["decoder"],
                encoder=encoder,
                vocab_size=len(trg_vocab),
                emb_size=trg_embed.embedding_dim,
                emb_dropout=dec_emb_dropout,
                k_hops=k_hops,
                kb_max=kb_max_dims,
                same_module_for_all_hops=same_module_for_all_hops,
                kb_key_emb_size=kbsrc_embed.embedding_dim,
                kb_input_feeding=kb_input_feeding,
                kb_feed_rnn=kb_feed_rnn,
                kb_multihead_feed=kb_multihead_feed,
                do_pad_kb_keys=do_pad_kb_keys)

    # specify generator, which is mostly just the output layer
    generator = Generator(dec_hidden_size=cfg["decoder"]["hidden_size"],
                          vocab_size=len(trg_vocab),
                          add_kb_biases_to_output=add_kb_biases_to_output,
                          double_decoder=double_decoder)

    model = Model(encoder=encoder,
                  decoder=decoder,
                  generator=generator,
                  src_embed=src_embed,
                  trg_embed=trg_embed,
                  src_vocab=src_vocab,
                  trg_vocab=trg_vocab,
                  kb_key_embed=kbsrc_embed,
                  trv_vocab=trv_vocab,
                  k_hops=k_hops,
                  do_postproc=do_postproc,
                  canonize=canonization_func,
                  kb_att_dims=len(kb_max_dims),
                  posEncKBkeys=posEncKBkeys)

    # tie softmax layer with trg embeddings
    if cfg.get("tied_softmax", False):
        if trg_embed.lut.weight.shape == \
                model.generator.output_layer.weight.shape:
            # (also) share trg embeddings and softmax layer:
            model.generator.output_layer.weight = trg_embed.lut.weight
            if model.generator.double_decoder:
                # (also also) share trg embeddings and side softmax layer
                assert hasattr(model.generator, "side_output_layer")
                if tied_side_softmax:
                    # because of distributivity this becomes
                    # O (x_1 + x_2) instead of O_1 x_1 + O_2 x_2
                    model.generator.side_output_layer.weight = \
                        trg_embed.lut.weight
        else:
            raise ConfigurationError(
                "For tied_softmax, the decoder embedding_dim and decoder "
                "hidden_size must be the same. "
                "The decoder must be a Transformer.")

    # custom initialization of model parameters
    initialize_model(model, cfg, src_padding_idx, trg_padding_idx)

    return model
def build_model(cfg: dict = None,
                src_vocab: Vocabulary = None,
                trg_vocab: Vocabulary = None) -> Model:
    """
    Build and initialize the model according to the configuration.

    :param cfg: dictionary configuration containing model specifications
    :param src_vocab: source vocabulary
    :param trg_vocab: target vocabulary
    :return: built and initialized model
    """
    logger.info("Building an encoder-decoder model...")
    src_padding_idx = src_vocab.stoi[PAD_TOKEN]
    trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]

    src_embed = Embeddings(**cfg["encoder"]["embeddings"],
                           vocab_size=len(src_vocab),
                           padding_idx=src_padding_idx)

    # this ties source and target embeddings
    # for softmax layer tying, see further below
    if cfg.get("tied_embeddings", False):
        if src_vocab.itos == trg_vocab.itos:
            # share embeddings for src and trg
            trg_embed = src_embed
        else:
            raise ConfigurationError(
                "Embedding cannot be tied since vocabularies differ.")
    else:
        trg_embed = Embeddings(**cfg["decoder"]["embeddings"],
                               vocab_size=len(trg_vocab),
                               padding_idx=trg_padding_idx)

    # build encoder
    enc_dropout = cfg["encoder"].get("dropout", 0.)
    enc_emb_dropout = cfg["encoder"]["embeddings"].get("dropout", enc_dropout)
    if cfg["encoder"].get("type", "recurrent") == "transformer":
        assert cfg["encoder"]["embeddings"]["embedding_dim"] == \
               cfg["encoder"]["hidden_size"], \
               "for transformer, emb_size must be hidden_size"
        encoder = TransformerEncoder(**cfg["encoder"],
                                     emb_size=src_embed.embedding_dim,
                                     emb_dropout=enc_emb_dropout)
    else:
        encoder = RecurrentEncoder(**cfg["encoder"],
                                   emb_size=src_embed.embedding_dim,
                                   emb_dropout=enc_emb_dropout)

    # build decoder
    dec_dropout = cfg["decoder"].get("dropout", 0.)
    dec_emb_dropout = cfg["decoder"]["embeddings"].get("dropout", dec_dropout)
    if cfg["decoder"].get("type", "recurrent") == "transformer":
        decoder = TransformerDecoder(**cfg["decoder"],
                                     encoder=encoder,
                                     vocab_size=len(trg_vocab),
                                     emb_size=trg_embed.embedding_dim,
                                     emb_dropout=dec_emb_dropout)
    else:
        decoder = RecurrentDecoder(**cfg["decoder"],
                                   encoder=encoder,
                                   vocab_size=len(trg_vocab),
                                   emb_size=trg_embed.embedding_dim,
                                   emb_dropout=dec_emb_dropout)

    model = Model(encoder=encoder, decoder=decoder,
                  src_embed=src_embed, trg_embed=trg_embed,
                  src_vocab=src_vocab, trg_vocab=trg_vocab)

    # tie softmax layer with trg embeddings
    if cfg.get("tied_softmax", False):
        if trg_embed.lut.weight.shape == \
                model.decoder.output_layer.weight.shape:
            # (also) share trg embeddings and softmax layer:
            model.decoder.output_layer.weight = trg_embed.lut.weight
        else:
            raise ConfigurationError(
                "For tied_softmax, the decoder embedding_dim and decoder "
                "hidden_size must be the same. "
                "The decoder must be a Transformer.")

    # custom initialization of model parameters
    initialize_model(model, cfg, src_padding_idx, trg_padding_idx)

    # initialize embeddings from file
    pretrained_enc_embed_path = cfg["encoder"]["embeddings"].get(
        "load_pretrained", None)
    pretrained_dec_embed_path = cfg["decoder"]["embeddings"].get(
        "load_pretrained", None)
    if pretrained_enc_embed_path:
        logger.info("Loading pretrained src embeddings...")
        model.src_embed.load_from_file(pretrained_enc_embed_path, src_vocab)
    if pretrained_dec_embed_path and not cfg.get("tied_embeddings", False):
        logger.info("Loading pretrained trg embeddings...")
        model.trg_embed.load_from_file(pretrained_dec_embed_path, trg_vocab)

    logger.info("Enc-dec model built.")
    return model
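# Illustrative sketch only: the extra configuration keys this build_model
# variant reads on top of the usual encoder/decoder settings. The paths and
# dimensions below are placeholders, not values from the original project.
example_cfg_fragment = {
    "tied_embeddings": False,
    "tied_softmax": False,  # only valid when embedding and output-layer shapes match
    "encoder": {"embeddings": {"embedding_dim": 64,
                               "load_pretrained": "path/to/src_embeddings.txt"}},
    "decoder": {"embeddings": {"embedding_dim": 64,
                               "load_pretrained": "path/to/trg_embeddings.txt"}},
}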
def build_unsupervised_nmt_model(cfg: dict = None,
                                 src_vocab: Vocabulary = None,
                                 trg_vocab: Vocabulary = None
                                 ) -> UnsupervisedNMTModel:
    """
    Build an UnsupervisedNMTModel.

    :param cfg: model configuration
    :param src_vocab: Vocabulary for the src language
    :param trg_vocab: Vocabulary for the trg language
    :return: Unsupervised NMT model as specified in cfg
    """
    src_padding_idx = src_vocab.stoi[PAD_TOKEN]
    trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]

    # build source and target embedding layers
    # embeddings in the encoder are pretrained and stay fixed
    loaded_src_embed = PretrainedEmbeddings(**cfg["encoder"]["embeddings"],
                                            vocab_size=len(src_vocab),
                                            padding_idx=src_padding_idx,
                                            vocab=src_vocab,
                                            freeze=True)
    loaded_trg_embed = PretrainedEmbeddings(**cfg["decoder"]["embeddings"],
                                            vocab_size=len(trg_vocab),
                                            padding_idx=trg_padding_idx,
                                            vocab=trg_vocab,
                                            freeze=True)

    # embeddings in the decoder are randomly initialised and will be learned
    src_embed = Embeddings(**cfg["encoder"]["embeddings"],
                           vocab_size=len(src_vocab),
                           padding_idx=src_padding_idx,
                           freeze=False)
    trg_embed = Embeddings(**cfg["decoder"]["embeddings"],
                           vocab_size=len(trg_vocab),
                           padding_idx=trg_padding_idx,
                           freeze=False)

    # build shared encoder
    enc_dropout = cfg["encoder"].get("dropout", 0.)
    enc_emb_dropout = cfg["encoder"]["embeddings"].get("dropout", enc_dropout)
    if cfg["encoder"].get("type", "recurrent") == "transformer":
        assert cfg["encoder"]["embeddings"]["embedding_dim"] == \
               cfg["encoder"]["hidden_size"], \
               "for transformer, emb_size must be hidden_size"
        shared_encoder = TransformerEncoder(**cfg["encoder"],
                                            emb_size=src_embed.embedding_dim,
                                            emb_dropout=enc_emb_dropout)
    else:
        shared_encoder = RecurrentEncoder(**cfg["encoder"],
                                          emb_size=src_embed.embedding_dim,
                                          emb_dropout=enc_emb_dropout)

    # build src and trg language decoders
    dec_dropout = cfg["decoder"].get("dropout", 0.)
    dec_emb_dropout = cfg["decoder"]["embeddings"].get("dropout", dec_dropout)
    if cfg["decoder"].get("type", "recurrent") == "transformer":
        src_decoder = TransformerDecoder(**cfg["decoder"],
                                         encoder=shared_encoder,
                                         vocab_size=len(src_vocab),
                                         emb_size=src_embed.embedding_dim,
                                         emb_dropout=dec_emb_dropout)
        trg_decoder = TransformerDecoder(**cfg["decoder"],
                                         encoder=shared_encoder,
                                         vocab_size=len(trg_vocab),
                                         emb_size=trg_embed.embedding_dim,
                                         emb_dropout=dec_emb_dropout)
    else:
        src_decoder = RecurrentDecoder(**cfg["decoder"],
                                       encoder=shared_encoder,
                                       vocab_size=len(src_vocab),
                                       emb_size=src_embed.embedding_dim,
                                       emb_dropout=dec_emb_dropout)
        trg_decoder = RecurrentDecoder(**cfg["decoder"],
                                       encoder=shared_encoder,
                                       vocab_size=len(trg_vocab),
                                       emb_size=trg_embed.embedding_dim,
                                       emb_dropout=dec_emb_dropout)

    # build unsupervised NMT model
    model = UnsupervisedNMTModel(loaded_src_embed, loaded_trg_embed,
                                 src_embed, trg_embed,
                                 shared_encoder,
                                 src_decoder, trg_decoder,
                                 src_vocab, trg_vocab)

    # initialise the four translation directions
    # embed_initializer should be None so loaded encoder embeddings
    # won't be overwritten
    initialize_model(model.src2src_translator, cfg,
                     src_padding_idx, src_padding_idx)
    initialize_model(model.src2trg_translator, cfg,
                     src_padding_idx, trg_padding_idx)
    initialize_model(model.trg2src_translator, cfg,
                     trg_padding_idx, src_padding_idx)
    initialize_model(model.trg2trg_translator, cfg,
                     trg_padding_idx, trg_padding_idx)

    return model
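# Usage sketch (assumption: UnsupervisedNMTModel exposes one sub-model per
# translation direction, as the initialize_model calls above suggest; the
# attribute names are taken from those calls and not otherwise verified here).
# model = build_unsupervised_nmt_model(cfg, src_vocab, trg_vocab)
# model.src2src_translator   # src -> src (denoising / autoencoding direction)
# model.src2trg_translator   # src -> trg translation
# model.trg2src_translator   # trg -> src translation
# model.trg2trg_translator   # trg -> trg (denoising / autoencoding direction)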