Example #1
    def test_py_lstm_mask(self):
        model = DefaultTranslator(
            src_reader=self.src_reader,
            trg_reader=self.trg_reader,
            src_embedder=SimpleWordEmbedder(self.exp_global, vocab_size=100),
            encoder=PyramidalLSTMSeqTransducer(self.exp_global, layers=1),
            attender=MlpAttender(self.exp_global),
            trg_embedder=SimpleWordEmbedder(self.exp_global, vocab_size=100),
            decoder=MlpSoftmaxDecoder(self.exp_global, vocab_size=100),
        )

        batcher = xnmt.batcher.TrgBatcher(batch_size=3)
        # pack src/trg data into batches of 3 sentences; only the source batches are used here
        train_src, _ = batcher.pack(self.src_data, self.trg_data)

        self.set_train(True)
        for sent_i in range(3):
            dy.renew_cg()  # start a fresh DyNet computation graph for each batch
            src = train_src[sent_i]
            self.start_sent(src)
            embeddings = model.src_embedder.embed_sent(src)
            encodings = model.encoder(embeddings)
            # the encoder must propagate the batcher's padding mask unchanged
            if train_src[sent_i].mask is None:
                assert encodings.mask is None
            else:
                np.testing.assert_array_almost_equal(
                    train_src[sent_i].mask.np_arr, encodings.mask.np_arr)
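The invariant this test checks can be illustrated without xnmt. A minimal sketch, assuming the mask's np_arr is a batch_size x seq_len array in which 1 marks padded positions; the names sent_lens, src_mask and enc_mask are illustrative, not xnmt API:

import numpy as np

sent_lens = [5, 3, 4]          # hypothetical sentence lengths in one batch of 3
max_len = max(sent_lens)

# padding mask: 0 for real tokens, 1 for padded positions (assumed convention)
src_mask = np.array([[0] * l + [1] * (max_len - l) for l in sent_lens])

# an encoder that emits one output per input step should leave the mask unchanged
enc_mask = src_mask.copy()
np.testing.assert_array_almost_equal(src_mask, enc_mask)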
Example #2
 def test_py_lstm_encoder_len(self):
     layer_dim = 512
     model = DefaultTranslator(
         src_reader=self.src_reader,
         trg_reader=self.trg_reader,
         src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
         encoder=PyramidalLSTMSeqTransducer(input_dim=layer_dim,
                                            hidden_dim=layer_dim,
                                            layers=3),
         attender=MlpAttender(input_dim=layer_dim,
                              state_dim=layer_dim,
                              hidden_dim=layer_dim),
         trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
         decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                                   lstm_dim=layer_dim,
                                   mlp_hidden_dim=layer_dim,
                                   trg_embed_dim=layer_dim,
                                   vocab_size=100),
     )
     self.set_train(True)
     for sent_i in range(10):
         dy.renew_cg()  # start a fresh DyNet computation graph for each sentence
         # pad the source with end-of-sentence tokens so its length is a multiple of 4
         src = self.src_data[sent_i].get_padded_sent(
             Vocab.ES, 4 - (len(self.src_data[sent_i]) % 4))
         self.start_sent(src)
         embeddings = model.src_embedder.embed_sent(src)
         encodings = model.encoder(embeddings)
         # a 3-layer pyramidal encoder should shorten the sequence by a factor of 4
         self.assertEqual(int(math.ceil(len(embeddings) / float(4))),
                          len(encodings))
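The expected length in the assertion is plain arithmetic. A minimal, xnmt-independent sketch, assuming (as the test implies) that every pyramidal layer after the first halves the sequence length, so the reduction factor for 3 layers is 2 ** (3 - 1) = 4; expected_pyramidal_len is an illustrative helper, not part of xnmt:

import math

def expected_pyramidal_len(src_len, layers=3):
    # assumed behaviour: each layer after the first halves the length
    reduction = 2 ** (layers - 1)
    return int(math.ceil(src_len / float(reduction)))

# a padded source of 16 tokens should come out as 4 encoder states
assert expected_pyramidal_len(16, layers=3) == 4
assert expected_pyramidal_len(8, layers=3) == 2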