Example #1
 def testTrainLanguageModel(self):
     src = test_util.make_data_file(
         os.path.join(self.get_temp_dir(), "src.txt"),
         ["1 2 3 4", "5 6 7 8 9", "3 2"])
     vocab = test_util.make_vocab(
         os.path.join(self.get_temp_dir(), "vocab.txt"),
         list(map(str, range(10))))
     config = {
         "data": {
             "train_features_file": src,
             "vocabulary": vocab,
         },
         "params": {
             "learning_rate": 0.0005,
             "optimizer": "Adam"
         },
         "train": {
             "batch_size": 10,
             "max_step": 2,
         },
     }
     model = models.LanguageModel(
         decoders.SelfAttentionDecoder(2, num_units=32, ffn_inner_dim=32),
         embedding_size=16,
         reuse_embedding=False)
     runner = Runner(model, config)
     runner.train()
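These snippets are excerpts of OpenNMT-tf test methods, so their imports are not shown. The following is a minimal sketch of what they presumably rely on; the exact module paths are an assumption based on the identifiers used above and may differ across versions:

import os

import tensorflow as tf

from opennmt import decoders, models
from opennmt.runner import Runner
from opennmt.tests import test_util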
Example #2
 def testCreateVariablesLanguageModel(self):
   _, data_config = self._makeToyLMData()
   decoder = decoders.SelfAttentionDecoder(
       2, num_units=16, num_heads=4, ffn_inner_dim=32, num_sources=0)
   model = models.LanguageModel(decoder, embedding_size=16)
   model.initialize(data_config)
   model.create_variables()
   self.assertTrue(len(model.trainable_variables) > 0)
Example #3
 def testLanguageModelWithMissingStart(self):
   _, data_config = self._makeToyLMData()
   decoder = decoders.SelfAttentionDecoder(
       2, num_units=16, num_heads=4, ffn_inner_dim=32, num_sources=0)
   model = models.LanguageModel(decoder, embedding_size=16)
   model.initialize(data_config)
   features, _ = model.features_inputter.make_features(tf.constant(""))
   with self.assertRaises(tf.errors.InvalidArgumentError):
     model(features)
Example #4
 def testLanguageModelServing(self):
     _, data_config = self._makeToyLMData()
     decoder = decoders.SelfAttentionDecoder(
         2, num_units=16, num_heads=4, ffn_inner_dim=32, num_sources=0)
     model = models.LanguageModel(decoder, embedding_size=16)
     model.initialize(data_config)
     function = model.serve_function()
     function.get_concrete_function()
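A possible follow-up, not part of the original test, would be to write the traced serving signature out as a SavedModel with the standard tf.saved_model API. The export directory below is hypothetical:

     # Hypothetical continuation: export the concrete serving signature.
     export_dir = os.path.join(self.get_temp_dir(), "export")
     tf.saved_model.save(
         model, export_dir,
         signatures={"serving_default": function.get_concrete_function()})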
Example #5
 def testLanguageModelWithStartOfSentence(self):
   _, data_config = self._makeToyLMData()
   data_config["sequence_controls"] = dict(start=True, end=False)
   decoder = decoders.SelfAttentionDecoder(
       2, num_units=16, num_heads=4, ffn_inner_dim=32, num_sources=0)
   model = models.LanguageModel(decoder, embedding_size=16)
   model.initialize(data_config, params={"maximum_decoding_length": 1})
   features, _ = model.features_inputter.make_features(tf.constant(""))
   features = tf.nest.map_structure(lambda t: tf.expand_dims(t, 0), features)  # Add batch dim.
   _, predictions = self.evaluate(model(features))
   # Predictions should not include the leading <s>.
   self.assertEqual(predictions["length"][0], 1)
   self.assertTupleEqual(predictions["tokens"].shape, (1, 1))
Example #6
 def testLanguageModel(self, mode):
   # Mainly test that the code does not throw.
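    # `mode` is presumably supplied by a parameterized test decorator and takes
    # tf.estimator.ModeKeys values (see the PREDICT check below).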
   decoder = decoders.SelfAttentionDecoder(
       2, num_units=16, num_heads=4, ffn_inner_dim=32, num_sources=0)
   model = models.LanguageModel(decoder, embedding_size=16)
   features_file, data_config = self._makeToyLMData()
   params = {
       "optimizer": "SGD",
       "learning_rate": 0.1}
   self._testGenericModel(
       model,
       mode,
       features_file,
       data_config=data_config,
       batch_size=1 if mode == tf.estimator.ModeKeys.PREDICT else 16,
       prediction_heads=["tokens", "length"],
       params=params)