Example #1
 def __init__(self):
     super().__init__(
         inputter=inputters.MixedInputter(
             [
                 inputters.WordEmbedder(embedding_size=100),
                 inputters.CharConvEmbedder(
                     embedding_size=30,
                     num_outputs=30,
                     kernel_size=3,
                     stride=1,
                     dropout=0.5,
                 ),
             ],
             dropout=0.5,
         ),
         encoder=encoders.RNNEncoder(
             num_layers=1,
             num_units=400,
             bidirectional=True,
             dropout=0.5,
             residual_connections=False,
             cell_class=tf.keras.layers.LSTMCell,
         ),
         crf_decoding=True,
     )
Example #2
 def testBidirectionalRNNEncoder(self, cell_class):
     encoder = encoders.RNNEncoder(3, 20, bidirectional=True, cell_class=cell_class)
     # Batch of 4 sequences, maximum length 5, input depth 10.
     inputs = tf.random.uniform([4, 5, 10])
     lengths = tf.constant([4, 3, 5, 2])
     outputs, states, _ = encoder(inputs, sequence_length=lengths, training=True)
     # Forward and backward outputs are concatenated, so the output depth is 2 * 20.
     self.assertListEqual(outputs.shape.as_list(), [4, 5, 40])
     # One state per layer.
     self.assertEqual(len(states), 3)
Example #3
 def __init__(self):
     super(LuongAttention, self).__init__(
         source_inputter=inputters.WordEmbedder(embedding_size=512),
         target_inputter=inputters.WordEmbedder(embedding_size=512),
         encoder=encoders.RNNEncoder(num_layers=4,
                                     num_units=1000,
                                     dropout=0.2,
                                     residual_connections=False,
                                     cell_class=tf.keras.layers.LSTMCell),
         decoder=decoders.AttentionalRNNDecoder(
             num_layers=4,
             num_units=1000,
             bridge_class=layers.CopyBridge,
             attention_mechanism_class=tfa.seq2seq.LuongAttention,
             cell_class=tf.keras.layers.LSTMCell,
             dropout=0.2,
             residual_connections=False))
Example #4
 def __init__(self):
     # pylint: disable=bad-continuation
     super(LstmCnnCrfTagger, self).__init__(
         inputter=inputters.MixedInputter([
             inputters.WordEmbedder(embedding_size=100),
             inputters.CharConvEmbedder(embedding_size=30,
                                        num_outputs=30,
                                        kernel_size=3,
                                        stride=1,
                                        dropout=0.5)
         ], dropout=0.5),
         encoder=encoders.RNNEncoder(num_layers=1,
                                     num_units=400,
                                     bidirectional=True,
                                     dropout=0.5,
                                     residual_connections=False,
                                     cell_class=tf.keras.layers.LSTMCell),
         crf_decoding=True)
Example #5
 def __init__(self):
     super(NMTMediumV1, self).__init__(
         source_inputter=inputters.WordEmbedder(embedding_size=512),
         target_inputter=inputters.WordEmbedder(embedding_size=512),
         encoder=encoders.RNNEncoder(num_layers=4,
                                     num_units=256,
                                     bidirectional=True,
                                     residual_connections=False,
                                     dropout=0.3,
                                     reducer=layers.ConcatReducer(),
                                     cell_class=tf.keras.layers.LSTMCell),
         decoder=decoders.AttentionalRNNDecoder(
             num_layers=4,
             num_units=512,
             bridge_class=layers.CopyBridge,
             attention_mechanism_class=tfa.seq2seq.LuongAttention,
             attention_layer_activation=None,
             cell_class=tf.keras.layers.LSTMCell,
             dropout=0.3,
             residual_connections=False))
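
The snippets above come from larger model definitions and omit their imports (tensorflow as tf, tensorflow_addons as tfa, and OpenNMT-tf's inputters, encoders, decoders and layers modules). As a rough orientation, here is a minimal standalone sketch of building and calling an RNNEncoder on its own; it assumes OpenNMT-tf is importable as opennmt and follows the call convention shown in Example #2, so treat it as an illustration rather than the library's documented usage.

 import tensorflow as tf
 from opennmt import encoders

 # Bidirectional encoder; forward and backward outputs are concatenated by default.
 encoder = encoders.RNNEncoder(
     num_layers=2,
     num_units=32,
     bidirectional=True,
     cell_class=tf.keras.layers.LSTMCell,
 )

 inputs = tf.random.uniform([3, 6, 16])  # batch of 3 sequences, length 6, depth 16
 lengths = tf.constant([6, 4, 5])

 # Same return convention as in Example #2: (outputs, per-layer states, lengths).
 outputs, states, _ = encoder(inputs, sequence_length=lengths, training=False)
 print(outputs.shape)  # (3, 6, 64)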