Example No. 1
0
    def test_gru(self):
        """Test invoking GRU in eager mode.

        Checks output shape, variable count, weight independence between
        layers, determinism of repeated evaluation, and sensitivity to the
        initial state.
        """
        with context.eager_mode():
            with tfe.IsolateTest():
                batch_size = 10
                n_hidden = 7
                in_channels = 4
                n_steps = 6
                # Renamed from `input` to avoid shadowing the builtin.
                input_data = np.random.rand(batch_size, n_steps,
                                            in_channels).astype(np.float32)
                layer = layers.GRU(n_hidden, batch_size)
                result, state = layer(input_data)
                assert result.shape == (batch_size, n_steps, n_hidden)
                # The GRU layer is expected to expose exactly 4 variables.
                assert len(layer.variables) == 4

                # A second layer has independent random weights, so its
                # output must differ from the first layer's.
                layer2 = layers.GRU(n_hidden, batch_size)
                result2, state2 = layer2(input_data)
                assert not np.allclose(result, result2)

                # Re-evaluating the first layer is deterministic: same
                # weights and input produce the same output as before.
                result3, state3 = layer(input_data)
                assert np.allclose(result, result3)

                # Supplying a different starting state must change the
                # output.
                result4, state4 = layer(input_data, initial_state=state3)
                assert not np.allclose(result, result4)
Example No. 2
0
 def _create_decoder(self, n_layers, dropout):
   """Build the decoder: repeated embedding -> stacked GRUs -> softmax Dense.

   Returns the final Dense layer, which produces a probability
   distribution over the output tokens at each position.
   """
   # Broadcast the embedding across every output position.
   current = layers.Repeat(
       self._max_output_length, in_layers=self.embedding)
   for _ in range(n_layers):
     # Dropout (when enabled) precedes each recurrent layer.
     if dropout > 0.0:
       current = layers.Dropout(dropout, in_layers=current)
     current = layers.GRU(
         self._embedding_dimension, self.batch_size, in_layers=current)
   return layers.Dense(
       len(self._output_tokens),
       in_layers=current,
       activation_fn=tf.nn.softmax)
Example No. 3
0
 def _create_encoder(self, n_layers, dropout):
   """Build the encoder: stacked GRUs gathered into a fixed-size embedding.

   When `self._variational` is set, the embedding is sampled from a
   learned mean/stddev pair (training only); otherwise the gathered GRU
   output is returned directly.
   """
   current = self._features
   for _ in range(n_layers):
     # Dropout (when enabled) precedes each recurrent layer.
     if dropout > 0.0:
       current = layers.Dropout(dropout, in_layers=current)
     current = layers.GRU(
         self._embedding_dimension, self.batch_size, in_layers=current)
   # Select per-sequence outputs via the precomputed gather indices
   # (presumably the final timestep of each sequence — TODO confirm
   # against where self._gather_indices is built).
   current = layers.Gather(in_layers=[current, self._gather_indices])
   if not self._variational:
     return current
   # Variational path: learn a Gaussian over embeddings and sample from
   # it during training.
   self._embedding_mean = layers.Dense(
       self._embedding_dimension, in_layers=current)
   self._embedding_stddev = layers.Dense(
       self._embedding_dimension, in_layers=current)
   return layers.CombineMeanStd(
       [self._embedding_mean, self._embedding_stddev], training_only=True)