def rnn_logit_fn(features, mode):
  """Computes RNN logits and the corresponding sequence-length mask.

  Args:
    features: The first item returned from the `input_fn` passed to `train`,
      `evaluate`, and `predict`; a single `Tensor` or a `dict` of same.
    mode: Optional. Specifies if this is training, evaluation or prediction.
      See `ModeKeys`.

  Returns:
    A tuple `(logits, sequence_length_mask)` of `Tensor` objects.
  """
  with ops.name_scope('sequence_input_layer'):
    sequence_input, sequence_length = fc.SequenceFeatures(
        sequence_feature_columns)(features)
    summary.histogram('sequence_length', sequence_length)

    if context_feature_columns:
      context_input = fc.DenseFeatures(context_feature_columns)(features)
      sequence_input = fc.concatenate_context_input(
          context_input, sequence_input=sequence_input)

  # Boolean mask marking the valid (non-padded) time steps of each example.
  sequence_length_mask = array_ops.sequence_mask(sequence_length)
  is_training = (mode == model_fn.ModeKeys.TRAIN)
  # Ignore output state; only the layer outputs feed the logits projection.
  rnn_outputs = rnn_layer_fn()(
      sequence_input, mask=sequence_length_mask, training=is_training)
  logits = keras_layers.Dense(units=output_units, name='logits')(rnn_outputs)
  return logits, sequence_length_mask
def call(self, inputs, training=None):
  """Computes the RNN output.

  By default no activation is applied and the logits are returned. To output
  probabilities, specify an activation such as sigmoid or softmax.

  Args:
    inputs: A dict mapping keys to input tensors.
    training: Python boolean indicating whether the layers should behave in
      training mode or in inference mode. Forwarded to the model's layers;
      used, for instance, by cells that apply dropout.

  Returns:
    A `Tensor` with logits from the RNN model. It has shape
    (batch_size, time_step, logits_size) if `return_sequences` is `True`,
    (batch_size, logits_size) otherwise.

  Raises:
    ValueError: If `inputs` is not a dict.
  """
  if not isinstance(inputs, dict):
    raise ValueError('inputs should be a dictionary of `Tensor`s. '
                     'Given type: {}'.format(type(inputs)))

  with ops.name_scope('sequence_input_layer'):
    # Some feature-layer versions do not accept `training`; retry without it.
    try:
      sequence_input, sequence_length = self._sequence_features_layer(
          inputs, training=training)
    except TypeError:
      sequence_input, sequence_length = self._sequence_features_layer(inputs)
    tf.compat.v1.summary.histogram('sequence_length', sequence_length)

    if self._context_feature_columns:
      try:
        context_input = self._dense_features_layer(inputs, training=training)
      except TypeError:
        context_input = self._dense_features_layer(inputs)
      sequence_input = fc.concatenate_context_input(
          context_input, sequence_input=sequence_input)

  sequence_length_mask = tf.sequence_mask(sequence_length)
  rnn_outputs = self._rnn_layer(
      sequence_input, mask=sequence_length_mask, training=training)
  logits = self._logits_layer(rnn_outputs)
  if self._return_sequences:
    # Attach the mask as `_keras_mask` so Keras loss and metric aggregation
    # excludes the padded steps in the sequential case.
    logits._keras_mask = sequence_length_mask  # pylint: disable=protected-access
  return logits