# TensorFlow 1.x contrib imports assumed by this function (module paths as
# used by tf.contrib.learn's state-saving RNN estimator code).
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops


def construct_state_saving_rnn(cell,
                               inputs,
                               num_label_columns,
                               state_saver,
                               state_name,
                               scope='rnn'):
  """Build a state saving RNN and apply a fully connected layer.

  Args:
    cell: An instance of `RNNCell`.
    inputs: A length `T` list of inputs, each a `Tensor` of shape
      `[batch_size, input_size, ...]`.
    num_label_columns: The desired output dimension.
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: Python string or tuple of strings.  The name to use with the
      state_saver. If the cell returns tuples of states (i.e.,
      `cell.state_size` is a tuple) then `state_name` should be a tuple of
      strings having the same length as `cell.state_size`.  Otherwise it should
      be a single string.
    scope: `VariableScope` for the created subgraph; defaults to "rnn".

  Returns:
    activations: The output of the RNN, projected to `num_label_columns`
      dimensions, a `Tensor` of shape `[batch_size, T, num_label_columns]`.
    final_state: The final state output by the RNN.
  """
  with ops.name_scope(scope):
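    # Unroll the RNN; the initial state is read via `state_saver.state` and
    # the final state is written back via `state_saver.save_state`.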
    rnn_outputs, final_state = core_rnn.static_state_saving_rnn(
        cell=cell,
        inputs=inputs,
        state_saver=state_saver,
        state_name=state_name,
        scope=scope)
    # Convert rnn_outputs from a list of time-major order Tensors to a single
    # Tensor of batch-major order.
    rnn_outputs = array_ops.stack(rnn_outputs, axis=1)
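    # Project every timestep's output to `num_label_columns` with a linear
    # (no activation) fully connected layer.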
    activations = layers.fully_connected(
        inputs=rnn_outputs,
        num_outputs=num_label_columns,
        activation_fn=None,
        trainable=True)
    # Use `identity` to rename `final_state`.
    final_state = array_ops.identity(
        final_state, name=rnn_common.RNNKeys.FINAL_STATE_KEY)
    return activations, final_state
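
# --- Usage sketch (not from the original source) ---------------------------
# A minimal, hedged example of calling `construct_state_saving_rnn` under
# TensorFlow 1.x. `_SimpleStateSaver` is a hypothetical stand-in that only
# satisfies the `state`/`save_state` interface described in the docstring;
# real pipelines would typically obtain a state saver from
# `tf.contrib.training.batch_sequences_with_states` instead.
import tensorflow as tf


class _SimpleStateSaver(object):
  """Hypothetical state saver backed by a non-trainable variable."""

  def __init__(self, batch_size, state_size):
    self._state = tf.get_variable(
        'saved_state', [batch_size, state_size],
        initializer=tf.zeros_initializer(), trainable=False)

  def state(self, name):
    # Return the state saved under `name` (single state here, so `name` is
    # not used for lookup).
    return self._state

  def save_state(self, name, value):
    # Write `value` back so the next unrolled segment resumes from it.
    return tf.assign(self._state, value)


batch_size, input_size, num_steps = 4, 3, 5
# GRUCell has an integer `state_size`, so a single `state_name` string works.
cell = tf.nn.rnn_cell.GRUCell(num_units=8)
inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
          for _ in range(num_steps)]
state_saver = _SimpleStateSaver(batch_size, cell.state_size)

activations, final_state = construct_state_saving_rnn(
    cell=cell,
    inputs=inputs,
    num_label_columns=2,
    state_saver=state_saver,
    state_name='gru_state')
# activations: [batch_size, num_steps, 2]; final_state: [batch_size, 8].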