Example #1
    def __init__(self,
                 encoders,
                 outputs_reducer=ConcatReducer(axis=1),
                 states_reducer=JoinReducer(),
                 outputs_layer_fn=None,
                 combined_output_layer_fn=None,
                 share_parameters=False):
        """Initializes the parameters of the encoder.

    Args:
      encoders: A list of :class:`opennmt.encoders.encoder.Encoder` or a single
        one, in which case the same encoder is applied to each input.
      outputs_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        outputs. If ``None``, defaults to
        :class:`opennmt.layers.reducer.JoinReducer`.
      states_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        states. If ``None``, defaults to
        :class:`opennmt.layers.reducer.JoinReducer`.
      outputs_layer_fn: A callable or list of callables applied to the
        encoder outputs. If it is a single callable, it is applied to each
        encoder output. Otherwise, the ``i`` th callable is applied to the
        output of encoder ``i``.
      combined_output_layer_fn: A callable to apply on the combined output
        (i.e. the output of :obj:`outputs_reducer`).
      share_parameters: If ``True``, share parameters between the parallel
        encoders. For stateful encoders, simply pass a single encoder instance
        to :obj:`encoders` for parameter sharing.

    Raises:
      ValueError: if :obj:`outputs_layer_fn` is a list with a size not equal
        to the number of encoders.
    """
        if (isinstance(encoders, list) and isinstance(outputs_layer_fn, list)
                and len(outputs_layer_fn) != len(encoders)):
            raise ValueError(
                "The number of output layers must match the number of encoders; "
                "expected %d layers but got %d." %
                (len(encoders), len(outputs_layer_fn)))
        super(ParallelEncoder, self).__init__()
        self.encoders = encoders
        self.outputs_reducer = (
            outputs_reducer if outputs_reducer is not None else JoinReducer())
        self.states_reducer = (
            states_reducer if states_reducer is not None else JoinReducer())
        self.outputs_layer_fn = outputs_layer_fn
        self.combined_output_layer_fn = combined_output_layer_fn
        self.share_parameters = share_parameters
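A minimal usage sketch (assuming the OpenNMT-tf v1 API; the choice of
sub-encoders and sizes below is illustrative, not part of the snippet):

    import opennmt as onmt
    from opennmt.layers.reducer import ConcatReducer, JoinReducer

    # Run a recurrent and a self-attention encoder on the same input,
    # concatenate their outputs on the time axis, and join their states.
    encoder = onmt.encoders.ParallelEncoder(
        [onmt.encoders.BidirectionalRNNEncoder(2, 256),
         onmt.encoders.SelfAttentionEncoder(2, num_units=256)],
        outputs_reducer=ConcatReducer(axis=1),
        states_reducer=JoinReducer())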
Example #2
    def __init__(self,
                 encoders,
                 states_reducer=JoinReducer(),
                 transition_layer_fn=None):
        """Initializes the parameters of the encoder.

    Args:
      encoders: A list of :class:`opennmt.encoders.Encoder`.
      states_reducer: A :class:`opennmt.layers.Reducer` to merge all
        states.
      transition_layer_fn: A callable or list of callables applied to the
        output of an encoder before passing it as input to the next. If it
        is a single callable, it is applied between every pair of
        consecutive encoders. Otherwise, the ``i`` th callable is applied
        between encoders ``i`` and ``i + 1``.

    Raises:
      ValueError: if :obj:`transition_layer_fn` is a list with a size not equal
        to the number of encoder transitions ``len(encoders) - 1``.
    """
        if (isinstance(transition_layer_fn, list)
                and len(transition_layer_fn) != len(encoders) - 1):
            raise ValueError(
                "The number of transition layers must match the number of encoder "
                "transitions; expected %d layers but got %d." %
                (len(encoders) - 1, len(transition_layer_fn)))
        super(SequentialEncoder, self).__init__()
        self.encoders = encoders
        self.states_reducer = states_reducer
        self.transition_layer_fn = transition_layer_fn
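A minimal sketch of chaining encoders with a transition layer (assuming
the OpenNMT-tf v1 API; the sizes and the dense projection between the
encoders are illustrative):

    import tensorflow as tf
    import opennmt as onmt

    # Feed a bidirectional encoder into a unidirectional one, projecting
    # the intermediate output to a smaller depth between the two.
    encoder = onmt.encoders.SequentialEncoder(
        [onmt.encoders.BidirectionalRNNEncoder(2, 512),
         onmt.encoders.UnidirectionalRNNEncoder(2, 256)],
        transition_layer_fn=lambda x: tf.layers.dense(x, 256))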
Example #3
    def __init__(self,
                 num_layers,
                 num_units,
                 reduction_factor=2,
                 cell_class=None,
                 dropout=0.3):
        """Initializes the parameters of the encoder.

    Args:
      num_layers: The number of layers.
      num_units: The number of units in each layer.
      reduction_factor: The time reduction factor.
      cell_class: The inner cell class or a callable taking :obj:`num_units` as
        argument and returning a cell. Defaults to an LSTM cell.
      dropout: The probability to drop units in each layer output.
    """
        super(PyramidalRNNEncoder, self).__init__()
        self.reduction_factor = reduction_factor
        self.state_reducer = JoinReducer()
        self.layers = []

        for _ in range(num_layers):
            self.layers.append(
                BidirectionalRNNEncoder(1,
                                        num_units,
                                        reducer=ConcatReducer(),
                                        cell_class=cell_class,
                                        dropout=dropout))
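In a pyramidal encoder the time dimension is typically reduced between
layers, so the output sequence is roughly reduction_factor ** (num_layers
- 1) times shorter than the input. A minimal construction sketch
(assuming the OpenNMT-tf v1 API; sizes are illustrative):

    import opennmt as onmt

    # Three bidirectional layers with a time reduction factor of 2: the
    # output sequence is about 4x shorter than the input (2 ** (3 - 1)).
    encoder = onmt.encoders.PyramidalRNNEncoder(
        3, 256, reduction_factor=2, dropout=0.3)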
Example #4
    def encode(self,
               inputs,
               sequence_length=None,
               mode=tf.estimator.ModeKeys.TRAIN):
        inputs = tf.layers.dropout(
            inputs,
            rate=self._dropout,
            training=mode == tf.estimator.ModeKeys.TRAIN)

        states = []
        for i, layer in enumerate(self._layers):
            with tf.variable_scope("layer_%d" % i):
                outputs, state, sequence_length = layer.encode(
                    inputs, sequence_length=sequence_length, mode=mode)
                outputs = tf.layers.dropout(
                    outputs,
                    rate=self._dropout,
                    training=mode == tf.estimator.ModeKeys.TRAIN)
                # Apply residual connections from the third layer onward
                # (i >= 2); earlier layers feed forward directly.
                inputs = outputs + inputs if i >= 2 else outputs
                states.append(state)

        with tf.variable_scope("projection"):
            # Project the final output back to num_units dimensions.
            projected = tf.layers.dense(inputs, self._num_units)
        state = JoinReducer()(states)
        return (projected, state, sequence_length)
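A minimal sketch of driving this encode method in a TF1 graph (assuming
``encoder`` is an instance of the class this method belongs to,
constructed elsewhere; the shapes are illustrative):

    import tensorflow as tf

    # encoder = <an instance of the encoder class above>
    inputs = tf.placeholder(tf.float32, [None, None, 256])  # batch, time, depth
    lengths = tf.placeholder(tf.int32, [None])
    outputs, state, lengths = encoder.encode(
        inputs, sequence_length=lengths, mode=tf.estimator.ModeKeys.TRAIN)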
Example #5
    def __init__(self, encoders, states_reducer=JoinReducer()):
        """Initializes the parameters of the encoder.

    Args:
      encoders: A list of :class:`opennmt.encoders.encoder.Encoder`.
      states_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        states.
    """
        self.encoders = encoders
        self.states_reducer = states_reducer
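A construction sketch for this variant (the enclosing class name is not
shown in the snippet, so ``MultiEncoder`` below is a hypothetical
stand-in; sub-encoders and sizes are illustrative):

    import opennmt as onmt
    from opennmt.layers.reducer import JoinReducer

    # Two sub-encoders whose final states are joined into one structure.
    encoder = MultiEncoder(
        [onmt.encoders.BidirectionalRNNEncoder(2, 256),
         onmt.encoders.UnidirectionalRNNEncoder(2, 256)],
        states_reducer=JoinReducer())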
Example #6
    def encode(self,
               inputs,
               sequence_length=None,
               mode=tf.estimator.ModeKeys.TRAIN):
        encoder_outputs, bidirectional_state, sequence_length = self.bidirectional.encode(
            inputs, sequence_length=sequence_length, mode=mode)
        encoder_outputs, unidirectional_state, sequence_length = self.unidirectional.encode(
            encoder_outputs, sequence_length=sequence_length, mode=mode)

        # Join the final states of the bidirectional and unidirectional
        # stages into a single structure.
        encoder_state = JoinReducer()(
            [bidirectional_state, unidirectional_state])

        return (encoder_outputs, encoder_state, sequence_length)
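This one-bidirectional-layer-feeding-a-unidirectional-stack layout is
the GNMT-style encoder; if the enclosing class is OpenNMT-tf v1's
GoogleRNNEncoder (an assumption based on the attributes used above), it
can be built directly:

    import opennmt as onmt

    # One bidirectional layer followed by a unidirectional stack; the
    # two final states are joined as in the encode method above.
    encoder = onmt.encoders.GoogleRNNEncoder(4, 512, dropout=0.3)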
Example #7
    def __init__(self,
                 encoders,
                 outputs_reducer=ConcatReducer(axis=1),
                 states_reducer=JoinReducer(),
                 outputs_layer_fn=None,
                 combined_output_layer_fn=None):
        """Initializes the parameters of the encoder.

    Args:
      encoders: A list of :class:`opennmt.encoders.encoder.Encoder`.
      outputs_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        outputs.
      states_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        states.
      outputs_layer_fn: A callable or list of callables applied to the
        encoder outputs. If it is a single callable, it is applied to each
        encoder output. Otherwise, the ``i`` th callable is applied to the
        output of encoder ``i``.
      combined_output_layer_fn: A callable to apply on the combined output
        (i.e. the output of :obj:`outputs_reducer`).

    Raises:
      ValueError: if :obj:`outputs_layer_fn` is a list with a size not equal
        to the number of encoders.
    """
        if (isinstance(outputs_layer_fn, list)
                and len(outputs_layer_fn) != len(encoders)):
            raise ValueError(
                "The number of output layers must match the number of encoders; "
                "expected %d layers but got %d." %
                (len(encoders), len(outputs_layer_fn)))
        self.encoders = encoders
        self.outputs_reducer = outputs_reducer
        self.states_reducer = states_reducer
        self.outputs_layer_fn = outputs_layer_fn
        self.combined_output_layer_fn = combined_output_layer_fn
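A sketch of the per-encoder output layers this docstring describes
(assuming the OpenNMT-tf v1 API; the TF1-style dense projections and
sizes are illustrative):

    import tensorflow as tf
    import opennmt as onmt

    # One callable per encoder: each output is projected to depth 256
    # before the reducer concatenates them on the time axis, then the
    # combined output is projected once more.
    encoder = onmt.encoders.ParallelEncoder(
        [onmt.encoders.BidirectionalRNNEncoder(2, 512),
         onmt.encoders.BidirectionalRNNEncoder(2, 512)],
        outputs_layer_fn=[lambda x: tf.layers.dense(x, 256),
                          lambda x: tf.layers.dense(x, 256)],
        combined_output_layer_fn=lambda x: tf.layers.dense(x, 256))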