Example #1
    def __init__(self,
                 num_layers,
                 num_units,
                 bidirectional=False,
                 residual_connections=False,
                 dropout=0.3,
                 reducer=ConcatReducer(),
                 cell_class=None,
                 **kwargs):
        """Initializes the parameters of the encoder.

    Args:
      num_layers: The number of layers.
      num_units: The number of units in each layer.
      bidirectional: Use a bidirectional RNN.
      residual_connections: If ``True``, each layer input will be added to its
        output.
      reducer: A :class:`opennmt.layers.reducer.Reducer` instance to merge
        bidirectional state and outputs.
      dropout: The probability to drop units in each layer output.
      cell_class: The inner cell class or a callable taking :obj:`num_units` as
        argument and returning a cell. Defaults to a LSTM cell.
      **kwargs: Additional layer arguments.
    """
        super(RNNEncoderV2, self).__init__(**kwargs)
        cell = rnn.make_rnn_cell(num_layers,
                                 num_units,
                                 dropout=dropout,
                                 residual_connections=residual_connections,
                                 cell_class=cell_class)
        self.rnn = rnn.RNN(cell, bidirectional=bidirectional, reducer=reducer)
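
For reference, a minimal instantiation sketch based on the signature above; the import path is an assumption about OpenNMT-tf's module layout, not confirmed by the snippet:

from opennmt.encoders.rnn_encoder import RNNEncoderV2  # assumed import path

# Two bidirectional LSTM layers; forward and backward outputs are merged
# depth-wise by the default ConcatReducer.
encoder = RNNEncoderV2(num_layers=2, num_units=512, bidirectional=True)
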
Example #2
    def __init__(self,
                 num_layers,
                 num_units,
                 reduction_factor=2,
                 cell_class=None,
                 dropout=0.3):
        """Initializes the parameters of the encoder.

    Args:
      num_layers: The number of layers.
      num_units: The number of units in each layer.
      reduction_factor: The time reduction factor.
      cell_class: The inner cell class or a callable taking :obj:`num_units` as
        argument and returning a cell. Defaults to a LSTM cell.
      dropout: The probability to drop units in each layer output.
    """
        super(PyramidalRNNEncoder, self).__init__()
        self.reduction_factor = reduction_factor
        self.state_reducer = JoinReducer()
        self.layers = []

        for _ in range(num_layers):
            self.layers.append(
                BidirectionalRNNEncoder(1,
                                        num_units,
                                        reducer=ConcatReducer(),
                                        cell_class=cell_class,
                                        dropout=dropout))
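
A hedged instantiation sketch (import path assumed). Each layer after the first reduces the time dimension by reduction_factor, so inputs must be long enough to survive the repeated reduction:

from opennmt.encoders.rnn_encoder import PyramidalRNNEncoder  # assumed import path

# Three bidirectional layers; the time dimension is halved between layers.
encoder = PyramidalRNNEncoder(num_layers=3, num_units=256, reduction_factor=2)
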
Example #3
    def __init__(self, num_layers, num_units, dropout=0.3):
        """Initializes the parameters of the encoder.

    Args:
      num_layers: The number of layers.
      num_units: The number of units in each layer.
      dropout: The probability to drop units in each layer output.

    Raises:
      ValueError: if :obj:`num_layers` < 2.
    """
        super(GoogleRNNEncoder, self).__init__()
        if num_layers < 2:
            raise ValueError("GoogleRNNEncoder requires at least 2 layers")

        self.bidirectional = BidirectionalRNNEncoder(
            1,
            num_units,
            reducer=ConcatReducer(),
            cell_class=tf.nn.rnn_cell.LSTMCell,
            dropout=dropout)
        self.unidirectional = UnidirectionalRNNEncoder(
            num_layers - 1,
            num_units,
            cell_class=tf.nn.rnn_cell.LSTMCell,
            dropout=dropout,
            residual_connections=True)
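
A hedged sketch of the GNMT-style stack this builds, one bidirectional layer followed by num_layers - 1 residual unidirectional layers (import path assumed):

from opennmt.encoders.rnn_encoder import GoogleRNNEncoder  # assumed import path

# 8 layers total: 1 bidirectional + 7 residual unidirectional LSTM layers;
# fewer than 2 layers raises the ValueError above.
encoder = GoogleRNNEncoder(num_layers=8, num_units=1024)
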
Example #4
    def __init__(self,
                 num_layers=6,
                 num_units=1024,
                 cell_class=None,
                 dropout=0.3):
        """Initializes the parameters of the encoder.

    Args:
      num_layers: The number of layers.
      num_units: The number of units in each RNN layer and the final output.
      cell_class: The inner cell class or a callable taking :obj:`num_units` as
        argument and returning a cell. Defaults to a layer normalized LSTM cell.
        For efficiency, consider using the standard ``tf.nn.rnn_cell.LSTMCell``
        instead.
      dropout: The probability to drop units in each layer output.
    """
        super(RNMTPlusEncoder, self).__init__()
        if cell_class is None:
            cell_class = tf.contrib.rnn.LayerNormBasicLSTMCell
        self._num_units = num_units
        self._dropout = dropout
        self._layers = [
            BidirectionalRNNEncoder(num_layers=1,
                                    num_units=num_units * 2,
                                    reducer=ConcatReducer(),
                                    cell_class=cell_class,
                                    dropout=0.0) for _ in range(num_layers)
        ]
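
A hedged sketch showing the efficiency trade-off mentioned in the docstring (import path assumed):

import tensorflow as tf
from opennmt.encoders.rnn_encoder import RNMTPlusEncoder  # assumed import path

# Swap the default layer-normalized cell for the faster standard LSTMCell,
# as the docstring suggests.
encoder = RNMTPlusEncoder(num_layers=6, num_units=1024,
                          cell_class=tf.nn.rnn_cell.LSTMCell)
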
Example #5
    def __init__(self,
                 num_layers,
                 num_units,
                 bidirectional=False,
                 residual_connections=False,
                 dropout=0.3,
                 reducer=ConcatReducer(),
                 **kwargs):
        """Initializes the parameters of the encoder.

        Args:
          num_layers: The number of layers.
          num_units: The number of units in each layer output.
          bidirectional: Make each LSTM layer bidirectional.
          residual_connections: If ``True``, each layer input will be added to its
            output.
          dropout: The probability to drop units in each layer output.
          reducer: A :class:`opennmt.layers.Reducer` instance to merge
            bidirectional state and outputs.
          **kwargs: Additional layer arguments.
        """
        lstm_layer = rnn.LSTM(
            num_layers,
            num_units,
            bidirectional=bidirectional,
            reducer=reducer,
            dropout=dropout,
            residual_connections=residual_connections,
        )
        super().__init__(lstm_layer, **kwargs)
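
A hedged sketch, assuming this __init__ belongs to OpenNMT-tf 2.x's RNNEncoder (class name and import are assumptions):

import opennmt

# Residual bidirectional LSTM stack; the default ConcatReducer merges the
# two directions depth-wise.
encoder = opennmt.encoders.RNNEncoder(
    2, 512, bidirectional=True, residual_connections=True, dropout=0.2)
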
Example #6
    def __init__(self, inputters, reducer=ConcatReducer(), dropout=0.0):
        """Initializes a mixed inputter.

    Args:
      inputters: A list of :class:`opennmt.inputters.Inputter`.
      reducer: A :class:`opennmt.layers.Reducer` to merge all inputs.
      dropout: The probability to drop units in the merged inputs.
    """
        super(MixedInputter, self).__init__(inputters, reducer=reducer)
        self.dropout = dropout
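
A hedged usage sketch; the concrete inputter classes and their arguments are illustrative, not prescribed by the snippet:

import opennmt

# Concatenate word embeddings with character-level features on the depth
# axis, then apply dropout to the merged representation.
inputter = opennmt.inputters.MixedInputter(
    [opennmt.inputters.WordEmbedder(embedding_size=512),
     opennmt.inputters.CharConvEmbedder(embedding_size=32, num_outputs=256)],
    dropout=0.1)
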
Example #7
    def __init__(self, maximum_position=32, reducer=ConcatReducer()):
        """Initializes the position encoder.

        Args:
          maximum_position: The maximum position to embed. Positions greater
            than this value will be set to :obj:`maximum_position`.
          reducer: A :class:`opennmt.layers.reducer.Reducer` to merge inputs and
            position encodings.
        """
        super(PositionEmbedder, self).__init__()
        self.maximum_position = maximum_position
        self.embedding = None
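
A hedged sketch (import paths assumed). Positions past maximum_position all reuse the last embedding, per the docstring:

from opennmt.layers.position import PositionEmbedder  # assumed import path
from opennmt.layers.reducer import SumReducer  # assumed import path

# Learn 32 position vectors and add them to the inputs; unlike the default
# ConcatReducer, SumReducer keeps the input depth unchanged.
position_encoder = PositionEmbedder(maximum_position=32, reducer=SumReducer())
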
Example #8
    def __init__(self,
                 encoders,
                 outputs_reducer=ConcatReducer(axis=1),
                 states_reducer=JoinReducer(),
                 outputs_layer_fn=None,
                 combined_output_layer_fn=None,
                 share_parameters=False):
        """Initializes the parameters of the encoder.

    Args:
      encoders: A list of :class:`opennmt.encoders.encoder.Encoder` or a single
        one, in which case the same encoder is applied to each input.
      outputs_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        outputs. If ``None``, defaults to
        :class:`opennmt.layers.reducer.JoinReducer`.
      states_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        states. If ``None``, defaults to
        :class:`opennmt.layers.reducer.JoinReducer`.
      outputs_layer_fn: A callable or list of callables applied to the
        encoders outputs If it is a single callable, it is on each encoder
        output. Otherwise, the ``i`` th callable is applied on encoder ``i``
        output.
      combined_output_layer_fn: A callable to apply on the combined output
        (i.e. the output of :obj:`outputs_reducer`).
      share_parameters: If ``True``, share parameters between the parallel
        encoders. For stateful encoders, simply pass a single encoder instance
        to :obj:`encoders` for parameter sharing.

    Raises:
      ValueError: if :obj:`outputs_layer_fn` is a list with a size not equal
        to the number of encoders.
    """
        if (isinstance(encoders, list) and outputs_layer_fn is not None
                and isinstance(outputs_layer_fn, list)
                and len(outputs_layer_fn) != len(encoders)):
            raise ValueError(
                "The number of output layers must match the number of encoders; "
                "expected %d layers but got %d." %
                (len(encoders), len(outputs_layer_fn)))
        super(ParallelEncoder, self).__init__()
        self.encoders = encoders
        self.outputs_reducer = (
            outputs_reducer if outputs_reducer is not None else JoinReducer())
        self.states_reducer = (
            states_reducer if states_reducer is not None else JoinReducer())
        self.outputs_layer_fn = outputs_layer_fn
        self.combined_output_layer_fn = combined_output_layer_fn
        self.share_parameters = share_parameters
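
A hedged construction sketch; SelfAttentionEncoder is used only as an illustrative sub-encoder (imports assumed):

from opennmt.encoders import ParallelEncoder, SelfAttentionEncoder  # assumed imports
from opennmt.layers.reducer import ConcatReducer, JoinReducer

# Run two encoders on the same input and concatenate their outputs on the
# time axis; passing a single encoder instance instead would share parameters.
encoder = ParallelEncoder(
    [SelfAttentionEncoder(num_layers=4), SelfAttentionEncoder(num_layers=4)],
    outputs_reducer=ConcatReducer(axis=1),
    states_reducer=JoinReducer())
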
Example #9
    def __init__(self,
                 encoders,
                 outputs_reducer=ConcatReducer(axis=1),
                 states_reducer=JoinReducer()):
        """Initializes the parameters of the encoder.

    Args:
      encoders: A list of :class:`opennmt.encoders.encoder.Encoder`.
      outputs_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        outputs.
      states_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        states.
    """
        self.encoders = encoders
        self.outputs_reducer = outputs_reducer
        self.states_reducer = states_reducer
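
This leaner variant constructs the same way; a short hedged sketch reusing the imports from the previous example, with hypothetical sub-encoders:

# encoder_a and encoder_b are hypothetical Encoder instances.
encoder = ParallelEncoder([encoder_a, encoder_b],
                          outputs_reducer=ConcatReducer(axis=1))
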
Example #10
    def __init__(self,
                 encoders,
                 outputs_reducer=ConcatReducer(axis=1),
                 states_reducer=JoinReducer(),
                 outputs_layer_fn=None,
                 combined_output_layer_fn=None):
        """Initializes the parameters of the encoder.

    Args:
      encoders: A list of :class:`opennmt.encoders.encoder.Encoder`.
      outputs_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        outputs.
      states_reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all
        states.
      outputs_layer_fn: A callable or list of callables applied to the
        encoders outputs If it is a single callable, it is on each encoder
        output. Otherwise, the ``i`` th callable is applied on encoder ``i``
        output.
      combined_output_layer_fn: A callable to apply on the combined output
        (i.e. the output of :obj:`outputs_reducer`).

    Raises:
      ValueError: if :obj:`outputs_layer_fn` is a list with a size not equal
        to the number of encoders.
    """
        if (outputs_layer_fn is not None
                and isinstance(outputs_layer_fn, list)
                and len(outputs_layer_fn) != len(encoders)):
            raise ValueError(
                "The number of output layers must match the number of encoders; "
                "expected %d layers but got %d." %
                (len(encoders), len(outputs_layer_fn)))
        self.encoders = encoders
        self.outputs_reducer = outputs_reducer
        self.states_reducer = states_reducer
        self.outputs_layer_fn = outputs_layer_fn
        self.combined_output_layer_fn = combined_output_layer_fn
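
A hedged sketch of the per-output projection hook added in this variant, again reusing the imports above (the layer size and sub-encoders are hypothetical):

import tensorflow as tf

# A single callable is applied to every encoder output before reduction;
# a Keras Dense layer is itself such a callable, so all outputs are
# projected to 256 units by shared weights.
encoder = ParallelEncoder(
    [encoder_a, encoder_b],  # hypothetical Encoder instances
    outputs_layer_fn=tf.keras.layers.Dense(256))
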