def __init__(self, reducer=None, **kwargs):
        """Initializes the position encoder.

        Args:
          reducer: A :class:`opennmt.layers.Reducer` to merge inputs and position
            encodings. Defaults to :class:`opennmt.layers.SumReducer`.
          **kwargs: Additional layer keyword arguments.
        """
        super(PositionEncoder, self).__init__(**kwargs)
        # Use a None sentinel instead of a default SumReducer() instance:
        # a default argument is evaluated once, so every encoder would
        # otherwise share the same reducer object.
        if reducer is None:
            reducer = SumReducer()
        self.reducer = reducer
# Example 2
    def __init__(self, maximum_position=128, reducer=None):
        """Initializes the position encoder.

        Args:
          maximum_position: The maximum position to embed. Positions greater
            than this value will be set to :obj:`maximum_position`.
          reducer: A :class:`opennmt.layers.reducer.Reducer` to merge inputs and
            position encodings. Defaults to
            :class:`opennmt.layers.reducer.SumReducer`.
        """
        # Use a None sentinel instead of a default SumReducer() instance:
        # a default argument is evaluated once, so every embedder would
        # otherwise share the same reducer object.
        if reducer is None:
            reducer = SumReducer()
        super(PositionEmbedder, self).__init__(reducer=reducer)
        self.maximum_position = maximum_position
# Example 3
    def __init__(self, reducer=None, **kwargs):
        """Initializes the position encoder.

        Args:
          reducer: A :class:`opennmt.layers.Reducer` to merge inputs and position
            encodings. Defaults to :class:`opennmt.layers.SumReducer`.
          **kwargs: Additional layer keyword arguments.
        """
        super(PositionEncoder, self).__init__(**kwargs)
        # The default reducer picks up the dtype passed to this layer, if any.
        self.reducer = (
            reducer
            if reducer is not None
            else SumReducer(dtype=kwargs.get("dtype"))
        )
# Example 4
    def __init__(self,
                 num_layers,
                 num_units,
                 reducer=None,
                 cell_class=None,
                 dropout=0.3,
                 residual_connections=False):
        """Initializes the parameters of the encoder.

        Args:
          num_layers: The number of layers.
          num_units: The number of units in each layer.
          reducer: A :class:`opennmt.layers.reducer.Reducer` instance to merge
            bidirectional state and outputs. Defaults to
            :class:`opennmt.layers.reducer.SumReducer`.
          cell_class: The inner cell class or a callable taking :obj:`num_units` as
            argument and returning a cell. Defaults to a LSTM cell.
          dropout: The probability to drop units in each layer output.
          residual_connections: If ``True``, each layer input will be added to its
            output.

        Raises:
          ValueError: when using :class:`opennmt.layers.reducer.ConcatReducer` and
            :obj:`num_units` is not divisible by 2.
        """
        # Use a None sentinel instead of a default SumReducer() instance:
        # a default argument is evaluated once and would be shared by every
        # encoder built with the default.
        if reducer is None:
            reducer = SumReducer()
        if isinstance(reducer, ConcatReducer):
            if num_units % 2 != 0:
                raise ValueError(
                    "num_units must be divisible by 2 to use the ConcatReducer."
                )
            # Integer division: "/=" is true division in Python 3 and would
            # turn the unit count into a float; keep num_units an int.
            num_units //= 2

        self.reducer = reducer

        super(BidirectionalRNNEncoder,
              self).__init__(num_layers,
                             num_units,
                             cell_class=cell_class,
                             dropout=dropout,
                             residual_connections=residual_connections)
# Example 5
 def __init__(self, reducer=None):
     """Initializes the position encoder.

     Args:
       reducer: A :class:`Reducer` to merge inputs and position encodings.
         Defaults to :class:`SumReducer`.
     """
     # None sentinel: a default SumReducer() instance would be evaluated
     # once and shared by every instance built with the default.
     self.reducer = SumReducer() if reducer is None else reducer
# Example 6
 def __init__(self, reducer=None):
     """Initializes the position encoder.

     Args:
       reducer: A :class:`Reducer` to merge inputs and position encodings.
         Defaults to :class:`SumReducer`.
     """
     super(PositionEncoder, self).__init__()
     # None sentinel: a default SumReducer() instance would be evaluated
     # once and shared by every instance built with the default.
     self.reducer = SumReducer() if reducer is None else reducer