Example #1
    def __init__(self, position_size, hparams=None):
        EmbedderBase.__init__(self, hparams=hparams)

        self._num_embeds = position_size
        self._dim = self._hparams.dim
        self._cache_embeddings = self._hparams.cache_embeddings

        num_timescales = self._dim // 2
        min_timescale = self._hparams.min_timescale
        max_timescale = self._hparams.max_timescale

        log_timescale_increment = math.log(
            float(max_timescale) /
            float(min_timescale)) / (tf.cast(num_timescales, tf.float32) - 1)
        num_range = tf.range(num_timescales, dtype=tf.float32)
        inv_timescales = min_timescale * tf.exp(
            num_range * -log_timescale_increment)
        self.inv_timescales = inv_timescales

        if self._cache_embeddings:
            if position_size is None:
                raise ValueError("'position_size' must not be None when "
                                 "'cache_embeddings' is set to True")

            positions = tf.range(position_size, dtype=tf.float32)
            signal = self._compute_embeddings(positions)
            self.signal = signal
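
When `cache_embeddings` is enabled, the constructor precomputes the whole signal table once. `_compute_embeddings` itself is not shown in this snippet; a minimal sketch of it, inferred from Example #3 below (the method body here is an assumption, not the library's verbatim code):

    def _compute_embeddings(self, positions):
        # Outer product of positions and inverse timescales,
        # shape [len(positions), dim // 2].
        scaled_time = (tf.expand_dims(positions, 1)
                       * tf.expand_dims(self.inv_timescales, 0))
        # Concatenate the sine and cosine halves, then zero-pad one
        # column when dim is odd so the result has exactly `dim` channels.
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, [[0, 0], [0, self._dim % 2]])
        return signal
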
Example #2
    def __init__(self, init_value=None, position_size=None, hparams=None):
        EmbedderBase.__init__(self, hparams=hparams)

        if init_value is None and position_size is None:
            raise ValueError(
                "Either `init_value` or `position_size` is required.")

        self._init_parameterized_embedding(init_value, position_size,
                                           self._hparams)

        self._position_size = position_size
        if position_size is None:
            self._position_size = self._num_embeds
        if self._position_size != self._num_embeds:
            raise ValueError('position_size must be equal to '
                             'init_value.shape[0]. Got %d and %d' %
                             (self._position_size, self._num_embeds))

        self._built = True
Example #3
    def __init__(self, position_size, hparams=None):
        EmbedderBase.__init__(self, hparams=hparams)

        dim = self._hparams.dim
        num_timescales = dim // 2
        min_timescale = self._hparams.min_timescale
        max_timescale = self._hparams.max_timescale

        positions = tf.cast(tf.range(position_size), tf.float32)
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (tf.cast(num_timescales, tf.float32) - 1))
        # Geometric progression of inverse timescales, one per sin/cos pair.
        inv_timescales = min_timescale * tf.exp(
            tf.cast(tf.range(num_timescales), tf.float32) *
            -log_timescale_increment)
        # Outer product: scaled_time[p, i] = position p * inv_timescale i.
        scaled_time = (tf.expand_dims(positions, 1)
                       * tf.expand_dims(inv_timescales, 0))
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        # Zero-pad one trailing column when dim is odd.
        signal = tf.pad(signal, [[0, 0], [0, dim % 2]])
        self.signal = signal
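
The resulting table can be reproduced outside TensorFlow as a quick sanity check. A minimal self-contained NumPy sketch, with min_timescale=1.0 and max_timescale=1.0e4 assumed as stand-ins for the hparams above:

    import numpy as np

    def sinusoid_table(position_size, dim, min_timescale=1.0,
                       max_timescale=1.0e4):
        num_timescales = dim // 2
        log_increment = (np.log(max_timescale / min_timescale)
                         / (num_timescales - 1))
        inv_timescales = min_timescale * np.exp(
            np.arange(num_timescales) * -log_increment)
        # scaled_time[p, i] = position p / timescale i
        scaled_time = (np.arange(position_size, dtype=np.float32)[:, None]
                       * inv_timescales[None, :])
        signal = np.concatenate(
            [np.sin(scaled_time), np.cos(scaled_time)], axis=1)
        if dim % 2:  # pad one zero column for odd dim, as tf.pad does above
            signal = np.pad(signal, [(0, 0), (0, 1)])
        return signal  # shape [position_size, dim]
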
Example #4
    def __init__(self, init_value: Optional[torch.Tensor] = None,
                 position_size: Optional[int] = None, hparams=None):

        if init_value is None and position_size is None:
            raise ValueError(
                "Either `init_value` or `position_size` is required.")

        EmbedderBase.__init__(self, init_value=init_value,
                              num_embeds=position_size, hparams=hparams)

        self._position_size = position_size
        if position_size is None:
            self._position_size = self._num_embeds
        if self._position_size != self._num_embeds:
            raise ValueError(
                f"position_size must be equal to init_value.shape[0]. "
                f"Got {self._position_size} and {self._num_embeds}")

        self._built = True
        self._dropout_layer = EmbeddingDropout(self._hparams.dropout_rate)
Example #5
    def __init__(self, init_value=None, vocab_size=None, hparams=None):

        if init_value is None and vocab_size is None:
            raise ValueError(
                "Either `init_value` or `vocab_size` is required.")

        EmbedderBase.__init__(self,
                              init_value=init_value,
                              num_embeds=vocab_size,
                              hparams=hparams)

        self._vocab_size = vocab_size
        if vocab_size is None:
            self._vocab_size = self._num_embeds
        if self._vocab_size != self._num_embeds:
            raise ValueError('vocab_size must be equal to '
                             'init_value.shape[0]. Got %d and %d' %
                             (self._vocab_size, self._num_embeds))

        self._built = True
        self._dropout_layer = EmbeddingDropout(self._hparams.dropout_rate)
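
Examples #2, #4, and #5 share the same size-reconciliation logic. It reduces to a small standalone helper; a minimal sketch (the name `_resolve_num_embeds` is illustrative, not part of the library):

    def _resolve_num_embeds(init_value, size, arg_name):
        # Mirrors the constructor checks above: at least one argument
        # is required, and an explicit size must match the init table.
        if init_value is None and size is None:
            raise ValueError(
                "Either `init_value` or `%s` is required." % arg_name)
        num_embeds = size if init_value is None else init_value.shape[0]
        if size is not None and size != num_embeds:
            raise ValueError(
                "%s must be equal to init_value.shape[0]. Got %d and %d"
                % (arg_name, size, num_embeds))
        return num_embeds
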
Example #6
    def __init__(self, hparams=None):
        EmbedderBase.__init__(self, hparams=hparams)