Example #1
def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0):
  """Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
  with tf.variable_scope(
      name, default_name="embedding", values=[x], reuse=reuse):
    embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
    # On the backwards pass, we want to convert the gradient from
    # an indexed-slices to a regular tensor before sending it back to the
    # parameter server. This avoids excess computation on the parameter server.
    embedding_var = eu.ConvertGradientToTensor(embedding_var)
    emb_x = tf.gather(embedding_var, x)
    if multiplier != 1.0:
      emb_x *= multiplier
    shape, static_shape = tf.shape(emb_x), emb_x.shape.as_list()
    if not static_shape or len(static_shape) < 5:
      return emb_x
    # If x had an extra trailing channel dimension, assume it is 1 (i.e.
    # shape[3] == 1) and drop it by reshaping.
    assert len(static_shape) == 5
    return tf.reshape(emb_x, [shape[0], shape[1], shape[2], static_shape[4]])
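
Note: eu.ConvertGradientToTensor is not defined in this snippet; eu is a module imported elsewhere in the file. As a minimal sketch of the idea the comment above describes, assuming TF 1.x (>= 1.7) so that tf.custom_gradient is available, an identity op can densify its incoming gradient. The helper name below is made up for illustration and the library's real implementation may differ.

import tensorflow as tf

@tf.custom_gradient
def convert_gradient_to_tensor_sketch(x):
  """Identity in the forward pass; densifies the incoming gradient."""
  def grad(dy):
    # tf.gather yields a tf.IndexedSlices gradient for its params argument.
    # Converting it to a dense Tensor here means the parameter server
    # receives one regular tensor instead of (indices, values) pairs.
    return tf.convert_to_tensor(dy)
  return x, grad

Applied to embedding_var before tf.gather, as in embedding() above, the gradient arriving at the variable is a single dense tensor rather than IndexedSlices.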
Example #2
    def _get_weights(self):
        """Create or get concatenated embedding or softmax variable.

    Returns:
       a list of self._num_shards Tensors.
    """
        num_shards = self._model_hparams.symbol_modality_num_shards
        shards = []
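        # Split the vocabulary rows as evenly as possible across the shards;
        # the first (vocab_size % num_shards) shards each get one extra row.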
        for i in xrange(num_shards):
            shard_size = (self._vocab_size // num_shards) + (
                1 if i < self._vocab_size % num_shards else 0)
            var_name = "weights_%d" % i
            shards.append(
                tf.get_variable(var_name, [shard_size, self._body_input_depth],
                                initializer=tf.random_normal_initializer(
                                    0.0, self._body_input_depth**-0.5)))
        if num_shards == 1:
            ret = shards[0]
        else:
            ret = tf.concat(shards, 0)
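        # As in the embedding() example above, densify the gradient before
        # it is sent back to the parameter server.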
        ret = eu.ConvertGradientToTensor(ret)
        return ret
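
The shard_size expression splits the vocabulary as evenly as possible: the first vocab_size % num_shards shards get one extra row, so the shard sizes always sum to vocab_size and tf.concat(shards, 0) restores a [vocab_size, body_input_depth] matrix. A quick pure-Python sanity check of that arithmetic (shard_sizes is a hypothetical helper written only for this illustration):

def shard_sizes(vocab_size, num_shards):
  # Mirror the shard_size expression used in _get_weights() above.
  return [(vocab_size // num_shards) + (1 if i < vocab_size % num_shards else 0)
          for i in range(num_shards)]

assert shard_sizes(10, 3) == [4, 3, 3]        # remainder of 1 goes to the first shard
assert sum(shard_sizes(32100, 16)) == 32100   # sizes always sum to vocab_size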