def testEmbedding(self):
  """Embedding maps (3, 5) int ids into dense (3, 5, 16) vectors."""
  # np.random.random_integers is deprecated (removed in NumPy 2.0);
  # randint's upper bound is exclusive, so 9 preserves the [1, 8] range.
  x = np.random.randint(1, 9, size=(3, 5))
  with self.test_session() as session:
    y = common_layers.embedding(x, 10, 16)
    session.run(tf.global_variables_initializer())
    res = session.run(y)
  self.assertEqual(res.shape, (3, 5, 16))
 def testEmbedding(self):
   """Embedding lookup maps (3, 5) int ids to (3, 5, 16) vectors."""
   # randint(1, 9) replaces the deprecated random_integers(1, high=8)
   # (removed in NumPy 2.0); both draw from the inclusive range [1, 8].
   x = np.random.randint(1, 9, size=(3, 5))
   with self.test_session() as session:
     y = common_layers.embedding(x, 10, 16)
     session.run(tf.global_variables_initializer())
     res = session.run(y)
   self.assertEqual(res.shape, (3, 5, 16))
 def testFlatten4D3D(self):
     """flatten4d3d collapses axes 1 and 2: (3, 5, 2, 7) -> (3, 10, 7)."""
     # np.random.random_integers is deprecated and removed in NumPy 2.0;
     # randint's upper bound is exclusive, so 9 keeps the [1, 8] range.
     x = np.random.randint(1, 9, size=(3, 5, 2))
     with self.test_session() as session:
         y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7))
         session.run(tf.global_variables_initializer())
         res = session.run(y)
     self.assertEqual(res.shape, (3, 5 * 2, 7))
def transformer_prepare_encoder(inputs, target_space, hparams):
  """Prepare one shard of the model for the encoder.

  Args:
    inputs: a Tensor.
    target_space: a Tensor, id of the target space (embedded below via a
      32-entry lookup table).
    hparams: run hyperparameters; only `hparams.pos` is read here.

  Returns:
    encoder_input: a Tensor, bottom of encoder stack
    encoder_self_attention_bias: a Tensor, containing large negative values
      to implement masked attention and possibly biases for diagonal
      alignments
    encoder_padding: a Tensor
  """
  # Flatten inputs.
  ishape_static = inputs.shape.as_list()
  encoder_input = inputs
  # NOTE(review): embedding_to_padding presumably flags positions whose
  # embedding is all zeros as padding — confirm in common_attention.
  encoder_padding = common_attention.embedding_to_padding(encoder_input)
  encoder_self_attention_bias = common_attention.attention_bias_ignore_padding(
      encoder_padding)
  # Append target_space_id embedding to inputs.
  # The embedding depth matches `inputs` (ishape_static[-1]); the reshape
  # to [1, 1, -1] lets it broadcast-add over every batch/position
  # (assumes target_space is a scalar id — TODO confirm against callers).
  emb_target_space = common_layers.embedding(
      target_space, 32, ishape_static[-1], name="target_space_embedding")
  emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
  encoder_input += emb_target_space
  if hparams.pos == "timing":
    encoder_input = common_attention.add_timing_signal_1d(encoder_input)
  return (encoder_input, encoder_self_attention_bias, encoder_padding)
 def testFlatten4D3D(self):
   """flatten4d3d merges axes 1 and 2: (3, 5, 2, 7) -> (3, 10, 7)."""
   # np.random.random_integers was removed in NumPy 2.0; randint(1, 9)
   # draws from the same inclusive range [1, 8].
   x = np.random.randint(1, 9, size=(3, 5, 2))
   with self.test_session() as session:
     y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7))
     session.run(tf.global_variables_initializer())
     res = session.run(y)
   self.assertEqual(res.shape, (3, 5 * 2, 7))
def transformer_prepare_encoder(inputs, target_space, hparams):
    """Prepare one shard of the model for the encoder.

    Args:
      inputs: a Tensor.
      target_space: a Tensor, id of the target space (embedded below via a
        32-entry lookup table).
      hparams: run hyperparameters; only `hparams.pos` is read here.

    Returns:
      encoder_input: a Tensor, bottom of encoder stack.
      encoder_self_attention_bias: a Tensor of large negative values that
        masks self-attention over padded positions.
      encoder_padding: a Tensor marking padded positions.
    """
    static_shape = inputs.shape.as_list()
    depth = static_shape[-1]
    # NOTE(review): embedding_to_padding presumably flags positions whose
    # embedding is all zeros as padding — confirm in common_attention.
    encoder_padding = common_attention.embedding_to_padding(inputs)
    self_attention_bias = common_attention.attention_bias_ignore_padding(
        encoder_padding)
    # Embed the target-space id with the same depth as the inputs, and
    # reshape to [1, 1, -1] so it broadcast-adds over batch and positions.
    target_space_emb = tf.reshape(
        common_layers.embedding(
            target_space, 32, depth, name="target_space_embedding"),
        [1, 1, -1])
    encoder_input = inputs + target_space_emb
    if hparams.pos == "timing":
        encoder_input = common_attention.add_timing_signal_1d(encoder_input)
    return (encoder_input, self_attention_bias, encoder_padding)
# Example #7 (original scrape label "Beispiel #7", score 0 — snippet separator)
 def inputs_bottom_simple(self, x):
   """Embed input ids, scaled by sqrt(depth) when hparams request it."""
   depth = self._body_input_depth
   if self._model_hparams.multiply_embedding_mode == "sqrt_depth":
     scale = depth**0.5
   else:
     scale = 1.0
   with tf.variable_scope(self.name):
     return common_layers.embedding(
         x, self._vocab_size, depth, multiplier=scale)
 def bottom(self, x):
   """Input embedding for ids x, optionally scaled by sqrt(depth)."""
   use_sqrt = self._model_hparams.multiply_embedding_mode == "sqrt_depth"
   mult = self._body_input_depth**0.5 if use_sqrt else 1.0
   with tf.variable_scope(self.name):
     return common_layers.embedding(
         x, self._vocab_size, self._body_input_depth, multiplier=mult)
# Example #9 (original scrape label "Beispiel #9", score 0 — snippet separator)
 def targets_bottom_simple(self, inputs):
   """Flatten 4-D target inputs to 3-D and embed the RGB pixel values."""
   with tf.variable_scope(self.name):
     # Merge the two middle axes, then embed each pixel value.
     flat = common_layers.flatten4d3d(inputs)
     embedded = common_layers.embedding(
         flat,
         self.targets_dimensionality,
         self._body_input_depth,
         name="input_rgb_embedding")
     if self._model_hparams.multiply_embedding_mode == "sqrt_depth":
       embedded *= self._body_input_depth**0.5
     return embedded
 def targets_bottom(self, inputs):
   """Bottom transform for image targets: flatten, then embed pixels."""
   with tf.variable_scope(self.name):
     # Reshape 4-D inputs down to 3-D and embed the RGB pixel values.
     pixels = common_layers.flatten4d3d(inputs)
     emb = common_layers.embedding(
         pixels,
         self.top_dimensionality,
         self._body_input_depth,
         name="input_rgb_embedding")
     if self._model_hparams.multiply_embedding_mode == "sqrt_depth":
       return emb * self._body_input_depth**0.5
     return emb
# Example #11 (original scrape label "Beispiel #11", score 0 — snippet separator)
def embed_target_space(target_space_id, hidden_size):
    """Look up a learned embedding for the target space id.

    Args:
      target_space_id: id of the target space (looked up in a 32-entry
        embedding table).
      hidden_size: depth of the embedding vectors.

    Returns:
      The embedding reshaped to [1, 1, 1, -1] for broadcast addition.
    """
    emb = common_layers.embedding(
        target_space_id, 32, hidden_size, name="target_space_embedding")
    return tf.reshape(emb, [1, 1, 1, -1])
def embed_target_space(target_space_id, hidden_size):
  """Embed the target space id into a broadcastable 4-D tensor."""
  # 32-entry table; reshape so the result broadcasts over the batch,
  # length, and inner axes of a 4-D activation.
  flat_emb = common_layers.embedding(
      target_space_id, 32, hidden_size, name="target_space_embedding")
  target_emb = tf.reshape(flat_emb, [1, 1, 1, -1])
  return target_emb