Example #1
 def build(self, input_shape):
     shape = [self.maximum_position + 1, input_shape.as_list()[-1]]
     initializer = tf.keras.initializers.glorot_uniform()
     self.embedding = tf.Variable(
         initial_value=lambda: initializer(shape, dtype=self.dtype),
         name=compat.name_from_variable_scope("position_encoding/w_embs"))
     super(PositionEmbedder, self).build(input_shape)
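Since this PositionEmbedder variant follows the usual Keras build contract, the table it creates can be consumed with a plain embedding lookup. A minimal sketch of that lookup, assuming an already-built instance and inputs of depth 512 (the instance name and sizes below are hypothetical):

     # "embedder" is a hypothetical, already-built PositionEmbedder instance.
     positions = tf.range(50)                                         # positions 0..49 (illustrative length)
     vectors = tf.nn.embedding_lookup(embedder.embedding, positions)  # shape: [50, 512]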
Example #2
 def build(self, i, dtype=tf.float32):
     # Variant that takes an index i instead of an input shape: it creates a
     # fixed-depth (4) position table and names it under that index.
     shape = [self.maximum_position + 1, 4]
     initializer = tf.keras.initializers.glorot_uniform()
     self.embedding = tf.Variable(
         initial_value=lambda: initializer(shape, dtype=dtype),
         name=compat.name_from_variable_scope("position_encoding/w_embs/" +
                                              str(i)))
Example #3
 def build(self, input_shape=None):
     if self.embedding_file:
         # Load pretrained vectors and let their width define the embedding size.
         pretrained = load_pretrained_embeddings(
             self.embedding_file,
             self.vocabulary_file,
             num_oov_buckets=self.num_oov_buckets,
             with_header=self.embedding_file_with_header,
             case_insensitive_embeddings=self.case_insensitive_embeddings)
         self.embedding_size = pretrained.shape[-1]
         initializer = tf.constant_initializer(
             value=pretrained.astype(self.dtype))
     else:
         initializer = None
     shape = [self.vocabulary_size, self.embedding_size]
     if compat.is_tf2():
         # TF 2.x path: register the matrix as a layer weight.
         self.embedding = self.add_variable(
             name=compat.name_from_variable_scope("w_embs"),
             shape=shape,
             initializer=initializer,
             trainable=self.trainable)
     else:
         # TF 1.x path: create the variable in the current variable scope.
         self.embedding = tf.get_variable("w_embs",
                                          shape=shape,
                                          dtype=self.dtype,
                                          initializer=initializer,
                                          trainable=self.trainable)
     super(WordEmbedder, self).build(input_shape)
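In both branches the result is a [vocabulary_size, embedding_size] matrix stored in self.embedding. A minimal sketch of how that matrix is typically consumed once build has run (the instance name and token ids below are hypothetical):

     # "embedder" is a hypothetical, already-built WordEmbedder instance.
     ids = tf.constant([[3, 17, 42]])                                 # a batch of token ids (illustrative)
     vectors = tf.nn.embedding_lookup(embedder.embedding, ids)        # shape: [1, 3, embedding_size]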
Example #4
 def build(self, input_shape=None):
     shape = [self.vocabulary_size, self.embedding_size]
     initializer = tf.keras.initializers.glorot_uniform()
     self.embedding = tf.Variable(
         initial_value=lambda: initializer(shape, dtype=self.dtype),
         name=compat.name_from_variable_scope("w_char_embs"))
     super(CharEmbedder, self).build(input_shape)
Example #5
 def build(self, input_shape):
     # input_shape[1] holds the (possibly nested) decoder state shapes; record
     # the depth of each flattened state so the output can be split later.
     decoder_shape = input_shape[1]
     self.decoder_state_sizes = [
         shape.as_list()[-1] for shape in compat.nest.flatten(decoder_shape)
     ]
     # A single projection whose output covers all decoder states end to end.
     self.linear = tf.keras.layers.Dense(
         sum(self.decoder_state_sizes),
         activation=self.activation,
         name=compat.name_from_variable_scope("dense"))
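The Dense layer is sized to the sum of the flattened decoder state depths, so its output can be split back into one tensor per state. A minimal sketch of that split, assuming a built instance called bridge and a batched encoder state tensor (both names are hypothetical):

     # "bridge" is a hypothetical, already-built instance; "encoder_state" is a [batch, depth] tensor.
     combined = bridge.linear(encoder_state)                          # [batch, sum(decoder_state_sizes)]
     pieces = tf.split(combined, bridge.decoder_state_sizes, axis=-1) # one tensor per decoder state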
Example #6
 def build(self, input_shape=None):
     shape = [self.vocabulary_size, self.embedding_size]
     if compat.is_tf2():
         self.embedding = self.add_variable(
             name=compat.name_from_variable_scope("w_char_embs"),
             shape=shape)
     else:
         self.embedding = tf.get_variable("w_char_embs",
                                          shape=shape,
                                          dtype=self.dtype)
     super(CharEmbedder, self).build(input_shape)
Example #7
 def build(self, input_shape=None):
   if self.embedding_file:
     pretrained = load_pretrained_embeddings(
         self.embedding_file,
         self.vocabulary_file,
         num_oov_buckets=self.num_oov_buckets,
         with_header=self.embedding_file_with_header,
         case_insensitive_embeddings=self.case_insensitive_embeddings)
     self.embedding_size = pretrained.shape[-1]
     initializer = tf.constant_initializer(value=pretrained.astype(self.dtype))
   else:
     initializer = tf.keras.initializers.glorot_uniform()
   shape = [self.vocabulary_size, self.embedding_size]
   self.embedding = tf.Variable(
       initial_value=lambda: initializer(shape, dtype=self.dtype),
       trainable=self.trainable,
       name=compat.name_from_variable_scope("w_embs"))
   super(WordEmbedder, self).build(input_shape)
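When an embedding_file is given, the pretrained matrix both fixes embedding_size and becomes the initial value through tf.constant_initializer, while the lambda defers running the initializer until the variable is actually created. The same mechanism in isolation, with an illustrative NumPy matrix standing in for the loaded file:

     import numpy as np
     pretrained = np.random.rand(100, 64).astype("float32")          # stand-in for load_pretrained_embeddings(...)
     initializer = tf.constant_initializer(pretrained)
     embedding = tf.Variable(
         initial_value=lambda: initializer(pretrained.shape, dtype=tf.float32),
         trainable=False,                                             # e.g. freeze the pretrained vectors
         name="w_embs")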