Example #1
 def get_config(self):
     base_config = super(PositionEmbedding, self).get_config()
     config = {'input_dim': self.input_dim,
               'output_dim': self.output_dim,
               'merge_mode': self.merge_mode,
               'embeddings_initializer': initializers.serialize(self.embeddings_initializer)}
     return dict(list(base_config.items()) + list(config.items()))
Example #2
 def get_config(self):
     base_config = super(FeedForward, self).get_config()
     config = {
         'units': self.units,
         'activation': self.activation,
         'use_bias': self.use_bias,
         'kernel_initializer': initializers.serialize(self.kernel_initializer)
     }
     return dict(list(base_config.items()) + list(config.items()))
Example #3
 def get_config(self):
     base_config = super(LayerNormalization, self).get_config()
     base_config.update({"center": self.center,
                         "scale": self.scale,
                         "epsilon": self.epsilon,
                         "conditional": self.conditional,
                         "condition_hidden_units": self.condition_hidden_units,
                         "condition_hidden_activation": activations.serialize(self.condition_hidden_activation),
                         "condition_hidden_initializer": initializers.serialize(self.condition_hidden_initializer)})
     return base_config
Example #4
 def get_config(self):
     config = super(MultiHeadAttention, self).get_config()
     config.update({'head_nums': self.head_nums,
                    'head_size': self.head_size,
                    'key_size': self.key_size,
                    'use_bias': self.use_bias,
                    'attention_scale': self.attention_scale,
                    'with_residual_attention': self.with_residual_attention,
                    'kernel_initializer': initializers.serialize(self.kernel_initializer)})
     return config
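
All four examples follow the same pattern: take the parent layer's config, add the layer's own constructor arguments (serializing initializers and activations where needed), and return the merged dict so Keras can rebuild the layer from its config. Below is a minimal, self-contained sketch of that pattern; it assumes TensorFlow's bundled Keras, and the ScaleLayer name and its scale_initializer argument are hypothetical, chosen only for illustration.

from tensorflow import keras
from tensorflow.keras import initializers


class ScaleLayer(keras.layers.Layer):
    # Hypothetical layer used only to illustrate the get_config pattern:
    # it multiplies its input by a trainable per-unit scale vector.

    def __init__(self, units, scale_initializer='ones', **kwargs):
        super(ScaleLayer, self).__init__(**kwargs)
        self.units = units
        self.scale_initializer = initializers.get(scale_initializer)

    def build(self, input_shape):
        # One trainable scale value per unit.
        self.scale = self.add_weight(name='scale',
                                     shape=(self.units,),
                                     initializer=self.scale_initializer)
        super(ScaleLayer, self).build(input_shape)

    def call(self, inputs):
        return inputs * self.scale

    def get_config(self):
        # Same pattern as the examples above: start from the parent config,
        # add this layer's constructor arguments, serialize the initializer.
        config = super(ScaleLayer, self).get_config()
        config.update({
            'units': self.units,
            'scale_initializer': initializers.serialize(self.scale_initializer),
        })
        return config


# get_config feeds from_config, so the rebuilt layer carries the same
# constructor arguments as the original.
layer = ScaleLayer(units=8)
rebuilt = ScaleLayer.from_config(layer.get_config())
assert rebuilt.units == 8

Note the two merging styles in the examples: #1 and #2 concatenate the item lists of the base config and the layer config, while #3 and #4 call update() on the config returned by the parent; both yield the same merged dictionary.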