Example #1
 def bias_initializer(shape, *args, **kwargs):
     # Typical unit_forget_bias pattern: the bias covers the four LSTM gates
     # [input | forget | cell, output]; the forget-gate slice is initialized
     # to ones, the rest use the layer's configured bias_initializer.
     return K.concatenate([
         self.bias_initializer((self.units,), *args, **kwargs),
         initializers.Ones()((self.units,), *args, **kwargs),
         self.bias_initializer((self.units * 2,), *args, **kwargs),
     ])
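
This closure is the usual way to emulate Keras's unit_forget_bias option in a custom recurrent layer: the bias is built as a single 4 * units vector whose forget-gate slice starts at 1.0 while the other slices use the layer's own bias_initializer. A minimal sketch of how such a closure is typically handed to add_weight inside build() (the MinimalLSTMCell class and its attributes are assumptions for illustration, not part of the original example):

 from tensorflow.keras import backend as K, initializers, layers

 class MinimalLSTMCell(layers.Layer):  # hypothetical layer, for illustration only
     def __init__(self, units, **kwargs):
         super().__init__(**kwargs)
         self.units = units
         self.bias_initializer = initializers.get('zeros')

     def build(self, input_shape):
         def bias_initializer(_, *args, **kwargs):
             # [input gate | forget gate -> ones | cell state, output gate]
             return K.concatenate([
                 self.bias_initializer((self.units,), *args, **kwargs),
                 initializers.Ones()((self.units,), *args, **kwargs),
                 self.bias_initializer((self.units * 2,), *args, **kwargs),
             ])

         # add_weight passes the declared shape to the closure, which ignores it
         # and assembles the full 4 * units bias vector itself.
         self.bias = self.add_weight(
             shape=(self.units * 4,),
             name='bias',
             initializer=bias_initializer)
         self.built = True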
Example #2
 def bias_initializer(_, *args, **kwargs):
     # Same unit_forget_bias pattern as above, with n_hidden units per gate.
     return K.concatenate([
         self.bias_initializer((self.n_hidden,), *args, **kwargs),
         initializers.Ones()((self.n_hidden,), *args, **kwargs),
         self.bias_initializer((self.n_hidden * 2,), *args, **kwargs),
     ])
Example #3
 def build(self, input_shape):
     if len(input_shape) > 2:
         raise ValueError("Input to attention layer hasn't been flattened")
     self.input_dim = input_shape[-1]
     # One attention weight per input feature, initialized to 1 and kept non-negative.
     self.kernel = self.add_weight(
         shape=(self.input_dim,),
         initializer=initializers.Ones(),
         name='kernel',
         constraint=constraints.NonNeg()
         #constraint=constraints.min_max_norm(min_value=0.0, max_value=1.0)
         #constraint=constraints.UnitNorm(axis=self.axis)
     )
     self.input_spec = InputSpec(min_ndim=2, axes={-1: self.input_dim})
     self.built = True
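
Here constraints.NonNeg() is applied after every optimizer update and simply zeroes out negative kernel entries, so each per-feature weight stays interpretable as a non-negative importance. A minimal sketch of a call() that could pair with this build() (the softmax weighting and the feature-scaling behaviour are assumptions for illustration, not taken from the original layer):

 from tensorflow.keras import backend as K

 def call(self, inputs):
     # Assumed behaviour: normalize the non-negative kernel into attention
     # weights and scale each input feature by its weight.
     attention = K.softmax(self.kernel)   # shape (input_dim,), entries sum to 1
     return inputs * attention            # broadcasts over the batch dimension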