# Example 1 (score: 0)
 def __init__(self,
              groups=32,
              axis=-1,
              epsilon=1e-5,
              center=True,
              scale=True,
              beta_initializer='zeros',
              gamma_initializer='ones',
              beta_regularizer=None,
              gamma_regularizer=None,
              beta_constraint=None,
              gamma_constraint=None,
              **kwargs):
     """Store the group-normalization configuration on the layer.

     Scalar options are kept verbatim; initializer/regularizer/constraint
     arguments are resolved through the corresponding Keras ``get`` helpers
     so strings, dicts, and ready objects are all accepted.
     """
     super(GroupNormalization, self).__init__(**kwargs)
     self.supports_masking = True
     # Plain hyperparameters, stored as given.
     for attr_name, attr_value in (('groups', groups),
                                   ('axis', axis),
                                   ('epsilon', epsilon),
                                   ('center', center),
                                   ('scale', scale)):
         setattr(self, attr_name, attr_value)
     # Deserialize the beta/gamma configuration objects.
     self.beta_initializer, self.gamma_initializer = (
         initializers.get(beta_initializer), initializers.get(gamma_initializer))
     self.beta_regularizer, self.gamma_regularizer = (
         regularizers.get(beta_regularizer), regularizers.get(gamma_regularizer))
     self.beta_constraint, self.gamma_constraint = (
         constraints.get(beta_constraint), constraints.get(gamma_constraint))
# Example 2 (score: 0)
 def __init__(self,
              epsilon=1e-3,
              mode=0,
              axis=-1,
              momentum=0.99,
              r_max_value=3.,
              d_max_value=5.,
              t_delta=1e-3,
              weights=None,
              beta_init='zero',
              gamma_init='one',
              gamma_regularizer=None,
              beta_regularizer=None,
              **kwargs):
     """Record the batch-renormalization configuration.

     ``beta_init``/``gamma_init`` and the regularizers are resolved through
     the Keras ``get`` helpers; the remaining hyperparameters are stored
     as given.  The base-layer constructor runs last, mirroring the
     original implementation.
     """
     self.supports_masking = True
     # Resolve serializable identifiers into concrete objects.
     self.beta_init, self.gamma_init = (initializers.get(beta_init),
                                        initializers.get(gamma_init))
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     # Scalar hyperparameters.
     self.epsilon, self.mode, self.axis = epsilon, mode, axis
     self.momentum = momentum
     self.r_max_value, self.d_max_value = r_max_value, d_max_value
     self.t_delta = t_delta
     # Initial weights are applied later (presumably in build) — kept aside here.
     self.initial_weights = weights
     # Mode 0 makes train/test behavior differ, so flag the learning phase.
     if self.mode == 0:
         self.uses_learning_phase = True
     super(BatchRenormalization, self).__init__(**kwargs)
# Example 3 (score: 0)
 def __init__(
         self,
         units,
         tied_to=None,  # Enter a layer as input to enforce weight-tying
         activation=None,
         use_bias=True,
         kernel_initializer='glorot_uniform',
         bias_initializer='zeros',
         kernel_regularizer=None,
         bias_regularizer=None,
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
         **kwargs):
     """Dense layer whose kernel can be tied (transposed) to another layer's.

     Args:
         units: Dimensionality of the output space.
         tied_to: Optional layer whose weights this layer reuses
             (transposed).  When ``None`` the layer behaves like an
             ordinary ``Dense`` layer with its own kernel.
         activation, use_bias, *_initializer, *_regularizer, *_constraint:
             Standard Keras ``Dense`` options, resolved through the usual
             ``get`` helpers.
     """
     # Keras convention: allow input_dim as a shortcut for input_shape.
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(DenseTransposeTied, self).__init__(**kwargs)
     self.units = units
     # We add these two properties to save the tied weights.
     # Bug fix: the default tied_to=None used to crash on `.weights`;
     # guard so an untied instance can be constructed.
     self.tied_to = tied_to
     self.tied_weights = self.tied_to.weights if self.tied_to is not None else None
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
# Example 4 (score: 0)
 def __init__(self,
              alpha_initializer='zeros',
              activity_regularizer=None,
              alpha_constraint=None,
              shared_axes=None,
              **kwargs):
     """Configure the SparseReLU activation layer.

     Args:
         alpha_initializer: Initializer for the alpha parameters.
         activity_regularizer: Optional regularizer on the layer output.
         alpha_constraint: Optional constraint on the alpha parameters.
         shared_axes: Axes along which alpha is shared, normalized to a
             list (same convention as Keras ``PReLU``).
     """
     super(SparseReLU, self).__init__(**kwargs)
     self.supports_masking = True
     self.alpha_initializer = initializers.get(alpha_initializer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.alpha_constraint = constraints.get(alpha_constraint)
     # Bug fix: shared_axes was accepted but silently discarded.
     # Store it, normalized to a list, so build() can use it —
     # presumably for PReLU-style weight sharing; confirm against build().
     if shared_axes is None:
         self.shared_axes = None
     elif not isinstance(shared_axes, (list, tuple)):
         self.shared_axes = [shared_axes]
     else:
         self.shared_axes = list(shared_axes)
# Example 5 (score: 0)
 def __init__(self, axis=0, activity_regularizer=None, **kwargs):
     """Remember the target axis and an optional activity regularizer."""
     self.uses_learning_phase = True
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.axis = axis
     super(SumToOne, self).__init__(**kwargs)