def __init__(self, groups=32, axis=-1, epsilon=1e-5, center=True, scale=True,
             beta_initializer='zeros', gamma_initializer='ones',
             beta_regularizer=None, gamma_regularizer=None,
             beta_constraint=None, gamma_constraint=None, **kwargs):
    super(GroupNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.groups = groups      # number of channel groups to normalize over
    self.axis = axis          # channel axis; its size must be divisible by `groups`
    self.epsilon = epsilon
    self.center = center      # if True, learn an offset `beta`
    self.scale = scale        # if True, learn a scale `gamma`
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
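# A minimal NumPy reference of the statistic the constructor above parameterizes
# (a sketch, assuming channels-last input with axis=-1; the helper name and the
# omission of the learned gamma/beta step are assumptions, not part of the class):
import numpy as np

def group_norm_reference(x, groups=32, epsilon=1e-5):
    # x: (batch, height, width, channels); channels must be divisible by `groups`.
    n, h, w, c = x.shape
    x = x.reshape(n, h, w, groups, c // groups)
    # Normalize per sample and per group, over spatial positions and the
    # channels inside the group.
    mean = x.mean(axis=(1, 2, 4), keepdims=True)
    var = x.var(axis=(1, 2, 4), keepdims=True)
    x = (x - mean) / np.sqrt(var + epsilon)
    return x.reshape(n, h, w, c)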
def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
             r_max_value=3., d_max_value=5., t_delta=1e-3, weights=None,
             beta_init='zero', gamma_init='one',
             gamma_regularizer=None, beta_regularizer=None, **kwargs):
    self.supports_masking = True
    self.beta_init = initializers.get(beta_init)
    self.gamma_init = initializers.get(gamma_init)
    self.epsilon = epsilon
    self.mode = mode
    self.axis = axis
    self.momentum = momentum
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.initial_weights = weights
    self.r_max_value = r_max_value    # clipping limit for the renorm factor r
    self.d_max_value = d_max_value    # clipping limit for the renorm offset d
    self.t_delta = t_delta
    if self.mode == 0:
        self.uses_learning_phase = True
    super(BatchRenormalization, self).__init__(**kwargs)
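# A minimal NumPy reference of the Batch Renormalization correction that
# r_max_value / d_max_value above bound (a sketch; the helper name and the
# omitted gamma/beta affine step are assumptions, not part of the class):
import numpy as np

def batch_renorm_reference(x, moving_mean, moving_std,
                           r_max=3., d_max=5., epsilon=1e-3):
    # Normalize with the batch statistics, then correct towards the moving
    # statistics using the clipped factors r and d (Ioffe, 2017).
    batch_mean = x.mean(axis=0)
    batch_std = np.sqrt(x.var(axis=0) + epsilon)
    r = np.clip(batch_std / moving_std, 1. / r_max, r_max)
    d = np.clip((batch_mean - moving_mean) / moving_std, -d_max, d_max)
    return (x - batch_mean) / batch_std * r + d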
def __init__(self, units,
             tied_to=None,  # pass the layer whose kernel should be re-used (weight tying)
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(DenseTransposeTied, self).__init__(**kwargs)
    self.units = units
    # Keep a reference to the tied layer and its weights so the transposed
    # kernel can be re-used in call(); guard against a missing tied layer.
    self.tied_to = tied_to
    self.tied_weights = self.tied_to.weights if self.tied_to is not None else None
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
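# A hedged sketch of the forward pass this constructor supports, not the
# original class's call(): it assumes `tied_to` is a built Dense layer whose
# kernel is tied_to.weights[0], that this layer creates its own bias in
# build(), and that `from keras import backend as K` is in scope.
def call(self, inputs):
    # Re-use the tied layer's kernel transposed: an encoder Dense mapping
    # in_dim -> units yields a units -> in_dim decoder with shared parameters.
    output = K.dot(inputs, K.transpose(self.tied_weights[0]))
    if self.use_bias:
        output = K.bias_add(output, self.bias)
    if self.activation is not None:
        output = self.activation(output)
    return output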
def __init__(self, output_dim, activation=None,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             **kwargs):
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    super().__init__(**kwargs)
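# A hedged sketch of a matching build() for the constructor above (an
# assumption, not the original implementation): it only illustrates how the
# stored initializers would typically be consumed; the weight names `kernel`
# and `bias` are illustrative.
def build(self, input_shape):
    self.kernel = self.add_weight(name='kernel',
                                  shape=(input_shape[-1], self.output_dim),
                                  initializer=self.kernel_initializer,
                                  trainable=True)
    self.bias = self.add_weight(name='bias',
                                shape=(self.output_dim,),
                                initializer=self.bias_initializer,
                                trainable=True)
    super().build(input_shape)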
def __init__(self, alpha_initializer='zeros', activity_regularizer=None,
             alpha_constraint=None, **kwargs):
    super(SparseReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    # Keep the activity regularizer instead of silently dropping it
    # (mirrors SparseLeakyReLU below).
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
def __init__(self, alpha_initializer='zeros', activity_regularizer=None,
             alpha_constraint=None, slope_constraint=None, slope=0.3,
             shared_axes=None, **kwargs):
    super(SparseLeakyReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.slope_initializer = initializers.constant(slope)
    self.slope_constraint = constraints.get(slope_constraint)
    # Record the axes along which the learned parameters are shared
    # (previously accepted but never stored).
    self.shared_axes = shared_axes
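# A hedged sketch of how the stored initializers/constraints in SparseReLU and
# SparseLeakyReLU above would typically become trainable parameters in build().
# This is an assumption, not the original code: the per-feature parameter shape
# is illustrative, and SparseReLU would create only `alpha`, without the slope.
def build(self, input_shape):
    param_shape = (input_shape[-1],)
    self.alpha = self.add_weight(name='alpha',
                                 shape=param_shape,
                                 initializer=self.alpha_initializer,
                                 constraint=self.alpha_constraint)
    # SparseLeakyReLU additionally learns a slope, initialized to `slope`:
    self.slope = self.add_weight(name='slope',
                                 shape=param_shape,
                                 initializer=self.slope_initializer,
                                 constraint=self.slope_constraint)
    super(SparseLeakyReLU, self).build(input_shape)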