def __init__(self, rank, filters, kernel_size, strides=1, padding='valid',
             data_format=None, dilation_rate=1, activation=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, spectral_normalization=True, **kwargs):
    """Configure a rank-N convolution layer with optional spectral normalization."""
    super(_ConvSN, self).__init__(**kwargs)
    self.rank = rank
    self.filters = filters
    # Scalar-or-tuple arguments are normalized into rank-length tuples.
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Expected input rank is batch + `rank` spatial dims + channels.
    self.input_spec = InputSpec(ndim=self.rank + 2)
    self.spectral_normalization = spectral_normalization
    # Power-iteration vector for spectral norm; created later (not here).
    self.u = None
def __init__(self, filters, kernel_size, activation=None, use_bias=True,
             kernel_initializer='uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None, normalize=False,
             offset=None, in_channels=None, **kwargs):
    """Continuous convolution over point neighborhoods (Linf radius search).

    Args:
        filters: number of output channels.
        kernel_size: per-axis kernel extent; all entries must be equal (cubic).
        offset: optional 3-vector added to query positions. Defaults to zeros
            for odd kernels and -0.5 for even kernels (grid alignment).
        in_channels: accepted for API compatibility; not stored here.

    Raises:
        ValueError: if ``kernel_size`` is not cubic.
    """
    from tensorflow.keras import activations, initializers, regularizers
    self.filters = filters
    self.kernel_size = kernel_size
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.normalize = normalize
    # ValueError (was a bare Exception) is the conventional type for a bad
    # argument value; callers catching Exception still catch it.
    if not (np.asarray(kernel_size) == kernel_size[0]).all():
        raise ValueError("Only cubic kernel sizes are supported.")
    if offset is None:
        if kernel_size[0] % 2:
            self.offset = tf.zeros(shape=(3,))
        else:
            self.offset = tf.fill([3], -0.5)
    else:
        self.offset = offset
    self.fixed_radius_search = FixedRadiusSearch(metric='Linf',
                                                 ignore_query_point=False,
                                                 return_distances=False)
    super().__init__(**kwargs)
def __init__(self, t_left_initializer='zeros',
             a_left_initializer=initializers.RandomUniform(minval=0, maxval=1),
             t_right_initializer=initializers.RandomUniform(minval=0, maxval=5),
             a_right_initializer='ones', shared_axes=None, **kwargs):
    """S-shaped ReLU: stores the four piecewise-linear parameter initializers."""
    super(SReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.t_left_initializer = initializers.get(t_left_initializer)
    self.a_left_initializer = initializers.get(a_left_initializer)
    self.t_right_initializer = initializers.get(t_right_initializer)
    self.a_right_initializer = initializers.get(a_right_initializer)
    # Normalize shared_axes to None or a list of axis indices.
    if shared_axes is None:
        self.shared_axes = None
    elif isinstance(shared_axes, (list, tuple)):
        self.shared_axes = list(shared_axes)
    else:
        self.shared_axes = [shared_axes]
def __init__(self, exp_base: int, num_nodes: int,
             use_bias: Optional[bool] = True,
             activation: Optional[Text] = None,
             kernel_initializer: Optional[Text] = 'glorot_uniform',
             bias_initializer: Optional[Text] = 'zeros', **kwargs) -> None:
    """Store layer hyperparameters; sub-node layers are populated later."""
    # Keras compatibility: translate legacy `input_dim` into `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super().__init__(**kwargs)
    self.exp_base = exp_base
    self.num_nodes = num_nodes
    self.nodes = []  # filled in when the layer is built
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
def __init__(self, kernel_initializer='ones', kernel_regularizer=None,
             kernel_constraint=regularizers.l1_l2(l1=1e-3, l2=1e-3), **kwargs):
    """Deep feature-selection layer: a per-feature trainable scaling kernel.

    NOTE(review): the default for `kernel_constraint` is a *regularizer*
    object passed through `constraints.get` — this looks like a mix-up of
    constraint vs. regularizer, but the default is kept as-is to preserve
    behavior; confirm against callers before changing.
    """
    # Keras compatibility: accept legacy `input_dim`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(DFS, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.supports_masking = True
def __init__(self, axis=-1, center=True, scale=True, beta_initializer='zeros',
             gamma_initializer='ones', moving_mean_initializer='zeros',
             moving_variance_initializer='ones', **kwargs):
    """Normalization layer with frozen statistics; epsilon is fixed at 1e-3."""
    super(FixedNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis
    self.epsilon = 1e-3  # not configurable by design
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(
        moving_variance_initializer)
def __init__(self, heads, kernel_initializer='glorot_normal', use_ln=False,
             query_units=16, dropout_rate=0.0, res_connect=True, **kwargs):
    """Multi-head self-attention configuration.

    Args:
        heads: number of attention heads.
        use_ln: whether to apply layer normalization.
        query_units: dimensionality of the query projection.
        dropout_rate: attention dropout probability.
        res_connect: whether to add a residual connection.
    """
    self.heads = heads
    self.kernel_initializer = initializers.get(
        get_initializer(kernel_initializer))
    self.use_ln = use_ln
    self.query_units = query_units
    self.dropout_rate = dropout_rate
    # Keep the historical misspelled attributes for backward compatibility
    # with any code in this file that reads them, and expose correctly
    # spelled aliases as well.
    self.res_connent = res_connect
    self.res_connect = res_connect
    self.suuport_mask = True
    super(SelfAttention, self).__init__(**kwargs)
    # The real Keras flag is `supports_masking`; the misspelled assignment
    # above never enabled mask propagation. Set it after super().__init__()
    # so the base Layer cannot reset it.
    self.supports_masking = True
def __init__(self, units, dropout_list: list = None, activation=None,
             kernel_initializer='glorot_norm', bias_initializers='zeros',
             l2_reg_list=None, use_bn=False, use_gate=False, **kwargs):
    """Multi-layer perceptron block with optional dropout, batch-norm and gating.

    NOTE(review): the default 'glorot_norm' is not a standard Keras
    initializer name; presumably the project helper `get_initializer`
    maps it — verify before relying on it.
    """
    # Hidden layer sizes, coerced to ints.
    self.units = [int(u) for u in units]
    self.dropout_list = dropout_list
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(
        get_initializer(kernel_initializer))
    self.bias_initializer = initializers.get(bias_initializers)
    self.l2_reg_list = l2_reg_list
    self.use_bn = use_bn
    self.use_gate = use_gate
    super(Mlp, self).__init__(**kwargs)
def __init__(self, psi=0.0, cutoff=None, momentum=0.99,
             moving_mean_initializer='zeros', **kwargs):
    """Truncation trick (StyleGAN-style): blend latents toward a moving mean.

    Args:
        psi: truncation strength.
        cutoff: layer index after which truncation is disabled (None = all).
        momentum: EMA momentum for the moving mean.
    """
    super(TruncationTrick, self).__init__(**kwargs)
    self.psi = psi
    self.cutoff = cutoff
    self.momentum = momentum
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
def __init__(self, part_outputs, kernel_initializer='glorot_uniform',
             activation=None, use_bias=True, bias_initializer='zeros',
             **kwargs):
    """Layer producing one dense output per body part.

    Args:
        part_outputs: number of per-part output units.
        **kwargs: forwarded to the Keras base Layer (name, dtype, ...);
            previously accepted arguments like `name` were impossible to set
            because no **kwargs existed.
    """
    super(PartsLayer, self).__init__(**kwargs)
    self.part_outputs = part_outputs
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    # Route through initializers.get for consistency with kernel_initializer
    # (accepts both string identifiers and initializer instances).
    self.bias_initializer = initializers.get(bias_initializer)
def _get_regularisers_from_keywords(self, kwargs):
    """Pop initializer/regularizer/constraint options for the kernel, bias
    and attention-kernel weights out of `kwargs`, resolving each through the
    corresponding Keras getter. Pop order matches the original per-weight
    grouping (initializer, regularizer, constraint)."""
    for prefix, default_init in (("kernel", "glorot_uniform"),
                                 ("bias", "zeros"),
                                 ("attn_kernel", "glorot_uniform")):
        setattr(self, prefix + "_initializer",
                initializers.get(kwargs.pop(prefix + "_initializer",
                                            default_init)))
        setattr(self, prefix + "_regularizer",
                regularizers.get(kwargs.pop(prefix + "_regularizer", None)))
        setattr(self, prefix + "_constraint",
                constraints.get(kwargs.pop(prefix + "_constraint", None)))
def __init__(self, units, out_units, hidden_layers=1, dropout_rate=0.0,
             random_input_order=False, activation='elu',
             out_activation='sigmoid', kernel_initializer='glorot_uniform',
             bias_initializer='zeros', out_kernel_initializer='glorot_uniform',
             out_bias_initializer='zeros', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    """Masked autoregressive dense (MADE-style) layer configuration.

    Args:
        units: hidden units per masked layer.
        out_units: output units.
        hidden_layers: number of hidden masked layers.
        out_activation: activation of the output layer (default 'sigmoid').
    """
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(MaskingDense, self).__init__(**kwargs)
    self.input_sel = None
    self.random_input_order = random_input_order
    # Clamp dropout rate into [0, 1].
    self.rate = min(1., max(0., dropout_rate))
    self.kernel_sels = []
    self.units = units
    self.out_units = out_units
    self.hidden_layers = hidden_layers
    self.activation = activations.get(activation)
    # BUG FIX: previously built from `activation`, silently ignoring the
    # `out_activation` argument.
    self.out_activation = activations.get(out_activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.out_kernel_initializer = initializers.get(out_kernel_initializer)
    self.out_bias_initializer = initializers.get(out_bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
def __init__(self, activation: OptStrOrCallable = None,
             use_bias: bool = True,
             kernel_initializer: OptStrOrCallable = 'glorot_uniform',
             bias_initializer: OptStrOrCallable = 'zeros',
             kernel_regularizer: OptStrOrCallable = None,
             bias_regularizer: OptStrOrCallable = None,
             activity_regularizer: OptStrOrCallable = None,
             kernel_constraint: OptStrOrCallable = None,
             bias_constraint: OptStrOrCallable = None,
             **kwargs):
    """Resolve and store the common dense-layer hyperparameters.

    Args:
        activation: activation for each sub-network, e.g. 'relu', 'tanh',
            'sigmoid', 'softmax'. Default None.
        use_bias: whether the layer uses a bias term. Default True.
        kernel_initializer: kernel weight initializer. Default
            'glorot_uniform'.
        bias_initializer: bias initializer. Default 'zeros'.
        kernel_regularizer: regularizer for the kernel weights.
        bias_regularizer: regularizer for the bias.
        activity_regularizer: regularizer applied to the layer output.
        kernel_constraint: Keras constraint for kernel values.
        bias_constraint: Keras constraint for bias values.
        **kwargs: forwarded to the base layer; legacy `input_dim` is
            translated into `input_shape`.
    """
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super().__init__(**kwargs)
def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, final_gamma=False,
             center=True, scale=True, beta_initializer='zeros',
             gamma_initializer='ones', mean_weights_initializer='ones',
             variance_weights_initializer='ones',
             moving_mean_initializer='ones',
             moving_variance_initializer='zeros', beta_regularizer=None,
             gamma_regularizer=None, mean_weights_regularizer=None,
             variance_weights_regularizer=None, beta_constraint=None,
             gamma_constraint=None, mean_weights_constraints=None,
             variance_weights_constraints=None, **kwargs):
    """Switchable Normalization: learns a mixture over IN/LN/BN statistics.

    NOTE(review): the defaults `moving_mean_initializer='ones'` and
    `moving_variance_initializer='zeros'` are the reverse of the usual
    convention (mean zeros / variance ones). They are kept unchanged here
    to preserve behavior — confirm with the original authors.
    """
    super(SwitchNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    # `final_gamma` forces a zero-initialized gamma (e.g. for residual
    # blocks), overriding `gamma_initializer`.
    gamma_init = 'zeros' if final_gamma else gamma_initializer
    self.gamma_initializer = initializers.get(gamma_init)
    self.mean_weights_initializer = initializers.get(
        mean_weights_initializer)
    self.variance_weights_initializer = initializers.get(
        variance_weights_initializer)
    self.moving_mean_initializer = initializers.get(
        moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(
        moving_variance_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.mean_weights_regularizer = regularizers.get(
        mean_weights_regularizer)
    self.variance_weights_regularizer = regularizers.get(
        variance_weights_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.mean_weights_constraints = constraints.get(
        mean_weights_constraints)
    self.variance_weights_constraints = constraints.get(
        variance_weights_constraints)
def __init__(self, activation=None, use_bias=False,
             real_kernel_initializer='uniform', imag_kernel_initializer='zeros',
             kernel_regularizer=None, bias_initializer='zeros', seed=None,
             **kwargs):
    """Complex-valued deconvolution layer configuration.

    Args:
        real_kernel_initializer: initializer for the real kernel part.
        imag_kernel_initializer: initializer for the imaginary kernel part.
        seed: RNG seed; when None a random seed in [1, 10**7) is drawn.
    """
    super(Complex_deconv, self).__init__(**kwargs)
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.real_kernel_initializer = initializers.get(real_kernel_initializer)
    self.imag_kernel_initializer = initializers.get(imag_kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_initializer = initializers.get(bias_initializer)
    # `is None` instead of `== None` (identity check is the Python idiom),
    # and an exact int bound instead of the float literal 10e6.
    if seed is None:
        self.seed = np.random.randint(1, 10_000_000)
    else:
        self.seed = seed
def __init__(self, units, activation='relu', use_bias=True,
             kernel_initializer='glorot_uniform', **kwargs):
    """Position-wise feed-forward block configuration."""
    super(FeedForward, self).__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
def __init__(self, L_dim, output_dim, kernel_initializer='glorot_uniform',
             kernel_regularizer=None, use_bias=True, use_gated=False,
             **kwargs):
    """Multiple-instance-learning attention pooling.

    The v, w and u weight matrices all share the same initializer and
    regularizer settings.
    """
    self.L_dim = L_dim
    self.output_dim = output_dim
    self.use_bias = use_bias
    self.use_gated = use_gated
    # One initializer/regularizer spec fans out to the three weight sets.
    # initializers.get is called per weight so each gets its own instance.
    self.v_init = initializers.get(kernel_initializer)
    self.w_init = initializers.get(kernel_initializer)
    self.u_init = initializers.get(kernel_initializer)
    self.v_regularizer = regularizers.get(kernel_regularizer)
    self.w_regularizer = regularizers.get(kernel_regularizer)
    self.u_regularizer = regularizers.get(kernel_regularizer)
    super(Mil_Attention, self).__init__(**kwargs)
def __init__(self, hash_bucket_size, embedding_dim, regularizer=0.5,
             initializer='uniform', trainable=False, **kwargs):
    """Hashed embedding table configuration.

    Args:
        hash_bucket_size: number of hash buckets (embedding rows).
        embedding_dim: embedding vector size.
        regularizer: L2 regularization factor for the embeddings.
        trainable: whether the embedding table is trainable.
        **kwargs: forwarded to the base Layer. Previously these were
            accepted but silently dropped, so e.g. `name=` had no effect.
    """
    super(HashEmbeddings, self).__init__(**kwargs)
    self._hash_bucket_size = hash_bucket_size
    self._embedding_dim = embedding_dim
    self._regularizer = regularizers.l2(regularizer)
    self._initializer = initializers.get(initializer)
    self._trainable = trainable
def __init__(self, attn_kernel_initializer="glorot_uniform",
             attn_kernel_regularizer=None, attn_kernel_constraint=None,
             **kwargs):
    """Store initializer/regularizer/constraint for the attention kernel."""
    super().__init__(**kwargs)
    self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
    self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
    self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
def __init__(self, channels, dim_capsule, kernel_size, strides=(1, 1),
             kernel_initializer="glorot_uniform", **kwargs):
    """Primary capsule layer: a 2-D convolution reshaped into capsules.

    Args:
        channels: number of capsule channels.
        dim_capsule: dimensionality of each capsule vector.
        kernel_size: conv kernel size (scalar or 2-tuple).
        strides: conv strides (scalar or 2-tuple).
    """
    self.out_channels = channels
    self.dim_capsule = dim_capsule
    # Scalars are broadcast to 2-tuples for the underlying convolution.
    self.kernel_size = normalize_tuple(kernel_size, 2, "kernel_size")
    self.strides = normalize_tuple(strides, 2, "strides")
    self.kernel_initializer = initializers.get(kernel_initializer)
    super(PrimaryCaps2D, self).__init__(**kwargs)
def __init__(self, units: int,
             kernel_initializer: t.Union[str, t.Callable] = 'glorot_uniform',
             bias_initializer: t.Union[str, t.Callable] = 'zeros',
             **kwargs):
    """Layer producing a mean and a standard deviation head.

    :param units: number of hidden units
    :param kernel_initializer: weight initializer (name or callable)
    :param bias_initializer: bias initializer (name or callable)
    :param kwargs: forwarded to the base layer
    """
    # TODO check arguments
    super().__init__(**kwargs)
    self.units = units
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    # Weight placeholders; materialized in build().
    self.mean_kernel = None
    self.mean_bias = None
    self.std_kernel = None
    self.std_bias = None
def __init__(self, input_dim, output_dim, merge_mode='add',
             embeddings_initializer='zeros', custom_position_ids=False,
             **kwargs):
    """Trainable position embedding.

    Args:
        input_dim: maximum sequence length (number of positions).
        output_dim: embedding size.
        merge_mode: how positions are combined with the input ('add', ...).
        custom_position_ids: whether position ids are supplied by the caller.
    """
    super(PositionEmbedding, self).__init__(**kwargs)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.merge_mode = merge_mode
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.custom_position_ids = custom_position_ids
def __init__(self, units, concat=False, use_bias=True, agg_method='mean',
             activation=None, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    """GraphSAGE-style aggregation layer.

    Args:
        units: output units.
        concat: if True, self and neighbor features are concatenated
            (output_dim = 2 * units), otherwise combined (output_dim = units).
        agg_method: one of 'mean', 'sum', 'max', 'min'.

    Raises:
        ValueError: if `agg_method` is not a supported aggregator name.
    """
    super().__init__(**kwargs)
    self.units = units
    self.concat = concat
    self.use_bias = use_bias
    self.agg_method = agg_method
    aggregators = {'mean': tf.reduce_mean,
                   'sum': tf.reduce_sum,
                   'max': tf.reduce_max,
                   'min': tf.reduce_min}
    # Fail fast with a descriptive error instead of a bare KeyError.
    if agg_method not in aggregators:
        raise ValueError(
            'Unknown agg_method {!r}; expected one of {}'.format(
                agg_method, sorted(aggregators)))
    self.aggregator = aggregators[agg_method]
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.output_dim = units * 2 if concat else units
def __init__(self, filters, kernel_size, groups, strides=(1, 1),
             padding='valid', data_format=None, dilation_rate=(1, 1),
             activation=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    """Grouped 2-D convolution: resolves Keras identifiers and delegates
    everything to the parent conv layer.

    NOTE(review): `padding` is upper-cased before being forwarded —
    presumably the parent expects TF-style 'VALID'/'SAME'; confirm against
    the parent class's normalization.
    """
    super(GroupConv2D, self).__init__(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        groups=groups,
        strides=strides,
        padding=padding.upper(),
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)
def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid',
             data_format=None, activation=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, spectral_normalization=True, **kwargs):
    """Transposed 2-D convolution with optional spectral normalization."""
    # Fall back to the globally configured data format.
    if data_format is None:
        data_format = K.image_data_format()
    super(Conv2DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)
    self.spectral_normalization = spectral_normalization
    # Left singular vector estimate for power iteration,
    # shape [1, out_channels].
    self.u = K.random_normal_variable(
        [1, filters], 0, 1, dtype=self.dtype, name="sn_estimate")
def __init__(self, units, num_experts, num_tasks, input_dimension):
    """Multi-gate Mixture-of-Experts: shared experts + one softmax gate
    per task.

    Args:
        units: hidden units per expert.
        num_experts: number of expert networks.
        num_tasks: number of tasks (one gate each).
        input_dimension: size of the input feature vector.
    """
    super(MMoE, self).__init__()
    self.expert_activation = activations.get('relu')
    self.gate_activation = activations.get('softmax')
    self.expert_kernel_initializer = initializers.get('VarianceScaling')
    self.gate_kernel_initializer = initializers.get('VarianceScaling')
    self.expert_bias_initializer = initializers.get('zeros')
    self.gate_bias_initializer = initializers.get('zeros')
    # All experts live in a single 3-D kernel: (input_dim, units, experts).
    self.expert_kernels = self.add_weight(
        name='expert_kernel',
        shape=(input_dimension, units, num_experts),
        initializer=self.expert_kernel_initializer,
        trainable=True)
    self.expert_bias = self.add_weight(
        name='expert_bias',
        shape=(units, num_experts),
        initializer=self.expert_bias_initializer,
        trainable=True)
    # Per-task gate weights. All kernels are created before all biases so
    # the weight-creation order (and thus checkpoint order) is preserved.
    self.gate_kernels = [
        self.add_weight(name='gate_kernel_task_{}'.format(task),
                        shape=(input_dimension, num_experts),
                        initializer=self.gate_kernel_initializer,
                        trainable=True)
        for task in range(num_tasks)]
    self.gate_bias = [
        self.add_weight(name='gate_bias_task_{}'.format(task),
                        shape=(num_experts,),
                        initializer=self.gate_bias_initializer,
                        trainable=True)
        for task in range(num_tasks)]
def __init__(self, T=3, n_hidden=512, activation=None,
             activation_lstm='tanh', recurrent_activation='hard_sigmoid',
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal', bias_initializer='zeros',
             use_bias=True, unit_forget_bias=True, kernel_regularizer=None,
             recurrent_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, recurrent_constraint=None,
             bias_constraint=None, **kwargs):
    """Recurrent layer configuration (LSTM-style cell unrolled T steps).

    Args:
        T: number of recurrent steps.
        n_hidden: hidden state size.
        activation_lstm: cell activation; recurrent_activation gates it.
        unit_forget_bias: if True, initialize the forget-gate bias to 1.
    """
    super().__init__(**kwargs)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    # The original assigned kernel_regularizer twice; the duplicate
    # (no-op) assignment has been removed.
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.activation_lstm = activations.get(activation_lstm)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.T = T
    self.n_hidden = n_hidden
def __init__(self, kernel_size, num_capsule, pos_dim, app_dim, strides=1,
             padding='same', kernel_initializer='truncated_normal',
             activation='relu', data_format="channels_last",
             debug_print=True, **kwargs):
    """Primary Matwo capsule layer: capsules carry a positional part
    (pos_dim) and an appearance part (app_dim).

    Note: `activation` is stored as-is (a name/callable), not resolved
    through activations.get here.
    """
    super(PrimaryCaps2dMatwo, self).__init__(**kwargs)
    self.kernel_size = kernel_size
    self.num_capsule = num_capsule
    self.pos_dim = pos_dim
    self.app_dim = app_dim
    self.strides = strides
    self.padding = padding
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activation
    self.data_format = data_format
    self.debug_print = debug_print
def __init__(self, units, use_bias=True, sparse_input=False,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    """Graph convolution layer configuration.

    Args:
        units: output feature size.
        sparse_input: whether the node-feature input is a SparseTensor.
    """
    # Keras compatibility: accept legacy `input_dim`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(GCNLayer, self).__init__(**kwargs)
    self.units = units
    self.use_bias = use_bias
    self.sparse_input = sparse_input
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
def __init__(self, trainable_kernel=False, activation=None,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, **kwargs):
    """Layer with an optionally trainable kernel and configurable activation."""
    super().__init__(**kwargs)
    self.trainable_kernel = trainable_kernel
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)