def __init__(self, output_dim, init='glorot_uniform', activation='linear',
             reconstruction_activation='linear', weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             output_reconstruction=False, W_constraint=None, b_constraint=None,
             input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.reconstruction_activation = activations.get(reconstruction_activation)
    self.output_reconstruction = output_reconstruction
    self.output_dim = output_dim
    self.pretrain = True
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    self.initial_weights = weights
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    self.input = K.placeholder(ndim=2)
    super(SymmetricAutoencoder, self).__init__(**kwargs)

def __init__(self, output_dim, init='glorot_uniform', activation='linear',
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             input_dim=None, input_length1=None, input_length2=None, **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    self.initial_weights = weights
    self.input_dim = input_dim
    self.input_length1 = input_length1
    self.input_length2 = input_length2
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length1, self.input_length2, self.input_dim)
    self.input = K.placeholder(ndim=4)
    super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)

def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True,
             input_dim=None, **kwargs):
    self.init = initializers.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Highway, self).__init__(**kwargs)

def __init__(self, output_dim, nb_hsm_classes, batch_size, init='glorot_uniform',
             W1_weights=None, W1_regularizer=None, W1_constraint=None,
             W2_weights=None, W2_regularizer=None, W2_constraint=None,
             b1_regularizer=None, b1_constraint=None,
             b2_regularizer=None, b2_constraint=None,
             input_dim=None, **kwargs):
    # Store every constructor argument as an attribute, then drop the
    # self-reference that locals() picked up.
    self.__dict__.update(locals())
    del self.self
    self.init = initializations.get(init)
    # self.output_dim = nb_classes * nb_outputs_per_class
    self.nb_outputs_per_class = int(np.ceil(output_dim / float(nb_hsm_classes)))
    self.W1_regularizer = regularizers.get(W1_regularizer)
    self.b1_regularizer = regularizers.get(b1_regularizer)
    self.W2_regularizer = regularizers.get(W2_regularizer)
    self.b2_regularizer = regularizers.get(b2_regularizer)
    self.W1_constraint = constraints.get(W1_constraint)
    self.b1_constraint = constraints.get(b1_constraint)
    self.W2_constraint = constraints.get(W2_constraint)
    self.b2_constraint = constraints.get(b2_constraint)
    self.constraints = [self.W1_constraint, self.b1_constraint,
                        self.W2_constraint, self.b2_constraint]
    # self.initial_weights = weights
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    self.input = T.matrix()
    super(HierarchicalSoftmax, self).__init__(**kwargs)

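# Worked example of the class/output factorisation above (the numbers are
# illustrative, not from the original code): with output_dim=10000 and
# nb_hsm_classes=250, each class covers ceil(10000 / 250) = 40 outputs, so a
# label is resolved by a 250-way class softmax followed by a 40-way
# within-class softmax instead of a single 10000-way softmax.
import numpy as np
assert int(np.ceil(10000 / float(250))) == 40
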
def __init__(self, axis=None, epsilon=1e-3, center=True, scale=True,
             beta_initializer='zeros', gamma_initializer='ones',
             beta_regularizer=None, gamma_regularizer=None,
             beta_constraint=None, gamma_constraint=None, **kwargs):
    self.beta = None
    self.gamma = None
    super(InstanceNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

def __init__(self, offset_creator_class, weight_basis, filters, kernel_size,
             strides=(1, 1), padding='valid', data_format=None, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    super(RProjLocallyConnected2D, self).__init__(offset_creator_class, weight_basis, **kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    if self.padding != 'valid':
        raise ValueError('Invalid border mode for LocallyConnected2D '
                         '(only "valid" is supported): ' + padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=4)

def __init__(self, offset_creator_class, weight_basis, axis=-1, momentum=0.99,
             epsilon=1e-3, center=True, scale=True,
             beta_initializer='zeros', gamma_initializer='ones',
             moving_mean_initializer='zeros', moving_variance_initializer='ones',
             beta_regularizer=None, gamma_regularizer=None,
             beta_constraint=None, gamma_constraint=None, **kwargs):
    super(RProjBatchNormalization, self).__init__(offset_creator_class, weight_basis, **kwargs)
    self.supports_masking = True
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(moving_variance_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

def __init__(self, nb_filter, nb_row, nb_col, mask_type=None, direction='Down',
             init='glorot_uniform', activation='linear', weights=None,
             border_mode='valid', subsample=(1, 1), dim_ordering='th',
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True, **kwargs):
    self.mask_type = mask_type
    self.direction = direction
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution2D:', border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    self.activation = activations.get(activation)
    assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(MaskedConvolution2D, self).__init__(**kwargs)

def __init__(self, nb_filter, nb_row, nb_col, init='glorot_uniform',
             activation='linear', weights=None, border_mode='valid',
             subsample=(1, 1), W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             **kwargs):
    if border_mode not in {'valid', 'full', 'same'}:
        raise Exception('Invalid border mode for Convolution2D:', border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    self.initial_weights = weights
    super(Convolution2D, self).__init__(**kwargs)

def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Keras layer that implements an attention mechanism for temporal data.

    Supports masking. Follows the work of Raffel et al.
    [https://arxiv.org/abs/1512.08756]

    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.

    Just put it on top of an RNN layer (GRU/LSTM/SimpleRNN) with
    return_sequences=True. The dimensions are inferred based on the output
    shape of the RNN.

    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention(step_dim))
    """
    self.supports_masking = True
    # self.init = initializations.get('glorot_uniform')
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)

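# Minimal usage sketch for the Attention layer above (hedged: assumes Keras 2
# style imports and that this module defines Attention; `maxlen` and
# `n_features` are illustrative names, not from the original code).
from keras.models import Sequential
from keras.layers import LSTM, Dense

maxlen, n_features = 50, 128
model = Sequential()
model.add(LSTM(64, input_shape=(maxlen, n_features), return_sequences=True))
model.add(Attention(step_dim=maxlen))  # (samples, 50, 64) -> (samples, 64)
model.add(Dense(1, activation='sigmoid'))
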
def __init__(self, offset_creator_class, weight_basis, units, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(RProjDense, self).__init__(offset_creator_class, weight_basis, **kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True

def __init__(self, units, projection_units=None,
             activation='tanh', recurrent_activation='sigmoid',
             projection_activation='linear', use_bias=True,
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             projection_initializer='glorot_uniform',
             bias_initializer='zeros', unit_forget_bias=False,
             kernel_regularizer=None, recurrent_regularizer=None,
             projection_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, recurrent_constraint=None,
             projection_constraint=None, bias_constraint=None,
             dropout=0., recurrent_dropout=0., implementation=2, **kwargs):
    super(NASCell, self).__init__(**kwargs)
    self.units = units
    self.projection_units = projection_units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.projection_activation = activations.get(projection_activation)
    self.cell_activation = activations.get('relu')
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.projection_initializer = initializers.get(projection_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.projection_regularizer = regularizers.get(projection_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.projection_constraint = constraints.get(projection_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.implementation = implementation
    if self.projection_units is not None:
        self.state_size = (self.projection_units, self.units)
    else:
        self.state_size = (self.units, self.units)
    self._dropout_mask = None
    self._recurrent_dropout_mask = None

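# Sketch of how a recurrent cell like NASCell is typically consumed (hedged:
# assumes the Keras RNN wrapper; shapes are illustrative). The state_size
# logic above implies that with projection_units set, the first state (and
# hence the per-step output) has the projected, smaller dimension.
from keras.layers import Input, RNN

x = Input(shape=(20, 32))                     # (batch, timesteps, features)
y = RNN(NASCell(64, projection_units=16))(x)  # final output: (batch, 16)
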
def __init__(self, input_dim, output_dim, causes_dim, hid2output,
             init='glorot_uniform',
             W_regularizer=None, W_constraint=None,
             b_regularizer=None, b_constraint=None,
             activation=lambda X: T.minimum(20, T.maximum(0, X)),
             activity_regularizer=None, truncate_gradient=-1, weights=None,
             name=None, return_mode='both', return_sequences=True):
    super(GAE, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.causes_dim = causes_dim
    self.activation = activations.get(activation)
    self.init = initializations.get(init)
    self.truncate_gradient = truncate_gradient
    self.input = T.tensor3()
    self.return_mode = return_mode
    self.return_sequences = return_sequences
    self.V = self.init((input_dim, output_dim))
    self.U = self.init((input_dim, output_dim))
    self.W = self.init((output_dim, causes_dim))
    self.bo = shared_zeros((self.output_dim,))  # output bias
    self.bc = shared_zeros((self.causes_dim,))  # causes bias
    self.params = [self.V, self.U, self.W]
    self.regularizers = []
    self.W_regularizer = regularizers.get(W_regularizer)
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    if self.b_regularizer:
        self.b_regularizer.set_param(self.bo)  # regularize the output bias
        self.regularizers.append(self.b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    if self.activity_regularizer:
        self.activity_regularizer.set_layer(self)
        self.regularizers.append(self.activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    if weights is not None:
        self.set_weights(weights)
    if name is not None:
        self.set_name(name)

def __init__(self, filters, kernel_size, strides=1, padding='same',
             dilation_rate=1, bias_initializer='zeros',
             kernel_initializer='glorot_uniform', activation='linear',
             weights=None, border_mode='valid', subsample_length=1,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, use_bias=True, input_dim=None,
             input_length=None, tied_to=None, data_format='channels_last',
             rank=1, learnedKernel=None, layer_inner=None, **kwargs):
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution1D:', border_mode)
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    self.tied_to = tied_to
    self.learnedKernel = np.array(learnedKernel)
    # self.tied_to.set_weights([weights, bias])
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
    self.data_format = data_format
    self.filters = filters
    if self.tied_to is not None:
        self.kernel_size = self.tied_to.kernel_size
    else:
        self.kernel_size = kernel_size
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
    self.border_mode = border_mode
    self.subsample_length = subsample_length
    self.subsample = (subsample_length, 1)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.rank = 1
    self.layer_inner = layer_inner
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    # self.layer_inner = kwargs.pop('layer_inner')
    super(Convolution1D_tied, self).__init__(**kwargs)

def __init__(self, input_dim, hidden_dim, init='glorot_uniform', weights=None,
             name=None, W_regularizer=None, bx_regularizer=None,
             bh_regularizer=None,  # activity_regularizer=None,
             W_constraint=None, bx_constraint=None, bh_constraint=None):
    super(RBM, self).__init__()
    self.init = initializations.get(init)
    self.input_dim = input_dim
    self.hidden_dim = hidden_dim
    self.input = T.matrix()
    self.W = self.init((self.input_dim, self.hidden_dim))
    self.bx = shared_zeros((self.input_dim,))
    self.bh = shared_zeros((self.hidden_dim,))
    self.params = [self.W, self.bx, self.bh]
    self.regularizers = []
    self.W_regularizer = regularizers.get(W_regularizer)
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    self.bx_regularizer = regularizers.get(bx_regularizer)
    if self.bx_regularizer:
        self.bx_regularizer.set_param(self.bx)
        self.regularizers.append(self.bx_regularizer)
    self.bh_regularizer = regularizers.get(bh_regularizer)
    if self.bh_regularizer:
        self.bh_regularizer.set_param(self.bh)
        self.regularizers.append(self.bh_regularizer)
    # self.activity_regularizer = regularizers.get(activity_regularizer)
    # if self.activity_regularizer:
    #     self.activity_regularizer.set_layer(self)
    #     self.regularizers.append(self.activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.bx_constraint = constraints.get(bx_constraint)
    self.bh_constraint = constraints.get(bh_constraint)
    self.constraints = [self.W_constraint, self.bx_constraint, self.bh_constraint]
    if weights is not None:
        self.set_weights(weights)
    if name is not None:
        self.set_name(name)
    self.srng = RandomStreams(seed=np.random.randint(int(10e6)))

def __init__(self, timesteps, bias=True, simple=False,
             W_regularizer=None, W_constraint=None,
             V_regularizer=None, V_constraint=None, **kwargs):
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    self.bias = bias
    self.timesteps = timesteps
    self.simple = simple
    self.W_regularizer = regularizers.get(W_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.V_regularizer = regularizers.get(V_regularizer)
    self.V_constraint = constraints.get(V_constraint)
    super(Attention, self).__init__(**kwargs)

def __init__(self, timesteps, attention_size, bias=True,
             W_regularizer=regularizers.l1(0.01), W_constraint=None,
             U_regularizer=regularizers.l1(0.01), U_constraint=None, **kwargs):
    # Note: the default regularizer instances are created once at import time
    # and are therefore shared by every layer instance that uses the defaults.
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    self.bias = bias
    self.timesteps = timesteps
    self.attention_size = attention_size
    self.W_regularizer = regularizers.get(W_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    super(Attention, self).__init__(**kwargs)

def __init__(self, nb_filter, filter_length, init='glorot_uniform',
             activation='linear', weights=None, padding='valid',
             strides=[1, 1, 1], W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             input_dim=None, input_length=None, **kwargs):
    if padding not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution1D:', padding)
    # self.deconv_shape = deconv_shape
    # transform 1D into 2D:
    # deconv_shape = [batch_size, output_size_y, output_size_x, number_of_filters]
    # self.deconv_shape = [deconv_shape[0], 1, deconv_shape[1], deconv_shape[2]]
    self.nb_filter = nb_filter
    self.filter_length = filter_length
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    assert padding in {'valid', 'same'}, 'border_mode must be in {valid, same}'
    self.padding = padding
    # necessary for loading, since a 4-dim stride will be saved
    if len(strides) == 3:
        self.strides = [strides[0], 1, strides[1], strides[2]]
    else:
        self.strides = strides
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    # self.W_shape = [1, W_shape[0], W_shape[1], W_shape[2]]
    # self.b_shape = b_shape
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    self.initial_weights = weights
    # self.input = K.placeholder(ndim=4)  # old Keras 0.3.x
    # Keras 1.0:
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(Convolution1D_Transpose_Arbitrary, self).__init__(**kwargs)

def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True, **kwargs):
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention_layer, self).__init__(**kwargs)

def __init__(self, nb_filter, nb_row, nb_col, init='glorot_uniform',
             weights=None, border_mode='same', subsample=(1, 1),
             dim_ordering='th', W_regularizer=None, activity_regularizer=None,
             W_constraint=None, **kwargs):
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Deconvolution2D:', border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    assert border_mode in {'same'}, 'border_mode must be same'
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(Deconvolution2D, self).__init__(**kwargs)

def __init__(self, input_dim, EmbedMatrix, input_length=None, init='uniform',
             W_regularizer=None, activity_regularizer=None, W_constraint=None,
             mask_zero=False, weights=None, **kwargs):
    """
    :param input_dim: vocabulary size
    :param input_length: max_seq_len, the maximum sequence length
    :param EmbedMatrix: a precomputed word2vec matrix, words x embeddings
    """
    self.input_dim = input_dim
    self.EmbedMatrix = theano.shared(value=EmbedMatrix)
    self.output_dim = EmbedMatrix.shape[1]
    self.init = initializations.get(init)
    self.input_length = input_length
    self.mask_zero = mask_zero
    self.W_constraint = constraints.get(W_constraint)
    self.constraints = [self.W_constraint]
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    kwargs['input_shape'] = (self.input_dim,)
    super(LookUpEmbeddingLayer, self).__init__(**kwargs)  # in __init__, input_shape = [None, self.input_dim]

def __init__(self, kernel_size, strides=(1, 1), padding='valid',
             depth_multiplier=1, data_format=None, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             bias_initializer='zeros', depthwise_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, bias_constraint=None, **kwargs):
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)

def __init__(self, input_dim, hidden_dim, init='glorot_uniform', weights=None,
             name=None, W_regularizer=None, bx_regularizer=None,
             bh_regularizer=None,  # activity_regularizer=None,
             W_constraint=None, bx_constraint=None, bh_constraint=None):
    super(RBM, self).__init__()
    self.init = initializations.get(init)
    self.input_dim = input_dim
    self.hidden_dim = hidden_dim
    self.input = K.placeholder(ndim=2)
    self.W = self.init((self.input_dim, self.hidden_dim))
    self.bx = K.zeros((self.input_dim,))
    self.bh = K.zeros((self.hidden_dim,))
    self.params = [self.W, self.bx, self.bh]
    self.regularizers = []
    self.W_regularizer = regularizers.get(W_regularizer)
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    self.bx_regularizer = regularizers.get(bx_regularizer)
    if self.bx_regularizer:
        self.bx_regularizer.set_param(self.bx)
        self.regularizers.append(self.bx_regularizer)
    self.bh_regularizer = regularizers.get(bh_regularizer)
    if self.bh_regularizer:
        self.bh_regularizer.set_param(self.bh)
        self.regularizers.append(self.bh_regularizer)
    # self.activity_regularizer = regularizers.get(activity_regularizer)
    # if self.activity_regularizer:
    #     self.activity_regularizer.set_layer(self)
    #     self.regularizers.append(self.activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.bx_constraint = constraints.get(bx_constraint)
    self.bh_constraint = constraints.get(bh_constraint)
    self.constraints = [self.W_constraint, self.bx_constraint, self.bh_constraint]
    if weights is not None:
        self.set_weights(weights)
    if name is not None:
        self.set_name(name)

def __init__(self, units=1, output_dim=1,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, return_sequence=True, return_attention=False,
             return_probabilities=False, **kwargs):
    """Keras layer that implements an attention mechanism for temporal data.

    Supports masking. Follows the work of Raffel et al.
    [https://arxiv.org/abs/1512.08756]

    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.

    Just put it on top of an RNN layer (GRU/LSTM/SimpleRNN) with
    return_sequences=True. The dimensions are inferred based on the output
    shape of the RNN. Note: the layer has been tested with Keras 1.x.

    Example:
        # 1
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
        # next add a Dense layer (for classification/regression) or whatever

        # 2 - Get the attention scores
        hidden = LSTM(64, return_sequences=True)(words)
        sentence, word_scores = Attention(return_attention=True)(hidden)
    """
    self.supports_masking = True
    self.return_attention = return_attention
    self.init = initializers.get('glorot_uniform')
    self.attention_dim = units
    self.output_dim = output_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(AttentionDecoder, self).__init__(**kwargs)

def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    self.supports_masking = True
    # self.init = initializations.get('glorot_uniform')
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)

def __init__(self, input_dim, output_dim, init='glorot_uniform',
             activation='linear', weights=None, name=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, corruption_level=0.0):
    super(DAE, self).__init__()
    self.srng = RandomStreams(seed=np.random.randint(int(10e6)))
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.corruption_level = corruption_level
    self.input = T.matrix()
    self.W = self.init((self.input_dim, self.output_dim))
    self.b = shared_zeros((self.output_dim,))
    self.bT = shared_zeros((self.input_dim,))  # bias of the reconstruction (transpose) pass
    self.params = [self.W, self.b, self.bT]
    self.regularizers = []
    self.W_regularizer = regularizers.get(W_regularizer)
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    if self.b_regularizer:
        self.b_regularizer.set_param(self.b)
        self.regularizers.append(self.b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    if self.activity_regularizer:
        self.activity_regularizer.set_layer(self)
        self.regularizers.append(self.activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    if weights is not None:
        self.set_weights(weights)
    if name is not None:
        self.set_name(name)

def __init__(self, a_initializer='ones',
             k_initializer='ones',
             n_initializer='ones',
             z_initializer='zeros',
             a_regularizer=None, a_constraint=constraints.NonNeg(),
             k_regularizer=None, k_constraint=constraints.NonNeg(),
             n_regularizer=None, n_constraint=constraints.NonNeg(),
             z_regularizer=None, z_constraint=constraints.NonNeg(),
             shared_axes=None,
             a_shared=True, k_shared=True, n_shared=True, z_shared=True,
             z_one=False, **kwargs):
    super(Hill, self).__init__(**kwargs)
    self.supports_masking = True
    self.a_initializer = initializers.get(a_initializer)
    self.a_regularizer = regularizers.get(a_regularizer)
    self.a_constraint = constraints.get(a_constraint)
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.n_initializer = initializers.get(n_initializer)
    self.n_regularizer = regularizers.get(n_regularizer)
    self.n_constraint = constraints.get(n_constraint)
    self.z_initializer = initializers.get(z_initializer)
    self.z_regularizer = regularizers.get(z_regularizer)
    self.z_constraint = constraints.get(z_constraint)
    self.a_shared = a_shared
    self.k_shared = k_shared
    self.n_shared = n_shared
    self.z_shared = z_shared
    self.z_one = z_one
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)

def __init__(self, units, output_dim, activation='tanh',
             return_probabilities=False, name='AttentionDecoder',
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    """Implements an AttentionDecoder that takes in a sequence encoded by an
    encoder and outputs the decoded states.

    :param units: dimension of the hidden state and the attention matrices
    :param output_dim: the number of labels in the output space

    References:
        Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
        "Neural machine translation by jointly learning to align and
        translate." arXiv preprint arXiv:1409.0473 (2014).
    """
    self.units = units
    self.output_dim = output_dim
    self.return_probabilities = return_probabilities
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    # The recurrent weights share the kernel regularizer and constraint;
    # no separate recurrent_* arguments are exposed.
    self.recurrent_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super(AttentionDecoder, self).__init__(**kwargs)
    self.name = name
    self.return_sequences = True  # must return sequences

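# Hedged usage sketch for AttentionDecoder: an encoder producing a full
# sequence feeds the decoder, which emits one distribution over `output_dim`
# labels per timestep (shapes are illustrative, not from the original code).
from keras.models import Sequential
from keras.layers import LSTM

model = Sequential()
model.add(LSTM(32, input_shape=(20, 8), return_sequences=True))  # encoder
model.add(AttentionDecoder(32, 5))  # returns sequences: (batch, 20, 5)
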
def test_serialization():
    all_constraints = ['max_norm', 'non_neg', 'unit_norm', 'min_max_norm']
    for name in all_constraints:
        fn = constraints.get(name)
        ref_fn = getattr(constraints, name)()
        assert fn.__class__ == ref_fn.__class__
        config = constraints.serialize(fn)
        fn = constraints.deserialize(config)
        assert fn.__class__ == ref_fn.__class__

def __init__(self, nb_filter, nb_row, nb_col, init='glorot_uniform',
             activation='linear', weights=None, dim_ordering='th',
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, **kwargs):
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    self.activation = activations.get(activation)
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(DeConvLayer, self).__init__(**kwargs)

def __init__(self, alpha_initializer='zeros', alpha_regularizer=None,
             alpha_constraint=None, shared_axes=None, **kwargs):
    super(PReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)

def __init__(self, alpha_initializer=Constant(3.0), alpha_regularizer=None,
             alpha_constraint=MinMaxValue(), **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(PLSEU, self).__init__(**kwargs)
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.alpha = None
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True

def __init__(self, r=None, kernel_initializer='glorot_uniform',
             kernel_regularizer=None, kernel_constraint=None, **kwargs):
    super(CCMProjection, self).__init__(**kwargs)
    self.radius = r
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    if self.radius == 'spherical':
        self.kernel_constraint = self.Pos()  # keep the learned radius positive
    elif self.radius == 'hyperbolic':
        self.kernel_constraint = self.Neg()  # keep the learned radius negative
    else:
        self.kernel_constraint = constraints.get(kernel_constraint)

def __init__(self, input_dim, output_dim, embeddings_initializer='uniform',
             embeddings_regularizer=None, embeddings_constraint=None, **kwargs):
    super(Embedding2D, self).__init__(**kwargs)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)

def __init__(self, gamma_initializer="ones", gamma_regularizer=None,
             gamma_constraint=None, epsilon=1e-07, **kwargs):
    super(WeightNorm_Conv, self).__init__(**kwargs)
    if self.rank == 1:
        self.data_format = "channels_last"
    self.gamma_initializer = sanitizedInitGet(gamma_initializer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.epsilon = epsilon
    self.gamma = None

def __init__(self, kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, use_bias=True, mid_units=None,
             alpha=1., keepdims=False, **kwargs):
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.use_bias = use_bias
    self.mid_units = mid_units
    self.supports_masking = True
    super(PairAttention, self).__init__(alpha, keepdims, **kwargs)

def get_config(self):
    config = {
        'x_imputation': self.x_imputation,
        'input_decay': serialize_keras_object(self.input_decay),
        'hidden_decay': serialize_keras_object(self.hidden_decay),
        'use_decay_bias': self.use_decay_bias,
        'feed_masking': self.feed_masking,
        'masking_decay': serialize_keras_object(self.masking_decay),
        'decay_initializer': initializers.serialize(self.decay_initializer),
        'decay_regularizer': regularizers.serialize(self.decay_regularizer),
        'decay_constraint': constraints.serialize(self.decay_constraint),
    }
    base_config = super(GRUD, self).get_config()
    # GRU-D does not support these GRU options, so drop them from the config.
    for c in ['implementation', 'reset_after']:
        del base_config[c]
    return dict(list(base_config.items()) + list(config.items()))

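# Config round-trip this get_config is meant to support (hedged: assumes a
# GRUD layer constructible from a single `units` argument). get_config must
# return JSON-serializable values, which is why serialize() rather than get()
# is applied to the initializer/regularizer/constraint above.
layer = GRUD(32)
restored = GRUD.from_config(layer.get_config())
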
def __init__(self, units, s=5., kernel_initializer='glorot_uniform',
             kernel_regularizer=None, kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(Dense, self).__init__(**kwargs)
    self.units = units
    self.s = s
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)

def __init__(self, theta_initializer=None, theta_regularizer=None,
             theta_constraint=None, shared_axes=None, **kwargs):
    super(SoftThresholdingLayer, self).__init__(**kwargs)
    self.supports_masking = True
    self.theta_initializer = initializers.get(theta_initializer)
    self.theta_regularizer = regularizers.get(theta_regularizer)
    self.theta_constraint = constraints.get(theta_constraint)
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)

def __init__(self, activation=None, bias_initializer='zeros',
             bias_regularizer=None, activity_regularizer=None,
             bias_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(Bias, self).__init__(**kwargs)
    self.activation = activations.get(activation)
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True

def __init__(self, ratio, return_mask=False, sigmoid_gating=False,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             activity_regularizer=None, kernel_constraint=None, **kwargs):
    super().__init__(**kwargs)
    self.ratio = ratio  # ratio of nodes to keep in each graph
    self.return_mask = return_mask
    self.sigmoid_gating = sigmoid_gating
    self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)

def __init__(self, output_dim, weights=None, regularizer=None,
             constraint=None, **kwargs):
    self.output_dim = output_dim
    self.input_dim = None
    self.regularizer = regularizers.get(regularizer)
    self.constraint = constraints.get(constraint)
    self.initial_weights = weights
    self.input_spec = None
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Constant, self).__init__(**kwargs)

def __init__(self, input_units, input_metrix=False, input_square=False,
             add_square=False, kernel_initializer='uniform',
             kernel_regularizer=None, kernel_constraint=None, **kwargs):
    self.input_units = input_units
    self.input_metrix = input_metrix
    self.input_square = input_square
    self.add_square = add_square
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    super(Attention, self).__init__(**kwargs)

def __init__(self, trainable_kernel=False, activation=None,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             activity_regularizer=None, kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(InnerProduct, self).__init__(**kwargs)
    self.trainable_kernel = trainable_kernel
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)

def __init__(self, units, is_norm=True, kernel_initializer='glorot_uniform',
             kernel_regularizer=None, kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(NormilizedDense, self).__init__(**kwargs)
    self.units = units
    self.is_norm = is_norm
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True

def __init__(self, input_dim, output_dim, embeddings_initializer="uniform",
             embeddings_regularizer=None, activity_regularizer=None,
             embeddings_constraint=None, mask_zero=False, input_length=None,
             **kwargs):
    if "input_shape" not in kwargs:
        if input_length:
            kwargs["input_shape"] = (input_length,)
        else:
            kwargs["input_shape"] = (None,)
    if input_dim <= 0 or output_dim <= 0:
        raise ValueError(
            "Both `input_dim` and `output_dim` should be positive, "
            f"Received input_dim = {input_dim} "
            f"and output_dim = {output_dim}")
    if (not base_layer_utils.v2_dtype_behavior_enabled()
            and "dtype" not in kwargs):
        # In TF1, the dtype defaults to the input dtype, which is typically
        # int32, so explicitly set it to floatx.
        kwargs["dtype"] = backend.floatx()
    # We set autocast to False, as we do not want to cast floating-point
    # inputs to self.dtype. In call(), we cast to int32, and casting to
    # self.dtype before casting to int32 might cause the int32 values to be
    # different due to a loss of precision.
    kwargs["autocast"] = False
    super().__init__(**kwargs)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    self.supports_masking = mask_zero
    self.input_length = input_length

def __init__(self, units, x_imputation='zero',
             input_decay='exp_relu', hidden_decay='exp_relu',
             use_decay_bias=True, feed_masking=True, masking_decay=None,
             decay_initializer='zeros', decay_regularizer=None,
             decay_constraint=None, **kwargs):
    super(GRUDCell, self).__init__(units, **kwargs)
    assert 'reset_after' not in kwargs or not kwargs['reset_after'], (
        'Only the default GRU reset gate can be used in GRU-D.')
    assert ('implementation' not in kwargs
            or kwargs['implementation'] == 1), (
        'Only Implementation-1 (larger number of smaller operations) '
        'is supported in GRU-D.')
    assert x_imputation in _SUPPORTED_IMPUTATION, (
        'x_imputation {} argument is not supported.'.format(x_imputation))
    self.x_imputation = x_imputation
    self.input_decay = get_activation(input_decay)
    self.hidden_decay = get_activation(hidden_decay)
    self.use_decay_bias = use_decay_bias
    assert (feed_masking or masking_decay is None
            or masking_decay == 'None'), (
        'Mask needs to be fed into GRU-D to enable the mask_decay.')
    self.feed_masking = feed_masking
    if self.feed_masking:
        self.masking_decay = get_activation(masking_decay)
        self._masking_dropout_mask = None
    else:
        self.masking_decay = None
    if (self.input_decay is not None
            or self.hidden_decay is not None
            or self.masking_decay is not None):
        self.decay_initializer = initializers.get(decay_initializer)
        self.decay_regularizer = regularizers.get(decay_regularizer)
        self.decay_constraint = constraints.get(decay_constraint)

def __init__(self, kernel_size, strides=(1, 1), padding='valid',
             depth_multiplier=1, data_format=None, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             bias_initializer='zeros', depthwise_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, bias_constraint=None, **kwargs):
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
    # Precompute the padding/stride/data-format strings expected by the
    # low-level convolution ops.
    self._padding = padding.upper()
    if K.image_data_format() == 'channels_last':
        self._strides = (1,) + strides + (1,)
    else:
        self._strides = (1, 1) + strides
    if self.data_format == 'channels_last':
        self._data_format = "NHWC"
    else:
        self._data_format = "NCHW"

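# Channel bookkeeping sketch for DepthwiseConv2D (shapes are illustrative):
# each input channel is convolved with `depth_multiplier` separate filters,
# so the output has in_channels * depth_multiplier channels.
from keras.layers import Input

x = Input(shape=(64, 64, 3))  # 3 input channels, channels_last
y = DepthwiseConv2D(kernel_size=3, depth_multiplier=2)(x)
# with 'valid' padding: y has shape (batch, 62, 62, 3 * 2)
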
def __init__(self, nb_widths, kernel_length=100, init='uniform',
             activation='linear', weights=None, padding='same', strides=1,
             data_format='channels_last', use_bias=True,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, input_shape=None, **kwargs):
    if padding.lower() not in {'valid', 'same'}:
        raise Exception('Invalid border mode for WaveletDeconvolution:', padding)
    if data_format.lower() not in {'channels_first', 'channels_last'}:
        raise Exception('Invalid data format for WaveletDeconvolution:', data_format)
    self.nb_widths = nb_widths
    self.kernel_length = kernel_length
    self.init = self.didactic  # initializers.get(init, data_format='channels_first')
    self.activation = activations.get(activation)
    self.padding = padding
    self.strides = strides
    self.subsample = (strides, 1)
    self.data_format = data_format.lower()
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = Pos()
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.initial_weights = weights
    super(WaveletDeconvolution, self).__init__(**kwargs)

def __init__(self, num_filters, kernel_size, strides=(1, 1), padding='valid',
             kernel_initializer='glorot_uniform', kernel_constraint=None,
             **kwargs):
    super(Erosion2D, self).__init__(**kwargs)
    self.num_filters = num_filters
    self.kernel_size = kernel_size
    self.strides = strides
    self.padding = padding
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    # we assume channels-last ordering here
    self.channel_axis = -1

def __init__(self, nb_complex, filter_delays,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    self.nb_complex = nb_complex
    self.filter_delays = filter_delays
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.input_spec = [InputSpec(ndim=3)]
    super(SpatioTemporalFilterComplex, self).__init__(**kwargs)

def __init__(self, units, prec_initializer='zeros', prec_regularizer=None,
             prec_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(GlobalProdRenormDiagNormalConstCovStdPrior2, self).__init__(**kwargs)
    self.units = units
    self.prec_initializer = initializers.get(prec_initializer)
    self.prec_regularizer = regularizers.get(prec_regularizer)
    self.prec_constraint = constraints.get(prec_constraint)
    self.input_spec = [InputSpec(ndim=3), InputSpec(min_ndim=2)]
    self.supports_masking = True

def __init__(self, units, n_hop=5, return_attend_weight=False,
             initializer='orthogonal', regularizer=None, constraint=None,
             **kwargs):
    self.units = units
    self.n_hop = n_hop
    self.return_attend_weight = return_attend_weight
    self.initializer = initializers.get(initializer)
    self.regularizer = regularizers.get(regularizer)
    self.constraint = constraints.get(constraint)
    self.supports_masking = True
    super(RecurrentAttention, self).__init__(**kwargs)

def __init__(self, units, with_H=False, s=5., negative_k=100, num_batch=100,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(Dense, self).__init__(**kwargs)
    self.units = units
    self.with_H = with_H
    self.s = s
    self.negative_k = negative_k
    self.num_batch = num_batch
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)

def __init__(self, filters, groups=1, hysteresis=0.1, H=1.0,
             bias_initializer='zeros', bias_regularizer=None,
             bias_constraint=None, useMix=False, **kwargs):
    super(EednConv2D, self).__init__(filters, **kwargs)
    self.H = H
    self.hysteresis = hysteresis
    self.groups = groups
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
    self.useMix = useMix

def __init__(self, operation, initializer=None, weight_shape=None,
             input_as_operand=False, constraint=None, trainable=True,
             **kwargs):
    """
    # Arguments
        operation: Operation to perform between the input and the weight. It
            must be one of the allowed operations. Check
            `Arithmetic.allowed_operations` to see what operations you can
            use.
        initializer: Initializer of the weight. Accepts a string, an instance
            of Initializer, or numerical values. Set to None to use a default
            initializer that performs the identity function. E.g., if the
            operation is '+' or '-', the default initializer will be 'zeros';
            if the operation is '*' or '/', the default initializer will be
            'ones'.
        weight_shape: The default shape is for a scalar number. The shape is
            inferred from the initializer if it consists of numerical values.
            If weight_shape is set, the initializer is broadcast to
            shape = weight_shape; if broadcasting fails, a ValueError is
            raised.
        input_as_operand: Whether to use the input as the operand or the
            operator of the operation against the weight.
        trainable: Whether the weight is variable or fixed.
    """
    super(Arithmetic, self).__init__(trainable=trainable, **kwargs)
    if not operation or operation not in self.allowed_operations:
        raise ValueError(
            f"Operation '{operation}' is not one of the allowed operations: "
            f"'{self.allowed_operations}'")
    self.operation = operation
    self.weight_shape = weight_shape
    if initializer is None:
        initializer = "ones" if operation in "*/" else "zeros"
    try:
        self.initializer = initializers.get(initializer)
    except ValueError:
        # Numerical values are wrapped in a constant initializer.
        initializer = tf.constant_initializer(initializer)
        self.initializer = initializers.get(initializer)
    self.input_as_operand = input_as_operand
    self.constraint = constraints.get(constraint)
    self.trainable = trainable

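# Minimal sketch of the Arithmetic layer (hedged: assumes '*' is among
# Arithmetic.allowed_operations, as the default-initializer rule above
# implies, and that call() applies the operation elementwise).
import tensorflow as tf

scale = Arithmetic('*', initializer=2.0)   # scalar weight, initialized to 2
y = scale(tf.constant([[1.0, 2.0, 3.0]]))  # expected: [[2.0, 4.0, 6.0]]
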
def __init__(self, nb_kernels, kernel_dim, init='glorot_uniform', weights=None,
             W_regularizer=None, activity_regularizer=None, W_constraint=None,
             input_dim=None, **kwargs):
    self.init = initializers.get(init)
    self.nb_kernels = nb_kernels
    self.kernel_dim = kernel_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(MinibatchDiscrimination, self).__init__(**kwargs)

def __init__(self, nb_gaussian, init='normal', weights=None,
             W_regularizer=None, activity_regularizer=None, W_constraint=None,
             **kwargs):
    self.nb_gaussian = nb_gaussian
    self.init = initializers.get(init)  # , dim_ordering='th')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(LearningPrior, self).__init__(**kwargs)

def __init__(self, ratio, return_mask=False, sigmoid_gating=False,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             activity_regularizer=None, kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(TopKPool, self).__init__(**kwargs)
    self.ratio = ratio  # ratio of nodes to keep in each graph
    self.return_mask = return_mask
    self.sigmoid_gating = sigmoid_gating
    self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)

def build(self, input_shape):
    output_shape = self.layer.get_output_shape_for(input_shape)
    if output_shape != input_shape:
        raise Exception('Cannot apply residual to layer "{}": '
                        'mismatching input and output shapes '
                        '"{}" and "{}"'.format(self.layer.name, input_shape, output_shape))
    if not self.layer.built:
        self.layer.build(input_shape)
        self.layer.built = True
    self.input_spec = [InputSpec(shape=input_shape)]
    super(Residual, self).build()
    if not self.p:
        self.p = self.add_weight((1,),
                                 initializer='uniform',
                                 name='{}_p'.format(self.layer.name),
                                 regularizer=regularizers.get(None),
                                 constraint=constraints.get(None))
    self.res = None