Example #1
 def __init__(self,
              alpha_initializer='zeros',
              b_initializer='zeros',
              S=1,
              alpha_regularizer=None,
              b_regularizer=None,
              alpha_constraint=None,
              b_constraint=None,
              shared_axes=None,
              **kwargs):
     super(APLUnit, self).__init__(**kwargs)
     self.supports_masking = True
     self.alpha_initializer = initializers.get(alpha_initializer)
     self.alpha_regularizer = regularizers.get(alpha_regularizer)
     self.alpha_constraint = constraints.get(alpha_constraint)
     self.b_initializer = initializers.get(b_initializer)
     self.b_regularizer = regularizers.get(b_regularizer)
     self.b_constraint = constraints.get(b_constraint)
     if shared_axes is None:
         self.shared_axes = None
     elif not isinstance(shared_axes, (list, tuple)):
         self.shared_axes = [shared_axes]
     else:
         self.shared_axes = list(shared_axes)
     self.S = S
     self.alpha_arr = []
     self.b_arr = []
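For reference, an APL (adaptive piecewise linear) unit computes f(x) = max(0, x) + sum_{s=1..S} a^s * max(0, -x + b^s), with one pair of learned tensors per hinge. A minimal sketch of the matching forward pass, assuming a build() step (not shown in the source) appended one weight per hinge to self.alpha_arr and self.b_arr:

from keras import backend as K

def call(self, inputs, mask=None):
    # ReLU term plus S learned hinges -- a sketch, not the source's own call()
    out = K.relu(inputs)
    for s in range(self.S):
        out = out + self.alpha_arr[s] * K.relu(-inputs + self.b_arr[s])
    return out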
Example #2
 def __init__(self, units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(MultiHead, self).__init__(**kwargs)
     self.units = units
     self.heads = 8
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
      self.supports_masking = True
      # The layer takes three 3D inputs: query, key and value.
      self.input_spec = [InputSpec(min_ndim=3), InputSpec(min_ndim=3), InputSpec(min_ndim=3)]
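Note that the number of heads is fixed at self.heads = 8 instead of being a constructor argument, and the input spec declares three 3D tensors. A hypothetical instantiation matching that interface (the layer's build()/call() are not shown in the source, so this only illustrates the declared inputs):

from keras.layers import Input

# Hypothetical shapes: sequences of length 10 with width 64
q = Input(shape=(10, 64))
k = Input(shape=(10, 64))
v = Input(shape=(10, 64))
out = MultiHead(units=64)([q, k, v])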
Example #3
    def __init__(self,
                 units,
                 encoder_ts,
                 encoder_latdim,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        self.units = units
        self.state_size = units

        #self.x_seq = encoder_H
        self.encoder_ts = encoder_ts  # encoder's timesteps
        self.encoder_latDim = encoder_latdim  # encoder's latent dim

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # No separate recurrent regularizer is exposed, so the kernel's is reused.
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        # Likewise, the recurrent constraint reuses the kernel constraint.
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionRNNCell, self).__init__(**kwargs)
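Since the cell sets state_size, it plugs into keras.layers.RNN like any custom cell. A hypothetical usage with illustrative sizes (20 encoder timesteps, 32-dimensional latent space):

from keras.layers import Input, RNN

cell = AttentionRNNCell(units=64, encoder_ts=20, encoder_latdim=32)
x = Input(shape=(None, 32))              # variable-length input sequence
y = RNN(cell, return_sequences=True)(x)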
Example #4
    def __init__(self,
                 units,
                 bond_classes,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(EdgeNetwork, self).__init__(**kwargs)
        self.units = units
        self.bond_classes = bond_classes

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
Example #5
    def __init__(self, filters=1, kernel_size=80, rank=1, strides=1, padding='valid',
                 data_format='channels_last', dilation_rate=1, activation=None, use_bias=True,
                 fsHz=1000.,
                 fc_initializer=initializers.RandomUniform(minval=10, maxval=400),
                 n_order_initializer=initializers.constant(4.),
                 amp_initializer=initializers.constant(10 ** 5),
                 beta_initializer=initializers.RandomNormal(mean=30, stddev=6),
                 bias_initializer='zeros',
                 **kwargs):
        super(Conv1D_gammatone, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size_ = kernel_size
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.bias_initializer = initializers.get(bias_initializer)
        self.fc_initializer = initializers.get(fc_initializer)
        self.n_order_initializer = initializers.get(n_order_initializer)
        self.amp_initializer = initializers.get(amp_initializer)
        self.beta_initializer = initializers.get(beta_initializer)
        self.input_spec = InputSpec(ndim=self.rank + 2)

        self.fsHz = fsHz
        self.t = tf.range(start=0, limit=kernel_size / float(fsHz),
                          delta=1 / float(fsHz), dtype=K.floatx())
        self.t = tf.expand_dims(input=self.t, axis=-1)
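The self.t vector is the time axis the gammatone kernels are evaluated on. For reference, a gammatone impulse response has the form g(t) = a * t^(n-1) * exp(-2*pi*beta*t) * cos(2*pi*fc*t); a NumPy sketch of one filter built from the four initialized parameter groups (the source's build() is not shown, and the function name is an assumption):

import numpy as np

def gammatone_ir(t, fc, beta, amp, n_order):
    # g(t) = a * t^(n-1) * exp(-2*pi*beta*t) * cos(2*pi*fc*t)
    return (amp * t ** (n_order - 1)
            * np.exp(-2 * np.pi * beta * t)
            * np.cos(2 * np.pi * fc * t))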
Example #6
 def __init__(self, weights=None, axis=-1, momentum=0.9, beta_init='he_normal', gamma_init='he_normal', **kwargs):
     self.momentum = momentum
     self.axis = axis
     self.beta_init = initializers.get(beta_init)
     self.gamma_init = initializers.get(gamma_init)
     self.initial_weights = weights
     super(Scale, self).__init__(**kwargs)
Example #7
 def __init__(self, filters, kernel_size, rank=1, strides=1, padding='valid', data_format='channels_last',
              dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform',
              bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
              kernel_constraint=None, bias_constraint=None, type=1, **kwargs):
     super(Conv1D_linearphaseType, self).__init__(**kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size_=kernel_size
     if kernel_size % 2:
         self.kernel_size = conv_utils.normalize_tuple(kernel_size // 2 + 1, rank, 'kernel_size')
     else:
         self.kernel_size = conv_utils.normalize_tuple(kernel_size // 2, rank, 'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
      if not 1 <= type <= 4:
          raise ValueError('FIR type should be between 1 and 4')
      self.type = type
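Only half the kernel is stored (kernel_size // 2, plus the center tap for odd lengths) because a linear-phase FIR filter is symmetric (types 1 and 2) or antisymmetric (types 3 and 4) about its midpoint. A NumPy sketch of how the full kernel can be reconstructed from the stored half (an assumption about the build step, which the source does not show):

import numpy as np

def full_linear_phase_kernel(half, fir_type):
    sign = 1 if fir_type in (1, 2) else -1  # types 3/4 are antisymmetric
    if fir_type in (1, 3):                  # odd length: center tap is not repeated
        return np.concatenate([half, sign * half[-2::-1]])
    return np.concatenate([half, sign * half[::-1]])  # even length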
Example #8
 def __init__(self,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              depth_multiplier=1,
              data_format=None,
              activation=None,
              use_bias=True,
              depthwise_initializer='glorot_uniform',
              bias_initializer='zeros',
              depthwise_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              depthwise_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(DepthwiseConv2D,
           self).__init__(filters=None,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          data_format=data_format,
                          activation=activation,
                          use_bias=use_bias,
                          bias_regularizer=bias_regularizer,
                          activity_regularizer=activity_regularizer,
                          bias_constraint=bias_constraint,
                          **kwargs)
     self.depth_multiplier = depth_multiplier
     self.depthwise_initializer = initializers.get(depthwise_initializer)
     self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
     self.depthwise_constraint = constraints.get(depthwise_constraint)
     self.bias_initializer = initializers.get(bias_initializer)
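filters=None in the super call reflects that depthwise convolution does no pointwise mixing: each input channel is filtered independently, so the output carries input_channels * depth_multiplier channels. For example:

# A 32-channel input with depth_multiplier=2 yields 32 * 2 = 64 output channels.
layer = DepthwiseConv2D(kernel_size=3, depth_multiplier=2)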
Example #9
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              W_regularizer=None,
              bias_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     # self.output_dim = output_dim
     # self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.units = units
     self.activation = activation
     self.activation_fn = activations.get(activation)
     self.use_bias = use_bias
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_initializer = initializers.get(kernel_initializer)
      self.W_regularizer = W_regularizer  # keep the raw value (e.g. for get_config)
      self.kernel_regularizer = regularizers.get(W_regularizer)
      self.bias_regularizer = regularizers.get(bias_regularizer)
      self.kernel_constraint = constraints.get(kernel_constraint)
      self.bias_constraint = constraints.get(bias_constraint)
     super(Diagonal, self).__init__(**kwargs)
Example #10
 def __init__(self,
              units,
              map=None,
              nonzero_ind=None,
              kernel_initializer='glorot_uniform',
              W_regularizer=None,
              activation='tanh',
              use_bias=True,
              bias_initializer='zeros',
              bias_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     self.units = units
     self.activation = activation
     self.map = map
     self.nonzero_ind = nonzero_ind
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_regularizer = regularizers.get(W_regularizer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activation_fn = activations.get(activation)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     super(SparseTF, self).__init__(**kwargs)
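The nonzero_ind argument suggests a kernel restricted to a fixed sparsity pattern, with one trainable weight per listed position. A hedged sketch of materializing such a kernel (tf.scatter_nd places the weights at their indices and leaves everything else zero; the names and shapes are assumptions, since the source's build()/call() are not shown):

import tensorflow as tf

def dense_kernel_from_pattern(nonzero_ind, nonzero_values, kernel_shape):
    # Scatter the per-connection weights into an otherwise-zero dense kernel.
    return tf.scatter_nd(nonzero_ind, nonzero_values, kernel_shape)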
Example #11
 def __init__(self,
              epsilon=1e-3,
              mode=0,
              axis=-1,
              momentum=0.99,
              weights=None,
              beta_init='zero',
              gamma_init='one',
              gamma_regularizer=None,
              beta_regularizer=None,
              center=True,
              scale=True,
              **kwargs):
     super(NewBatchNormalization, self).__init__(**kwargs)
     self.supports_masking = True
     self.beta_init = initializers.get(beta_init)
     self.gamma_init = initializers.get(gamma_init)
     self.epsilon = epsilon
     self.mode = mode
     self.axis = axis
     self.momentum = momentum
     self.center = center
     self.scale = scale
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.initial_weights = weights
     if self.mode == 0:
         self.uses_learning_phase = True
Example #12
 def __init__(self,
              scale_initializer='ones',
              bias_initializer='zeros',
              **kwargs):
     super(LayerNorm, self).__init__(**kwargs)
     self.epsilon = 1e-6
     self.scale_initializer = initializers.get(scale_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
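A constructor like this typically pairs with a call() that standardizes over the feature axis and applies the learned affine transform. A minimal sketch, assuming build() (not shown in the source) created self.scale and self.bias with the initializers above:

from keras import backend as K

def call(self, x):
    # Normalize over the last axis, then rescale and shift.
    mean = K.mean(x, axis=-1, keepdims=True)
    std = K.std(x, axis=-1, keepdims=True)
    return self.scale * (x - mean) / (std + self.epsilon) + self.bias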
Example #13
    def __init__(self,
                 units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 attention_activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 attention_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 attention_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 attention_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_attention=False,
                 implementation=1,
                 **kwargs):
        super(AttentionLSTM, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.attention_activation = activations.get(attention_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.attention_initializer = initializers.get(attention_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.attention_regularizer = regularizers.get(attention_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.attention_constraint = constraints.get(attention_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.return_attention = return_attention
        self.state_spec = [
            InputSpec(shape=(None, self.units)),
            InputSpec(shape=(None, self.units))
        ]
        self.implementation = implementation
Example #14
 def build(self, input_shape):
     self.W = self.add_weight(shape=(int(input_shape[2]), 1),
                              name='{}_W'.format(self.name),
                              initializer=initializers.get('uniform'))
     if self.use_bias:
         self.b = self.add_weight(shape=(1,),
                                  name='{}_b'.format(self.name),
                                  initializer=initializers.get('zeros'))
     super(SeqWeightedAttention, self).build(input_shape)
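Given the (features, 1) weight W and scalar bias b created above, the usual companion call() scores every timestep, softmaxes over time, and returns the weighted sum of the sequence. A sketch (the source shows only build(), so this is an assumption about the rest of the layer):

from keras import backend as K

def call(self, x, mask=None):
    logits = K.squeeze(K.dot(x, self.W), axis=-1)      # (batch, time)
    if self.use_bias:
        logits = logits + self.b
    weights = K.softmax(logits)                        # attention over timesteps
    return K.sum(x * K.expand_dims(weights), axis=1)   # (batch, features)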
Example #15
 def __init__(self,
              kernel_regularizer=None,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(MoGLayer, self).__init__(**kwargs)
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
Example #16
    def __init__(self,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 **kwargs):
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        super(CrossLayer, self).__init__(**kwargs)
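In the Deep & Cross sense, a cross layer computes x_{l+1} = x_0 * (x_l . w) + b + x_l, which keeps the input dimension while adding explicit feature interactions. A minimal sketch, assuming build() (not shown in the source) created a (dim,) kernel and bias:

from keras import backend as K

def call(self, inputs):
    # x_{l+1} = x_0 * (x_l . w) + b + x_l  -- a sketch, not the source's call()
    x0, xl = inputs
    score = K.sum(xl * self.kernel, axis=-1, keepdims=True)  # (batch, 1)
    return x0 * score + self.bias + xl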
Example #17
    def __init__(
            self,
            units,  # units in the decoder
            steps,  # steps for output
            output_dim,  # dimension of output
            atten_units,  # attention units in the dense layer
            gmax,  # number of hidden sub-units for maxout
            activation='tanh',
            recurrent_activation='hard_sigmoid',
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            kernel_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            recurrent_constraint=None,
            bias_constraint=None,
            dropout=0.,
            recurrent_dropout=0.,
            return_probabilities=False,
            **kwargs):

        self.units = units
        self.steps = steps
        self.output_dim = output_dim
        self.atten_units = atten_units
        self.activation = activations.get(activation)
        self.gmax = gmax
        self.recurrent_activation = activations.get(recurrent_activation)

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self._dropout_mask = None
        self._recurrent_dropout_mask = None

        self.return_probabilities = return_probabilities
        """if self.dropout or self.recurrent_dropout:
            self.uses_learning_phase = True"""
        super(attention_LSTM, self).__init__(**kwargs)
Example #18
    def __init__(self,
                 step_dim,
                 get_alpha=False,
                 return_sequence=False,
                 W_regularizer=None,
                 b_regularizer=None,
                 L_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 L_constraint=None,
                 bias=False,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.l_init = initializers.constant(value=0.5)
        self.return_sequence = return_sequence
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.L_regularizer = regularizers.get(L_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.L_constraint = constraints.get(L_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.get_alpha = get_alpha
        self.features_dim = 0
        super(ISA, self).__init__(**kwargs)
Example #19
    def __init__(self,
                 input_dim,
                 output_dim,
                 mode=MODE_EXPAND,
                 embeddings_initializer='uniform',
                 embeddings_regularizer=None,
                 activity_regularizer=None,
                 embeddings_constraint=None,
                 mask_zero=False,
                 **kwargs):
        """
        :param input_dim: The maximum absolute value of positions.
        :param output_dim: The embedding dimension.
        :param embeddings_initializer:
        :param embeddings_regularizer:
        :param activity_regularizer:
        :param embeddings_constraint:
        :param mask_zero: The index that represents padding. Only works in `append` mode.
        :param kwargs:
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.mode = mode
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.embeddings_constraint = constraints.get(embeddings_constraint)
        self.mask_zero = mask_zero
        self.supports_masking = mask_zero is not False

        self.embeddings = None
        super(PositionEmbedding, self).__init__(**kwargs)
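In the expand mode implied by MODE_EXPAND, the inputs are themselves position indices: they are clipped to [-input_dim, input_dim] and looked up in a table of 2 * input_dim + 1 vectors. A hedged sketch of that lookup (the source's call() is not shown):

from keras import backend as K

def call(self, inputs):
    # Shift positions from [-input_dim, input_dim] into the table range [0, 2 * input_dim].
    idx = K.clip(inputs, -self.input_dim, self.input_dim) + self.input_dim
    return K.gather(self.embeddings, K.cast(idx, 'int32'))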
Example #20
    def build(self, input_shape):
        super(DenseQ, self).build(input_shape)

        kernel_type = tf.float32
        kernel_init = initializers.get('zeros')
        self.kernel_q = self.add_weight(shape=self.kernel.get_shape().as_list(),
                                        dtype=kernel_type,
                                        initializer=kernel_init,
                                        name='kernel_q')

        bias_type = tf.float32
        bias_init = initializers.get('zeros')
        self.bias_q = self.add_weight(shape=self.bias.get_shape().as_list(),
                                      dtype=bias_type,
                                      initializer=bias_init,
                                      name='bias_q')
Example #21
    def __init__(
            self,
            F_,
            attn_heads=1,
            attn_heads_reduction='concat',  # ['concat', 'average']
            attn_dropout=0.5,
            activation='relu',
            kernel_initializer='glorot_uniform',
            attn_kernel_initializer='glorot_uniform',
            kernel_regularizer=None,
            attn_kernel_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            att_kernel_constraint=None,
            **kwargs):
        assert attn_heads_reduction in ['concat', 'average'], \
            'Possible reduction methods: \'concat\', \'average\''

        self.F_ = F_  # Number of output features (F' in the paper)
        self.attn_heads = attn_heads  # Number of attention heads (K in the paper)
        self.attn_heads_reduction = attn_heads_reduction  # 'concat' or 'average' (Eq 5 and 6 in the paper)
        self.attn_dropout = attn_dropout  # Internal dropout rate for attention coefficients
        self.activation = activations.get(
            activation)  # Optional nonlinearity (Eq 4 in the paper)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.attn_kernel_initializer = initializers.get(
            attn_kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.attn_kernel_regularizer = regularizers.get(
            attn_kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.attn_kernel_constraint = constraints.get(att_kernel_constraint)
        self.supports_masking = False

        # Populated by build()
        self.kernels = []  # Layer kernels for attention heads
        self.attn_kernels = []  # Attention kernels for attention heads

        if attn_heads_reduction == 'concat':
            # Output will have shape (..., K * F')
            self.output_dim = self.F_ * self.attn_heads
        else:
            # Output will have shape (..., F')
            self.output_dim = self.F_

        super(GraphAttention, self).__init__(**kwargs)
Example #22
    def __init__(self,
                 units,
                 cnn_encoder_k=None,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """
        :param units: decoder hidden_size, d in WTL (in WTL the LSTM hidden_size equals the dimension d of V_i)
        :param cnn_encoder_k: the number k of feature vectors produced by the cnn_encoder
        :param kernel_initializer:
        :param recurrent_initializer:
        :param bias_initializer:
        :param kernel_regularizer:
        :param bias_regularizer:
        :param activity_regularizer:
        :param kernel_constraint:
        :param bias_constraint:
        :param kwargs:
        """
        self.units = units
        self.state_size = (units, units)

        self.cnn_encoder_k = cnn_encoder_k

        if self.cnn_encoder_k is None:
            raise ValueError('cnn_encoder_k cannot be None')

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # No separate recurrent regularizer is exposed, so the kernel's is reused.
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        # Likewise, the recurrent constraint reuses the kernel constraint.
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AdaptiveAttentionLSTMCell, self).__init__(**kwargs)
Example #23
 def __init__(self,
              trainable_beta=False,
              beta_initializer='ones',
              **kwargs):
     super(SwishBeta, self).__init__(**kwargs)
     self.supports_masking = True
     self.trainable_beta = trainable_beta
     self.beta_initializer = initializers.get(beta_initializer)
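The activation itself is swish with a learnable slope, f(x) = x * sigmoid(beta * x). A minimal sketch of the forward pass, assuming build() (not shown in the source) created self.beta from beta_initializer:

from keras import backend as K

def call(self, inputs):
    # f(x) = x * sigmoid(beta * x); beta is trainable when trainable_beta=True
    return inputs * K.sigmoid(self.beta * inputs)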
Example #24
 def __init__(self, mode="bilinear", attention_embedding_size=None, inner_activation="tanh",
              return_attention_matrix=False, initializer="glorot_uniform", **kwargs):
     self.mode = mode
     self.initializer = initializers.get(initializer)
     self.attention_embedding_size = attention_embedding_size
     self.inner_activation = activations.get(inner_activation)
     self.return_attention_matrix = return_attention_matrix
     super(CrossAttention, self).__init__(**kwargs)
Example #25
    def __init__(self, units, rank,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(CLSTMCell, self).__init__(**kwargs)
        self.units = units
        self.rank = rank
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        self.state_size = (self.rank, self.units)
        self.output_size = self.rank
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
Example #26
    def __init__(self,
                 output_dim,
                 support,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        self.output_dim = output_dim
        self.support = list(support)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernels = []
        self.bias = None

        super(GraphConvolution, self).__init__(**kwargs)
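With a list of support matrices, a graph convolution of this shape typically computes activation(sum_i A_i . X . W_i + b), one kernel per support. A minimal sketch, assuming build() (not shown in the source) filled self.kernels and self.bias to match:

from keras import backend as K

def call(self, inputs):
    # inputs: node features X followed by the support matrices A_1..A_k
    features, supports = inputs[0], inputs[1:]
    output = sum(K.dot(a, K.dot(features, w))
                 for a, w in zip(supports, self.kernels))
    if self.use_bias:
        output = output + self.bias
    return self.activation(output)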
Example #27
    def __init__(self,
                 init='glorot_uniform',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get(init)
        # Note: the kernel initializer is fixed here and ignores the `init` argument.
        self.kernel_initializer = initializers.get('glorot_uniform')

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionWithContext, self).__init__(**kwargs)
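For reference, attention with a learned context vector (Yang et al.'s hierarchical attention) scores each timestep as u_t = tanh(W h_t + b) and weights it by softmax(u_t . u). A sketch of that call(), assuming build() (not shown in the source) created self.W, self.b and the context vector self.u:

from keras import backend as K

def call(self, x, mask=None):
    uit = K.tanh(K.dot(x, self.W) + self.b)        # (batch, time, features)
    ait = K.softmax(K.sum(uit * self.u, axis=-1))  # (batch, time)
    return K.sum(x * K.expand_dims(ait), axis=1)   # (batch, features)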
Example #28
 def __init__(self,
              embed_size,
              embeddings_initializer='uniform',
              embeddings_regularizer=None,
              **kwargs):
     self.embed_size = embed_size
     self.embeddings_initializer = initializers.get(embeddings_initializer)
     self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
     super(DenseEmbedding, self).__init__(**kwargs)
Example #29
    def __init__(self,
                 feature_map_num,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 **kwargs):
        self.feature_map_num = feature_map_num
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)

        super(CompressInteractLayer, self).__init__(**kwargs)
Example #30
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              conv_select_activation=None,
              select_weight_init_value=0,
              **kwargs):
     super(ConvBank, self).__init__(**kwargs)
     rank = 2
     self.rank = 2
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
      self.select_activation = None if conv_select_activation is None else activations.get(
          conv_select_activation)
     self.select_weight_init = initializers.Constant(
         select_weight_init_value)
Example #31
    def __init__(self,
                 units=50,
                 normalize=False,
                 init_diag=False,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 **kwargs):

        super(SpatialGRU, self).__init__(**kwargs)
        self.units = units
        self.normalize = normalize
        self.init_diag = init_diag
        self.supports_masking = True
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)