Example #1
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None,
                 U_regularizer=None,
                 b_regularizer=None,
                 dropout_W=0.,
                 dropout_U=0.,
                 **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W = dropout_W
        self.dropout_U = dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(MGU, self).__init__(**kwargs)
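
These Keras 1-era constructors lean on the `get` helpers to turn string identifiers into callables (Keras 1's `initializations` module became `initializers` in Keras 2). A minimal sketch of that resolution against the modern tf.keras API, assuming TensorFlow 2 is installed:

from tensorflow.keras import activations, initializers, regularizers

act = activations.get('tanh')          # -> the tanh function
init = initializers.get('orthogonal')  # -> an Orthogonal initializer instance
reg = regularizers.get(None)           # -> None passes through unchanged
print(act, init, reg)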
Example #2
 def __init__(self, output_dim,
              init='glorot_uniform', inner_init='orthogonal',
              activation='tanh', inner_activation='hard_sigmoid', **kwargs):
   self.output_dim       = output_dim
   self.init             = initializations.get(init)
   self.inner_init       = initializations.get(inner_init)
   self.activation       = activations.get(activation)
   self.inner_activation = activations.get(inner_activation)
   super(DecoderGRU, self).__init__(**kwargs)
Example #3
    def __init__(self,
                 units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 attention_activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 attention_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 attention_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 attention_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_attention=False,
                 implementation=1,
                 **kwargs):
        super(AttentionLSTM, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.attention_activation = activations.get(attention_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.attention_initializer = initializers.get(attention_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.attention_regularizer = regularizers.get(attention_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.attention_constraint = constraints.get(attention_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.return_attention = return_attention
        self.state_spec = [
            InputSpec(shape=(None, self.units)),
            InputSpec(shape=(None, self.units))
        ]
        self.implementation = implementation
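
The two `InputSpec` entries registered in `state_spec` mirror the two recurrent states of an LSTM, the hidden state h and the cell state c. A quick sketch with the stock tf.keras LSTM showing the same pair (assuming the TensorFlow 2 API):

import tensorflow as tf

inputs = tf.keras.Input(shape=(10, 8))
outputs, state_h, state_c = tf.keras.layers.LSTM(16, return_state=True)(inputs)
print(state_h.shape, state_c.shape)  # both (None, 16)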
Example #4
    def __init__(self, output_dim, attention_vec, attn_activation='tanh',
                 attn_inner_activation='tanh', single_attn=False,
                 n_attention_dim=None, **kwargs):
        self.attention_vec = attention_vec
        self.attn_activation = activations.get(attn_activation)
        self.attn_inner_activation = activations.get(attn_inner_activation)
        self.single_attention_param = single_attn
        self.n_attention_dim = output_dim if n_attention_dim is None else n_attention_dim

        super(AttentionLSTM, self).__init__(output_dim, **kwargs)
Example #5
    def __init__(
            self,
            units,  # units in the decoder
            steps,  # number of output steps
            output_dim,  # dimensionality of the output
            atten_units,  # attention units in the dense layer
            gmax,  # number of maxout sub-units
            activation='tanh',
            recurrent_activation='hard_sigmoid',
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            kernel_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            recurrent_constraint=None,
            bias_constraint=None,
            dropout=0.,
            recurrent_dropout=0.,
            return_probabilities=False,
            **kwargs):

        self.units = units
        self.steps = steps
        self.output_dim = output_dim
        self.atten_units = atten_units
        self.activation = activations.get(activation)
        self.gmax = gmax
        self.recurrent_activation = activations.get(recurrent_activation)

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self._dropout_mask = None
        self._recurrent_dropout_mask = None

        self.return_probabilities = return_probabilities
        """if self.dropout or self.recurrent_dropout:
            self.uses_learning_phase = True"""
        super(attention_LSTM, self).__init__(**kwargs)
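
The `gmax` argument above refers to maxout: each output unit takes the maximum over `gmax` affine pieces. A sketch of the standard maxout computation on plain tensors (an illustration, not this layer's `call`):

import tensorflow as tf

batch, d_in, d_out, gmax = 2, 8, 4, 3
x = tf.random.normal((batch, d_in))
W = tf.random.normal((d_in, d_out * gmax))

z = tf.reshape(tf.matmul(x, W), (batch, d_out, gmax))  # gmax pieces per unit
y = tf.reduce_max(z, axis=-1)                          # (batch, d_out)
print(y.shape)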
Example #6
 def __init__(self, output_dim, att_dim, attn_activation='tanh',
              attn_inner_activation='tanh',
              single_attn=False, **kwargs):
     '''
         attention_vec: the attention vector fed into this layer; the layer's
             attention output is computed from it
         single_attention_param: whether the elements of the output vector at
             each time step t share a single attention value
     '''
     self.attn_activation = activations.get(attn_activation)
     self.attn_inner_activation = activations.get(attn_inner_activation)
     self.single_attention_param = single_attn
     self.input_spec = None
     self.att_dim = att_dim
     super(AttentionLSTM, self).__init__(output_dim, **kwargs)
Example #7
    def __init__(self,
                 output_dim,
                 attn_activation='tanh',
                 attn_inner_activation='tanh',
                 single_attn=True,
                 n_attention_dim=None,
                 **kwargs):
        #self.attention_vec = attention_vec
        self.attn_activation = activations.get(attn_activation)
        self.attn_inner_activation = activations.get(attn_inner_activation)
        self.single_attention_param = single_attn
        self.n_attention_dim = output_dim if n_attention_dim is None else n_attention_dim

        super(AttentionLSTM, self).__init__(output_dim, **kwargs)
Example #8
 def __init__(self, units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(MultiHead, self).__init__(**kwargs)
     self.units = units
      self.heads = 8  # fixed number of attention heads
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
      self.supports_masking = True
      # Three 3-D inputs are expected (e.g. query, key, value); the earlier
      # single InputSpec(min_ndim=2) was immediately overwritten and is dropped.
      self.input_spec = [InputSpec(min_ndim=3), InputSpec(min_ndim=3), InputSpec(min_ndim=3)]
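
The `input_dim`-to-`input_shape` promotion at the top of this constructor (also used by several other examples here) can be read in isolation; `promote_input_dim` below is a hypothetical standalone rendering of it:

def promote_input_dim(kwargs):
    # Legacy `input_dim` becomes the modern `input_shape` tuple.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    return kwargs

print(promote_input_dim({'input_dim': 32}))         # {'input_shape': (32,)}
print(promote_input_dim({'input_shape': (5, 32)}))  # left untouched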
Example #9
 def __init__(self, filters, kernel_size, rank=1, strides=1, padding='valid', data_format='channels_last',
              dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform',
              bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
              kernel_constraint=None, bias_constraint=None, type=1, **kwargs):
     super(Conv1D_linearphaseType, self).__init__(**kwargs)
     self.rank = rank
     self.filters = filters
      self.kernel_size_ = kernel_size  # full FIR length; only half is stored
      if kernel_size % 2:
          self.kernel_size = conv_utils.normalize_tuple(kernel_size // 2 + 1, rank, 'kernel_size')
      else:
          self.kernel_size = conv_utils.normalize_tuple(kernel_size // 2, rank, 'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
      if not 1 <= type <= 4:
          raise ValueError('FIR type should be between 1 and 4')
      self.type = type
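
The kernel-size halving above stores only the free taps of a symmetric (linear-phase) FIR filter. A standalone sketch of the arithmetic, with `stored_kernel_length` as a hypothetical helper name:

def stored_kernel_length(kernel_size):
    # Linear-phase FIR kernels are symmetric, so only about half the taps
    # are free parameters; odd lengths keep an unshared center tap.
    return kernel_size // 2 + 1 if kernel_size % 2 else kernel_size // 2

print(stored_kernel_length(7))  # 4 (odd: center tap is unshared)
print(stored_kernel_length(8))  # 4 (even: perfect mirror)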
Example #10
    def __init__(self,
                 units,
                 bond_classes,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(EdgeNetwork, self).__init__(**kwargs)
        self.units = units
        self.bond_classes = bond_classes

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
Example #11
 def __init__(self, layer, attention_vec, attn_activation='tanh', single_attention_param=False, **kwargs):
     assert isinstance(layer, (LSTM, GRU))
     super(AttentionWrapper, self).__init__(layer, **kwargs)
     self.supports_masking = True
     self.attention_vec = attention_vec
     self.attn_activation = activations.get(attn_activation)
     self.single_attention_param = single_attention_param
Example #12
    def __init__(self, nb_filter, nb_row, nb_col,
                 init='glorot_uniform', activation=None, weights=None,
                 border_mode='valid', subsample=(1, 1),
                 dim_ordering='default',
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        if border_mode != 'valid':
            raise ValueError('Invalid border mode for LocallyConnected2D '
                             '(only "valid" is supported):', border_mode)
        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init, dim_ordering=dim_ordering)
        self.activation = activations.get(activation)

        self.border_mode = border_mode
        self.subsample = tuple(subsample)
        if dim_ordering not in {'tf', 'th'}:
            raise ValueError('`dim_ordering` must be in {tf, th}.')
        self.dim_ordering = dim_ordering

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(LocallyConnected2D, self).__init__(**kwargs)
Example #13
    def __init__(self,
                 output_dim,
                 mem_vec_dim,
                 init='glorot_uniform',
                 activation='linear',
                 weights=None,
                 activity_regularizer=None,
                 input_dim=None,
                 **kwargs):
        '''
        Params:
            output_dim: dimensionality of the output
            mem_vec_dim: dimensionality of the query vector
        '''
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.mem_vector_dim = mem_vec_dim

        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.initial_weights = weights

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(MemoryNet, self).__init__(**kwargs)
Example #14
 def __init__(self,
              units,
              map=None,
              nonzero_ind=None,
              kernel_initializer='glorot_uniform',
              W_regularizer=None,
              activation='tanh',
              use_bias=True,
              bias_initializer='zeros',
              bias_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
      self.units = units
      self.activation = activation  # raw identifier (resolved below as activation_fn)
     self.map = map
     self.nonzero_ind = nonzero_ind
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_regularizer = regularizers.get(W_regularizer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activation_fn = activations.get(activation)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     super(SparseTF, self).__init__(**kwargs)
Example #15
    def __init__(self, filters=1, kernel_size=80, rank=1, strides=1, padding='valid',
                 data_format='channels_last', dilation_rate=1, activation=None, use_bias=True,
                 fsHz=1000.,
                 fc_initializer=initializers.RandomUniform(minval=10, maxval=400),
                 n_order_initializer=initializers.constant(4.),
                 amp_initializer=initializers.constant(10 ** 5),
                 beta_initializer=initializers.RandomNormal(mean=30, stddev=6),
                 bias_initializer='zeros',
                 **kwargs):
        super(Conv1D_gammatone, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size_ = kernel_size
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.bias_initializer = initializers.get(bias_initializer)
        self.fc_initializer = initializers.get(fc_initializer)
        self.n_order_initializer = initializers.get(n_order_initializer)
        self.amp_initializer = initializers.get(amp_initializer)
        self.beta_initializer = initializers.get(beta_initializer)
        self.input_spec = InputSpec(ndim=self.rank + 2)

        self.fsHz = fsHz
        self.t = tf.range(start=0, limit=kernel_size / float(fsHz),
                          delta=1 / float(fsHz), dtype=K.floatx())
        self.t = tf.expand_dims(input=self.t, axis=-1)
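
`tf.range` with fractional `limit` and `delta` can gain or lose a sample to floating-point rounding. A float-robust equivalent of the time-vector construction above, shown as a sketch rather than the author's code:

import tensorflow as tf

kernel_size, fsHz = 80, 1000.
# Float-robust equivalent of tf.range(0, kernel_size / fsHz, 1 / fsHz):
t = tf.range(kernel_size, dtype=tf.float32) / fsHz  # (kernel_size,)
t = tf.expand_dims(t, axis=-1)                      # (kernel_size, 1)
print(t.shape)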
Example #16
    def __init__(self, nb_filter, filter_length,
                 init='glorot_uniform', activation=None, weights=None,
                 border_mode='valid', subsample_length=1,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, input_dim=None, input_length=None, **kwargs):
        if border_mode != 'valid':
            raise ValueError('Invalid border mode for LocallyConnected1D '
                             '(only "valid" is supported):', border_mode)
        self.nb_filter = nb_filter
        self.filter_length = filter_length
        self.init = initializations.get(init, dim_ordering='th')
        self.activation = activations.get(activation)

        self.border_mode = border_mode
        self.subsample_length = subsample_length

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=3)]
        self.initial_weights = weights
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(LocallyConnected1D, self).__init__(**kwargs)
Example #17
 def __init__(self, layer, attention_vec, attn_activation='tanh', single_attention_param=False, **kwargs):
     assert isinstance(layer, LSTM)
     self.supports_masking = True
     self.attention_vec = attention_vec
     self.attn_activation = activations.get(attn_activation)
     self.single_attention_param = single_attention_param
     super(AttentionLSTMWrapper, self).__init__(layer, **kwargs)
Example #18
    def __init__(self, filters, kernel_size=(2, 2), strides=None, padding='valid', data_format=None,
                 kernel_initializer='uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 activation='linear',
                 with_softmax=True,
                 **kwargs):
        super(KernelBasedPooling, self).__init__(kernel_size, strides, padding,
                                                 data_format, **kwargs)
        self.rank = 2
        self.with_softmax = with_softmax
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True
        self.trainable = True
Example #19
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              W_regularizer=None,
              bias_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
      self.units = units
      self.activation = activation  # raw identifier (resolved below as activation_fn)
      self.activation_fn = activations.get(activation)
      self.use_bias = use_bias
      self.bias_initializer = initializers.get(bias_initializer)
      self.kernel_initializer = initializers.get(kernel_initializer)
      self.W_regularizer = W_regularizer
      self.kernel_regularizer = regularizers.get(W_regularizer)
      self.bias_regularizer = regularizers.get(bias_regularizer)
      self.kernel_constraint = constraints.get(kernel_constraint)
      self.bias_constraint = constraints.get(bias_constraint)
     super(Diagonal, self).__init__(**kwargs)
Example #20
 def __init__(self, mode="bilinear", attention_embedding_size=None, inner_activation="tanh",
              return_attention_matrix=False, initializer="glorot_uniform", **kwargs):
     self.mode = mode
     self.initializer = initializers.get(initializer)
     self.attention_embedding_size = attention_embedding_size
     self.inner_activation = activations.get(inner_activation)
     self.return_attention_matrix = return_attention_matrix
     super(CrossAttention, self).__init__(**kwargs)
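
The `call` of this layer is not shown, but `mode='bilinear'` conventionally means a score of the form a·W·bᵀ between the two inputs. A hypothetical sketch of that scoring, under this assumption:

import tensorflow as tf

a = tf.random.normal((2, 5, 16))  # (batch, len_a, dim)
b = tf.random.normal((2, 7, 16))  # (batch, len_b, dim)
W = tf.random.normal((16, 16))    # bilinear form

scores = tf.einsum('bid,de,bje->bij', a, W, b)  # (batch, len_a, len_b)
attn = tf.nn.softmax(scores, axis=-1)
print(attn.shape)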
Example #21
 def __init__(self,
              output_dim,
              att_dim,
              attn_activation='tanh',
              attn_inner_activation='tanh',
              single_attn=False,
              **kwargs):
      '''
          attention_vec: the attention vector fed into this layer; the layer's
              attention output is computed from it
          single_attention_param: whether the elements of the output vector at
              each time step t share a single attention value
      '''
     self.attn_activation = activations.get(attn_activation)
     self.attn_inner_activation = activations.get(attn_inner_activation)
     self.single_attention_param = single_attn
     self.input_spec = None
     self.att_dim = att_dim
     super(AttentionLSTM, self).__init__(output_dim, **kwargs)
Example #22
 def __init__(self,
              units=None,
              activation='tanh',
              recurrent_activation='hard_sigmoid',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal',
              bias_initializer='zeros',
              kernel_regularizer=None,
              recurrent_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              recurrent_constraint=None,
              bias_constraint=None,
              unit_forget_bias=True,
              dropout=0.,
              recurrent_dropout=0.,
              implementation=1,
              **kwargs):
     if units is None:
         assert 'output_dim' in kwargs, 'Missing argument: units'
     else:
         kwargs['output_dim'] = units
     self.activation = activations.get(activation)
     self.recurrent_activation = activations.get(recurrent_activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.recurrent_initializer = initializers.get(recurrent_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.recurrent_constraint = constraints.get(recurrent_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.unit_forget_bias = unit_forget_bias
     self.recurrent_dropout = min(1., max(0., recurrent_dropout))
     self.dropout = min(1., max(0., dropout))
     self.implementation = implementation
     self._trainable_weights = []
     self._dropout_mask = None
     self._recurrent_dropout_mask = None
     super(ExtendedRNNCell, self).__init__(**kwargs)
Example #23
    def __init__(self, units, rank,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(CLSTMCell, self).__init__(**kwargs)
        self.units = units
        self.rank = rank
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        self.state_size = (self.rank, self.units)
        self.output_size = self.rank
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
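
A cell that exposes `state_size` and `output_size`, as above, plugs directly into `keras.layers.RNN`. A minimal toy cell illustrating that contract (a sketch, not CLSTMCell itself):

import tensorflow as tf

class ToyCell(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super(ToyCell, self).__init__(**kwargs)
        self.units = units
        self.state_size = units    # one state tensor of shape (batch, units)
        self.output_size = units

    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[-1], self.units),
                                      initializer='glorot_uniform')

    def call(self, inputs, states):
        # One step: mix the input with the previous state.
        h = tf.tanh(tf.matmul(inputs, self.kernel) + states[0])
        return h, [h]

layer = tf.keras.layers.RNN(ToyCell(16))
print(layer(tf.zeros((2, 5, 8))).shape)  # (2, 16)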
Example #24
 def __init__(self,
              output_dim,
              attention_vec,
              attn_activation='tanh',
              attn_inner_activation='tanh',
              single_attn=False,
              n_attention_dim=None,
              **kwargs):
      '''
          attention_vec: the attention vector fed into this layer; the layer's
              attention output is computed from it.
              If attention_vec is None, attention is not used.
      '''
     self.attention_vec = attention_vec
     self.attn_activation = activations.get(attn_activation)
     self.attn_inner_activation = activations.get(attn_inner_activation)
     self.single_attention_param = single_attn
     self.n_attention_dim = output_dim if n_attention_dim is None else n_attention_dim
     super(AttentionLSTM, self).__init__(output_dim, **kwargs)
Example #25
 def __init__(self,
              output_dim,
              init='glorot_uniform',
              attn_activation='tanh',
              **kwargs):
     self.output_dim = output_dim
     self.init = initializations.get(init)
     self.attn_activation = activations.get(attn_activation)
     super(AttentionLayer, self).__init__(**kwargs)
Example #26
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              conv_select_activation=None,
              select_weight_init_value=0,
              **kwargs):
     super(ConvBank, self).__init__(**kwargs)
      rank = 2  # ConvBank is a 2-D convolution layer
      self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
      self.select_activation = (None if conv_select_activation is None
                                else activations.get(conv_select_activation))
     self.select_weight_init = initializers.Constant(
         select_weight_init_value)
Example #27
 def apply_gates(self, inputs, input_shape, axis):
     gates = K.dot(K.mean(inputs, axis=axis), self.gates_kernel)
     gates = K.bias_add(gates, self.gates_bias, data_format='channels_last')
     gates = activations.get('softmax')(gates)
     inputs_mul_gates = K.reshape(gates, [-1, self.k] + [1] *
                                  (len(input_shape) - 1)) * K.expand_dims(
                                      inputs, axis=1)
     # inputs_mul_gates = K.stack([K.reshape(gates[:, k], [-1] + [1] * (len(input_shape) - 1)) * inputs
     #                             for k in range(self.k)], axis=0)
     return inputs_mul_gates
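
The gating above is a softmax over k branch scores, broadcast so every branch receives a per-sample weight. The same computation on plain tensors, as a sketch:

import tensorflow as tf

batch, k, features = 4, 3, 8
scores = tf.random.normal((batch, k))        # one score per branch
inputs = tf.random.normal((batch, features))

gates = tf.nn.softmax(scores, axis=-1)               # (batch, k)
weighted = gates[:, :, None] * inputs[:, None, :]    # (batch, k, features)
print(weighted.shape)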
Example #28
    def __init__(self, units, attn_activation='tanh', single_attention_param=False,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):

        self.supports_masking = True
        self.attn_activation = activations.get(attn_activation)
        self.single_attention_param = single_attention_param

        cell = AttentionLSTMCell(units,
                                 attn_activation=attn_activation,
                                 single_attention_param=single_attention_param,
                                 activation=activation,
                                 recurrent_activation=recurrent_activation,
                                 use_bias=use_bias,
                                 kernel_initializer=kernel_initializer,
                                 recurrent_initializer=recurrent_initializer,
                                 unit_forget_bias=unit_forget_bias,
                                 bias_initializer=bias_initializer,
                                 kernel_regularizer=kernel_regularizer,
                                 recurrent_regularizer=recurrent_regularizer,
                                 bias_regularizer=bias_regularizer,
                                 kernel_constraint=kernel_constraint,
                                 recurrent_constraint=recurrent_constraint,
                                 bias_constraint=bias_constraint,
                                 dropout=dropout,
                                 recurrent_dropout=recurrent_dropout,
                                 implementation=implementation)
        super(AttentionLSTM, self).__init__(cell,
                                            return_sequences=return_sequences,
                                            return_state=return_state,
                                            go_backwards=go_backwards,
                                            stateful=stateful,
                                            unroll=unroll,
                                            **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)
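
This is the Keras 2 cell-plus-RNN composition: the cell holds the per-step logic and `RNN` drives it over time. A sketch of the same pattern with the built-in `LSTMCell` standing in for the custom `AttentionLSTMCell`:

import tensorflow as tf

cell = tf.keras.layers.LSTMCell(32, dropout=0.1, recurrent_dropout=0.1)
rnn = tf.keras.layers.RNN(cell, return_sequences=True, return_state=True)
out, h, c = rnn(tf.zeros((2, 7, 16)))
print(out.shape, h.shape, c.shape)  # (2, 7, 32) (2, 32) (2, 32)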
Example #29
    def __init__(
            self,
            units,
            output_dim,
            activation='tanh',
            return_probabilities=False,
            name='AttentionDecoder',
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            # return_sequences=True,
            **kwargs):
        """
        self.return_sequences 默认是True
        :param units:
        :param output_dim:
        :param activation:
        :param return_probabilities:
        :param name:
        :param kernel_initializer:   初始权重
        :param recurrent_initializer:
        :param bias_initializer:
        :param kernel_regularizer: 正则化
        :param bias_regularizer:
        :param activity_regularizer:
        :param kernel_constraint:
        :param bias_constraint:
        :param kwargs:
        """
        self.units = units
        self.output_dim = output_dim
        self.return_probabilities = return_probabilities
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # No separate recurrent_* arguments are exposed; the recurrent weights
        # reuse the kernel regularizer and constraint.
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # self.return_sequences = return_sequences  # must return sequences

        super(AttentionDecoder, self).__init__(**kwargs)
        self.name = name
Example #30
    def __init__(self,
                 output_dim,
                 attention_vec,
                 attn_activation='tanh',
                 single_attention_param=False,
                 **kwargs):
        self.attention_vec = attention_vec
        self.attn_activation = activations.get(attn_activation)
        self.single_attention_param = single_attention_param

        super(AttentionLSTM, self).__init__(output_dim, **kwargs)
Example #31
    def __init__(self, units,
                 activation='linear',
                 kernel_initializer='glorot_uniform',
                 **kwargs):

        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        self.units = units  # k
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        super(BilinearLayer, self).__init__(**kwargs)
Example #32
 def __init__(self,
              activation=None,
              use_bias=True,
              bias_initializer='zeros',
              bias_regularizer=None,
              **kwargs):
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.bias_initializer = initializers.get(bias_initializer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     super(OutputLayer, self).__init__(**kwargs)
Example #33
    def __init__(self,
                 output_dim,
                 attention_vec,
                 attn_activation='tanh',
                 single_attention_param=False,
                 **kwargs):
        self.attention_vec = attention_vec
        self.attn_activation = activations.get(attn_activation)
        self.single_attention_param = single_attention_param

        super(AttentionLSTM, self).__init__(output_dim, **kwargs)
Example #34
    def call(self, inputs):
        output1 = K.dot(inputs, self.kernel1)
        output2 = K.dot(inputs, self.kernel2)
        output3 = K.dot(inputs, self.kernel3)
        output4 = K.dot(inputs, self.kernel4)
        output5 = K.dot(inputs, self.kernel5)
        output6 = K.dot(inputs, self.kernel6)

        if self.use_bias:
            output1 = K.bias_add(output1,
                                 self.bias1,
                                 data_format='channels_last')
            output2 = K.bias_add(output2,
                                 self.bias2,
                                 data_format='channels_last')
            output3 = K.bias_add(output3,
                                 self.bias3,
                                 data_format='channels_last')
            output4 = K.bias_add(output4,
                                 self.bias4,
                                 data_format='channels_last')
            output5 = K.bias_add(output5,
                                 self.bias5,
                                 data_format='channels_last')
            output6 = K.bias_add(output6,
                                 self.bias6,
                                 data_format='channels_last')

        # 'X_1'..'X_6' are custom activation identifiers; they resolve only if
        # registered with Keras beforehand (see the note after this example).
        # Applying them to locals avoids mutating self.activation inside call().
        output1 = activations.get('X_1')(output1)
        output2 = activations.get('X_2')(output2)
        output3 = activations.get('X_3')(output3)
        output4 = activations.get('X_4')(output4)
        output5 = activations.get('X_5')(output5)
        output6 = activations.get('X_6')(output6)

        output_all = concatenate(
            [output1, output2, output3, output4, output5, output6])

        output_all = K.dot(output_all, self.kernel_all)
        output_all = K.bias_add(output_all,
                                self.bias_all,
                                data_format='channels_last')
        output_all = activations.get('linear')(output_all)  # identity

        return output_all
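
'X_1' through 'X_6' are not built-in activation names, so `activations.get` raises unless they are registered first. One way to make such identifiers resolvable in tf.keras, with `x_1` as a stand-in definition:

import tensorflow as tf
from tensorflow.keras import activations

def x_1(x):
    return tf.nn.relu(x)  # stand-in body; the real X_1 is project-specific

tf.keras.utils.get_custom_objects()['X_1'] = x_1
print(activations.get('X_1'))  # now resolves to x_1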
Example #35
 def __init__(self, dims, **kwargs):
     super(AttentionLayer, self).__init__(**kwargs)
     self.dims = dims
     self.act_tanh = activations.get('tanh')
     self.w = self.add_weight(name='w',
                              shape=(self.dims, self.dims),
                              initializer='glorot_uniform',
                              trainable=True)
     self.b = self.add_weight(name='b',
                              shape=(self.dims, ),
                              initializer='zeros',
                              trainable=True)
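
Creating weights in `__init__`, as this layer does, works only because the shapes depend solely on `dims`; shape-dependent weights belong in `build`. A sketch showing both placements side by side, with `Affine` as a hypothetical layer:

import tensorflow as tf

class Affine(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super(Affine, self).__init__(**kwargs)
        self.units = units
        # Shape known up front: may be created in __init__, as above.
        self.b = self.add_weight(name='b', shape=(units,), initializer='zeros')

    def build(self, input_shape):
        # Shape depends on the input: deferred to build().
        self.w = self.add_weight(name='w',
                                 shape=(input_shape[-1], self.units),
                                 initializer='glorot_uniform')

    def call(self, x):
        return tf.matmul(x, self.w) + self.b

print(Affine(4)(tf.zeros((2, 3))).shape)  # (2, 4)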
Example #36
    def __init__(self,
                 units,
                 out_units,
                 hidden_layers=1,
                 dropout_rate=0.0,
                 random_input_order=False,
                 activation='elu',
                 out_activation='sigmoid',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 out_kernel_initializer='glorot_uniform',
                 out_bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super(MaskingDense, self).__init__(**kwargs)

        self.input_sel = None
        self.random_input_order = random_input_order
        self.rate = min(1., max(0., dropout_rate))
        self.kernel_sels = []
        self.units = units
        self.out_units = out_units
        self.hidden_layers = hidden_layers
        self.activation = activations.get(activation)
        self.out_activation = activations.get(out_activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.out_kernel_initializer = initializers.get(out_kernel_initializer)
        self.out_bias_initializer = initializers.get(out_bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
Example #37
    def __init__(self,
                 units=50,
                 normalize=False,
                 init_diag=False,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 **kwargs):

        super(SpatialGRU, self).__init__(**kwargs)
        self.units = units
        self.normalize = normalize
        self.init_diag = init_diag
        self.supports_masking = True
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
Example #38
    def __init__(self, output_dim, mem_vec_dim, init='glorot_uniform', activation='linear', weights=None,
                 activity_regularizer=None, input_dim=None, **kwargs):
        '''
        Params:
            output_dim: dimensionality of the output
            mem_vec_dim: dimensionality of the query vector
        '''
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.mem_vector_dim = mem_vec_dim

        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.initial_weights = weights

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(MemoryNet, self).__init__(**kwargs)