Example #1
    def __init__(self, filters,
                 kernel_size=(2, 2),
                 strides=None,
                 padding='valid',
                 data_format=None,
                 kernel_initializer='uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 activation='linear',
                 with_softmax=True,
                 **kwargs):
        super(KernelBasedPooling, self).__init__(kernel_size, strides, padding,
                                                 data_format, **kwargs)
        self.rank = 2
        self.with_softmax = with_softmax
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True
        self.trainable = True
Example #2
 def __init__(self,
              alpha_initializer='zeros',
              b_initializer='zeros',
              S=1,
              alpha_regularizer=None,
              b_regularizer=None,
              alpha_constraint=None,
              b_constraint=None,
              shared_axes=None,
              **kwargs):
     super(APLUnit, self).__init__(**kwargs)
     self.supports_masking = True
     self.alpha_initializer = initializers.get(alpha_initializer)
     self.alpha_regularizer = regularizers.get(alpha_regularizer)
     self.alpha_constraint = constraints.get(alpha_constraint)
     self.b_initializer = initializers.get(b_initializer)
     self.b_regularizer = regularizers.get(b_regularizer)
     self.b_constraint = constraints.get(b_constraint)
     if shared_axes is None:
         self.shared_axes = None
     elif not isinstance(shared_axes, (list, tuple)):
         self.shared_axes = [shared_axes]
     else:
         self.shared_axes = list(shared_axes)
     self.S = S
     self.alpha_arr = []
     self.b_arr = []
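A side note on the `shared_axes` normalization above: a bare scalar is wrapped in a one-element list, a tuple is converted to a list, and None passes through untouched. A minimal sketch of the three branches (assuming APLUnit is importable from this module; the axis values are arbitrary):

# the constructor alone is enough to observe the normalized attribute
assert APLUnit(shared_axes=None).shared_axes is None
assert APLUnit(shared_axes=1).shared_axes == [1]
assert APLUnit(shared_axes=(1, 2)).shared_axes == [1, 2]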
Example #3
    def __init__(self,
                 units,
                 encoder_ts,
                 encoder_latdim,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        self.units = units
        self.state_size = units

        #self.x_seq = encoder_H
        self.encoder_ts = encoder_ts  # the encoder's timesteps
        self.encoder_latDim = encoder_latdim  # the encoder's latent dimension

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)  # reuses the kernel regularizer (no separate argument in the signature)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(kernel_constraint)  # reuses the kernel constraint
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionRNNCell, self).__init__(**kwargs)
Example #4
 def __init__(self, units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(MultiHead, self).__init__(**kwargs)
     self.units = units
     self.heads = 8
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.supports_masking = True
     # query, key, and value inputs must each be at least 3-D
     self.input_spec = [InputSpec(min_ndim=3), InputSpec(min_ndim=3), InputSpec(min_ndim=3)]
Example #5
 def __init__(self, filters, kernel_size, rank=1, strides=1, padding='valid', data_format='channels_last',
              dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform',
              bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
              kernel_constraint=None, bias_constraint=None, type=1, **kwargs):
     super(Conv1D_linearphaseType, self).__init__(**kwargs)
     self.rank = rank
     self.filters = filters
      self.kernel_size_ = kernel_size  # full (un-halved) kernel length
     if kernel_size % 2:
         self.kernel_size = conv_utils.normalize_tuple(kernel_size // 2 + 1, rank, 'kernel_size')
     else:
         self.kernel_size = conv_utils.normalize_tuple(kernel_size // 2, rank, 'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
      if not 1 <= type <= 4:
          raise ValueError('FIR type should be between 1-4')
      self.type = type
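The even/odd branch above keeps only half of a symmetric (linear-phase) FIR kernel: a full odd length 2M+1 stores M+1 trainable taps, a full even length 2M stores M. A small sketch of that bookkeeping (assuming the class is importable; the filter count is arbitrary):

layer_odd = Conv1D_linearphaseType(filters=4, kernel_size=9)   # 9 // 2 + 1 = 5 taps stored
layer_even = Conv1D_linearphaseType(filters=4, kernel_size=8)  # 8 // 2 = 4 taps stored
assert layer_odd.kernel_size == (5,)
assert layer_even.kernel_size == (4,)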
Example #6
File: local.py Project: bjerva/keras
    def __init__(self, nb_filter, filter_length,
                 init='glorot_uniform', activation=None, weights=None,
                 border_mode='valid', subsample_length=1,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, input_dim=None, input_length=None, **kwargs):
        if border_mode != 'valid':
            raise ValueError('Invalid border mode for LocallyConnected1D '
                             '(only "valid" is supported):', border_mode)
        self.nb_filter = nb_filter
        self.filter_length = filter_length
        self.init = initializations.get(init, dim_ordering='th')
        self.activation = activations.get(activation)

        self.border_mode = border_mode
        self.subsample_length = subsample_length

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=3)]
        self.initial_weights = weights
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(LocallyConnected1D, self).__init__(**kwargs)
Example #7
    def __init__(self,
                 step_dim,
                 get_alpha=False,
                 return_sequence=False,
                 W_regularizer=None,
                 b_regularizer=None,
                 L_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 L_constraint=None,
                 bias=False,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.l_init = initializers.constant(value=0.5)
        self.return_sequence = return_sequence
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.L_regularizer = regularizers.get(L_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.L_constraint = constraints.get(L_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.get_alpha = get_alpha
        self.features_dim = 0
        super(ISA, self).__init__(**kwargs)
Example #8
    def __init__(self,
                 units,
                 bond_classes,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(EdgeNetwork, self).__init__(**kwargs)
        self.units = units
        self.bond_classes = bond_classes

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
Example #9
File: local.py Project: bjerva/keras
    def __init__(self, nb_filter, nb_row, nb_col,
                 init='glorot_uniform', activation=None, weights=None,
                 border_mode='valid', subsample=(1, 1),
                 dim_ordering='default',
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        if border_mode != 'valid':
            raise ValueError('Invalid border mode for LocallyConnected2D '
                             '(only "valid" is supported):', border_mode)
        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init, dim_ordering=dim_ordering)
        self.activation = activations.get(activation)

        self.border_mode = border_mode
        self.subsample = tuple(subsample)
        if dim_ordering not in {'tf', 'th'}:
            raise ValueError('`dim_ordering` must be in {tf, th}.')
        self.dim_ordering = dim_ordering

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(LocallyConnected2D, self).__init__(**kwargs)
Example #10
 def __init__(self,
              units,
              map=None,
              nonzero_ind=None,
              kernel_initializer='glorot_uniform',
              W_regularizer=None,
              activation='tanh',
              use_bias=True,
              bias_initializer='zeros',
              bias_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     self.units = units
     self.activation = activation
     self.map = map
     self.nonzero_ind = nonzero_ind
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_regularizer = regularizers.get(W_regularizer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activation_fn = activations.get(activation)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     super(SparseTF, self).__init__(**kwargs)
Example #11
    def __init__(self,
                 units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 attention_activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 attention_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 attention_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 attention_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_attention=False,
                 implementation=1,
                 **kwargs):
        super(AttentionLSTM, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.attention_activation = activations.get(attention_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.attention_initializer = initializers.get(attention_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.attention_regularizer = regularizers.get(attention_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.attention_constraint = constraints.get(attention_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.return_attention = return_attention
        self.state_spec = [
            InputSpec(shape=(None, self.units)),
            InputSpec(shape=(None, self.units))
        ]
        self.implementation = implementation
Example #12
    def __init__(
            self,
            units,  # units in the decoder
            steps,  # number of output steps
            output_dim,  # dimensionality of the output
            atten_units,  # attention units in the dense layer
            gmax,  # number of maxout sub-hidden-units
            activation='tanh',
            recurrent_activation='hard_sigmoid',
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            kernel_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            recurrent_constraint=None,
            bias_constraint=None,
            dropout=0.,
            recurrent_dropout=0.,
            return_probabilities=False,
            **kwargs):

        self.units = units
        self.steps = steps
        self.output_dim = output_dim
        self.atten_units = atten_units
        self.activation = activations.get(activation)
        self.gmax = gmax
        self.recurrent_activation = activations.get(recurrent_activation)

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self._dropout_mask = None
        self._recurrent_dropout_mask = None

        self.return_probabilities = return_probabilities
        """if self.dropout or self.recurrent_dropout:
            self.uses_learning_phase = True"""
        super(attention_LSTM, self).__init__(**kwargs)
Example #13
    def __init__(self,
                 nb_filter,
                 shared_pool,
                 nb_row=1,
                 nb_col=1,
                 init='glorot_uniform',
                 activation='linear',
                 weights=None,
                 border_mode='valid',
                 subsample=(1, 1),
                 dim_ordering=K.image_dim_ordering(),
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 **kwargs):
        if border_mode != 'valid':
            raise Exception(
                'Invalid border mode for Convolution2D '
                '(only "valid" is supported):', border_mode)
        if tuple(subsample) != (
                nb_row, nb_col
        ):  #model.to_json saves subsample as list and not as tuple
            raise Exception(
                'Local layer only works with equal filter dimensions and strides'
            )
        self.nb_filter = nb_filter
        self.shared_pool = shared_pool
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init, dim_ordering=dim_ordering)
        self.activation = activations.get(activation)

        self.border_mode = border_mode
        self.subsample = tuple(subsample)
        assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.dim_ordering = dim_ordering

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(SemiShared, self).__init__(**kwargs)
Example #14
    def __init__(self,
                 input_dim,
                 output_dim,
                 mode=MODE_EXPAND,
                 embeddings_initializer='uniform',
                 embeddings_regularizer=None,
                 activity_regularizer=None,
                 embeddings_constraint=None,
                 mask_zero=False,
                 **kwargs):
        """
        :param input_dim: The maximum absolute value of positions.
        :param output_dim: The embedding dimension.
        :param embeddings_initializer:
        :param embeddings_regularizer:
        :param activity_regularizer:
        :param embeddings_constraint:
        :param mask_zero: The index that represents padding. Only works in `append` mode.
        :param kwargs:
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.mode = mode
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.embeddings_constraint = constraints.get(embeddings_constraint)
        self.mask_zero = mask_zero
        self.supports_masking = mask_zero is not False

        self.embeddings = None
        super(PositionEmbedding, self).__init__(**kwargs)
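For context, a minimal usage sketch (assumptions: this is the keras-pos-embd style layer, MODE_EXPAND is a module-level constant, and build/call follow the usual embedding contract; all sizes are arbitrary):

from keras import models

model = models.Sequential()
model.add(PositionEmbedding(input_dim=100, output_dim=32,
                            mode=MODE_EXPAND, input_shape=(None,)))
# per the docstring, input_dim bounds the absolute position value,
# so the layer would embed positions from the range [-100, 100]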
Example #15
 def __init__(self,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              depth_multiplier=1,
              activation=None,
              use_bias=True,
              depthwise_initializer='glorot_uniform',
              depthwise_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              depthwise_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(DepthConv2D,
           self).__init__(filters=None,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          data_format=data_format,
                          activation=activation,
                          use_bias=use_bias,
                          bias_regularizer=bias_regularizer,
                          activity_regularizer=activity_regularizer,
                          bias_constraint=bias_constraint,
                          **kwargs)
     self.depth_multiplier = depth_multiplier
     self.depthwise_initializer = initializers.get(depthwise_initializer)
     self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
     self.depthwise_constraint = constraints.get(depthwise_constraint)
Example #16
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              W_regularizer=None,
              bias_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     # self.output_dim = output_dim
     # self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.units = units
     self.activation = activation
     self.activation_fn = activations.get(activation)
     self.use_bias = use_bias
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_initializer = initializers.get(kernel_initializer)
      self.W_regularizer = W_regularizer
      self.kernel_regularizer = regularizers.get(W_regularizer)
      self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = bias_constraint
     super(Diagonal, self).__init__(**kwargs)
Example #17
    def __init__(self,
                 units,
                 cnn_encoder_k=None,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """
        :param units: decoder hidden_size, d in WTL. In WTL the LSTM hidden_size equals the dimension d of Vi.
        :param cnn_encoder_k: the number k of feature vectors produced by the cnn_encoder.
        :param kernel_initializer:
        :param recurrent_initializer:
        :param bias_initializer:
        :param kernel_regularizer:
        :param bias_regularizer:
        :param activity_regularizer:
        :param kernel_constraint:
        :param bias_constraint:
        :param kwargs:
        """
        self.units = units
        self.state_size = (units, units)

        self.cnn_encoder_k = cnn_encoder_k

        if self.cnn_encoder_k is None:
            raise ValueError('cnn_encoder_k cannot be None')

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)  # reuses the kernel regularizer (no separate argument in the signature)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(kernel_constraint)  # reuses the kernel constraint
        self.bias_constraint = constraints.get(bias_constraint)

        super(AdaptiveAttentionLSTMCell, self).__init__(**kwargs)
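As a quick sanity check on the constructor above (a sketch; driving the cell through keras.layers.RNN is assumed to follow the standard cell contract):

cell = AdaptiveAttentionLSTMCell(units=128, cnn_encoder_k=49)
assert cell.state_size == (128, 128)  # (h, c), both of width units

# omitting cnn_encoder_k trips the guard above
try:
    AdaptiveAttentionLSTMCell(units=128)
except ValueError as err:
    print(err)  # cnn_encoder_k cannot be None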
Example #18
    def __init__(
            self,
            F_,
            attn_heads=1,
            attn_heads_reduction='concat',  # ['concat', 'average']
            attn_dropout=0.5,
            activation='relu',
            kernel_initializer='glorot_uniform',
            attn_kernel_initializer='glorot_uniform',
            kernel_regularizer=None,
            attn_kernel_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            att_kernel_constraint=None,
            **kwargs):
        assert attn_heads_reduction in ['concat', 'average'], \
            'Possible reduction methods: \'concat\', \'average\''

        self.F_ = F_  # Number of output features (F' in the paper)
        self.attn_heads = attn_heads  # Number of attention heads (K in the paper)
        self.attn_heads_reduction = attn_heads_reduction  # 'concat' or 'average' (Eq 5 and 6 in the paper)
        self.attn_dropout = attn_dropout  # Internal dropout rate for attention coefficients
        self.activation = activations.get(
            activation)  # Optional nonlinearity (Eq 4 in the paper)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.attn_kernel_initializer = initializers.get(
            attn_kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.attn_kernel_regularizer = regularizers.get(
            attn_kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.attn_kernel_constraint = constraints.get(att_kernel_constraint)
        self.supports_masking = False

        # Populated by build()
        self.kernels = []  # Layer kernels for attention heads
        self.attn_kernels = []  # Attention kernels for attention heads

        if attn_heads_reduction == 'concat':
            # Output will have shape (..., K * F')
            self.output_dim = self.F_ * self.attn_heads
        else:
            # Output will have shape (..., F')
            self.output_dim = self.F_

        super(GraphAttention, self).__init__(**kwargs)
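The head-reduction arithmetic from the comments above, spelled out (the F_ and attn_heads values are arbitrary):

gat_concat = GraphAttention(F_=8, attn_heads=4, attn_heads_reduction='concat')
assert gat_concat.output_dim == 32  # K * F': heads are concatenated (Eq 5)
gat_average = GraphAttention(F_=8, attn_heads=4, attn_heads_reduction='average')
assert gat_average.output_dim == 8  # F': heads are averaged (Eq 6)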
Example #19
    def __init__(self,
                 nb_filter,
                 nb_row,
                 nb_col,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 border_mode='valid',
                 subsample=(1, 1),
                 dim_ordering='default',
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 **kwargs):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        if border_mode != 'valid':
            raise ValueError(
                'Invalid border mode for LocallyConnected2D '
                '(only "valid" is supported):', border_mode)
        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init, dim_ordering=dim_ordering)
        self.activation = activations.get(activation)

        self.border_mode = border_mode
        self.subsample = tuple(subsample)
        if dim_ordering not in {'tf', 'th'}:
            raise ValueError('`dim_ordering` must be in {tf, th}.')
        self.dim_ordering = dim_ordering

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(LocallyConnected2D, self).__init__(**kwargs)
Example #20
    def __init__(self, step_dim, W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0

        super(AttentionLayer,self).__init__(**kwargs)
Example #21
 def __init__(self,
              units=None,
              activation='tanh',
              recurrent_activation='hard_sigmoid',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal',
              bias_initializer='zeros',
              kernel_regularizer=None,
              recurrent_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              recurrent_constraint=None,
              bias_constraint=None,
              unit_forget_bias=True,
              dropout=0.,
              recurrent_dropout=0.,
              implementation=1,
              **kwargs):
     if units is None:
         assert 'output_dim' in kwargs, 'Missing argument: units'
     else:
         kwargs['output_dim'] = units
     self.activation = activations.get(activation)
     self.recurrent_activation = activations.get(recurrent_activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.recurrent_initializer = initializers.get(recurrent_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.recurrent_constraint = constraints.get(recurrent_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.unit_forget_bias = unit_forget_bias
     self.recurrent_dropout = min(1., max(0., recurrent_dropout))
     self.dropout = min(1., max(0., dropout))
     self.implementation = implementation
     self._trainable_weights = []
     self._dropout_mask = None
     self._recurrent_dropout_mask = None
     super(ExtendedRNNCell, self).__init__(**kwargs)
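The units/output_dim shim at the top of this constructor keeps the cell usable under both argument spellings; a sketch (assuming the parent class accepts the legacy output_dim keyword, as the assert implies):

cell_new = ExtendedRNNCell(units=32)         # forwarded to super as output_dim=32
cell_old = ExtendedRNNCell(output_dim=32)    # accepted directly via **kwargs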
Example #22
    def __init__(self, units, rank,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(CLSTMCell, self).__init__(**kwargs)
        self.units = units
        self.rank = rank
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        self.state_size = (self.rank, self.units)
        self.output_size = self.rank
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
Example #23
    def __init__(self, step_dim, get_alpha=False,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        self.get_alpha = get_alpha
        super(mm_IAN, self).__init__(**kwargs)
Example #24
    def __init__(self,
                 init='glorot_uniform',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get(init)
        self.kernel_initializer = initializers.get('glorot_uniform')

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionWithContext, self).__init__(**kwargs)
Example #25
File: local.py Project: yaojun01/keras
    def __init__(self,
                 nb_filter,
                 filter_length,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 border_mode='valid',
                 subsample_length=1,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 input_length=None,
                 **kwargs):
        if border_mode != 'valid':
            raise Exception(
                'Invalid border mode for LocallyConnected1D '
                '(only "valid" is supported):', border_mode)
        self.nb_filter = nb_filter
        self.filter_length = filter_length
        self.init = initializations.get(init, dim_ordering='th')
        self.activation = activations.get(activation)

        self.border_mode = border_mode
        self.subsample_length = subsample_length

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=3)]
        self.initial_weights = weights
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(LocallyConnected1D, self).__init__(**kwargs)
Example #26
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              conv_select_activation=None,
              select_weight_init_value=0,
              **kwargs):
     super(ConvBank, self).__init__(**kwargs)
     rank = 2
     self.rank = 2
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
      self.select_activation = (None if conv_select_activation is None
                                else activations.get(conv_select_activation))
     self.select_weight_init = initializers.Constant(
         select_weight_init_value)
Example #27
 def __init__(self,
              units,
              strides=(1, 1),
              padding='valid',
              kernel_initializer='glorot_uniform',
              kernel_constraint=None,
              bias_initializer='zeros',
              bias_constraint=None,
              use_bias=True,
              **kwargs):
     super(ErosionDense, self).__init__(**kwargs)
     self.units = units
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.bias_initializer = initializers.get(bias_initializer)
      # assumes channels-last data format
     self.channel_axis = -1
Example #28
    def __init__(self,
                 attention_mask=None,
                 num_attention_heads=1,
                 size_per_head=512,
                 query_act=None,
                 key_act=None,
                 value_act=None,
                 attention_probs_dropout_prob=0.0,
                 initializer_range=0.02,
                 do_return_2d_tensor=False,
                 from_seq_length=None,
                 to_seq_length=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 **kwargs):

        self.init = initializers.truncated_normal(stddev=initializer_range)

        self.attention_mask = attention_mask
        self.num_attention_heads = num_attention_heads
        self.size_per_head = size_per_head
        self.query_act = query_act
        self.key_act = key_act
        self.value_act = value_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.do_return_2d_tensor = do_return_2d_tensor
        self.from_seq_length = from_seq_length
        self.to_seq_length = to_seq_length

        self.output_dim = num_attention_heads * size_per_head

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias

        super(Attention, self).__init__(**kwargs)
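The projection width implied by the constructor above is simply heads times per-head size (a sketch; the argument values are illustrative):

attn = Attention(num_attention_heads=12, size_per_head=64)
assert attn.output_dim == 768  # 12 heads * 64 dims per head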
Example #29
 def __init__(self, units=1, strides=(1, 1),
              kernel_initializer='glorot_uniform',
              kernel_constraint=None,
              use_bias=True,
              **kwargs):
     super(CombDense_new1, self).__init__(**kwargs)
     self.num_node = units
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_constraint = constraints.get(kernel_constraint)
      # assumes channels-last data format
     self.channel_axis = -1
Example #30
    def __init__(self,
                 units,
                 out_units,
                 hidden_layers=1,
                 dropout_rate=0.0,
                 random_input_order=False,
                 activation='elu',
                 out_activation='sigmoid',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 out_kernel_initializer='glorot_uniform',
                 out_bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super(MaskingDense, self).__init__(**kwargs)

        self.input_sel = None
        self.random_input_order = random_input_order
        self.rate = min(1., max(0., dropout_rate))
        self.kernel_sels = []
        self.units = units
        self.out_units = out_units
        self.hidden_layers = hidden_layers
        self.activation = activations.get(activation)
        self.out_activation = activations.get(out_activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.out_kernel_initializer = initializers.get(out_kernel_initializer)
        self.out_bias_initializer = initializers.get(out_bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
Example #31
 def __init__(self,
              units,
              kernel_initializer='glorot_uniform',
              kernel_regularizer=None,
              kernel_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super().__init__(**kwargs)
     self.units = units
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
Example #32
    def __init__(self,
                 W_regularizer=None,
                 b_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.


        Note: The layer has been tested with Keras 2.0.6

        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
            # next add a Dense layer (for classification/regression) or whatever...

        """
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention, self).__init__(**kwargs)
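The docstring's usage pattern, expanded into a runnable sketch (assuming this module's Attention layer infers its weights from the RNN output shape as described; the sizes are arbitrary):

from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential()
model.add(LSTM(64, input_shape=(10, 16), return_sequences=True))
model.add(Attention())                     # collapses (steps, features) to (features,)
model.add(Dense(1, activation='sigmoid'))  # e.g. a binary-classification head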