Example #1
 def __init__(self, offset_creator_class, weight_basis,
              axis=-1,
              momentum=0.99,
              epsilon=1e-3,
              center=True,
              scale=True,
              beta_initializer='zeros',
              gamma_initializer='ones',
              moving_mean_initializer='zeros',
              moving_variance_initializer='ones',
              beta_regularizer=None,
              gamma_regularizer=None,
              beta_constraint=None,
              gamma_constraint=None,
              **kwargs):
     super(RProjBatchNormalization, self).__init__(offset_creator_class, weight_basis, **kwargs)
     self.supports_masking = True
     self.axis = axis
     self.momentum = momentum
     self.epsilon = epsilon
     self.center = center
     self.scale = scale
     self.beta_initializer = initializers.get(beta_initializer)
     self.gamma_initializer = initializers.get(gamma_initializer)
     self.moving_mean_initializer = initializers.get(moving_mean_initializer)
     self.moving_variance_initializer = initializers.get(moving_variance_initializer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_constraint = constraints.get(beta_constraint)
     self.gamma_constraint = constraints.get(gamma_constraint)
Example #2
 def __init__(self,
              axis=None,
              epsilon=1e-3,
              center=True,
              scale=True,
              beta_initializer='zeros',
              gamma_initializer='ones',
              beta_regularizer=None,
              gamma_regularizer=None,
              beta_constraint=None,
              gamma_constraint=None,
              **kwargs):
     self.beta = None
     self.gamma = None
     super(InstanceNormalization, self).__init__(**kwargs)
     self.supports_masking = True
     self.axis = axis
     self.epsilon = epsilon
     self.center = center
     self.scale = scale
     self.beta_initializer = initializers.get(beta_initializer)
     self.gamma_initializer = initializers.get(gamma_initializer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_constraint = constraints.get(beta_constraint)
     self.gamma_constraint = constraints.get(gamma_constraint)
Example #3
 def __init__(self, offset_creator_class, weight_basis,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(RProjLocallyConnected2D, self).__init__(offset_creator_class, weight_basis, **kwargs)
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     if self.padding != 'valid':
         raise ValueError('Invalid border mode for LocallyConnected2D '
                          '(only "valid" is supported): ' + padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=4)
Example #4
 def __init__(self,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              depth_multiplier=1,
              data_format=None,
              activation=None,
              use_bias=True,
              depthwise_initializer='glorot_uniform',
              bias_initializer='zeros',
              depthwise_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              depthwise_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(DepthwiseConv2D, self).__init__(
         filters=None,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         activation=activation,
         use_bias=use_bias,
         bias_regularizer=bias_regularizer,
         activity_regularizer=activity_regularizer,
         bias_constraint=bias_constraint,
         **kwargs)
     self.depth_multiplier = depth_multiplier
     self.depthwise_initializer = initializers.get(depthwise_initializer)
     self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
     self.depthwise_constraint = constraints.get(depthwise_constraint)
     self.bias_initializer = initializers.get(bias_initializer)
Example #5
 def __init__(self, offset_creator_class, weight_basis,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(RProjDense, self).__init__(offset_creator_class, weight_basis, **kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
Example #6
    def __init__(self, units,
                 projection_units=None,
                 activation='tanh',
                 recurrent_activation='sigmoid',
                 projection_activation='linear',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 projection_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 unit_forget_bias=False,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 projection_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 projection_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=2,
                 **kwargs):
        super(NASCell, self).__init__(**kwargs)
        self.units = units
        self.projection_units = projection_units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.projection_activation = activations.get(projection_activation)
        self.cell_activation = activations.get('relu')
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.projection_initializer = initializers.get(projection_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.projection_regularizer = regularizers.get(projection_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.projection_constraint = constraints.get(projection_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation

        if self.projection_units is not None:
            self.state_size = (self.projection_units, self.units)
        else:
            self.state_size = (self.units, self.units)

        self._dropout_mask = None
        self._recurrent_dropout_mask = None
Example #7
 def __init__(self, num_capsule, dim_vector, num_routing=3,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              **kwargs):
     super(CapsuleLayer, self).__init__(**kwargs)
     self.num_capsule = num_capsule
     self.dim_vector = dim_vector
     self.num_routing = num_routing
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
Example #8
    def __init__(self, epsilon=1e-3, axis=-1,
                 weights=None, beta_init='zero', gamma_init='one',
                 gamma_regularizer=None, beta_regularizer=None, **kwargs):

        self.supports_masking = True
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.epsilon = epsilon
        self.axis = axis
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.initial_weights = weights
        super(FixedBatchNormalization, self).__init__(**kwargs)
Example #9
    def __init__(self, filters, kernel_size, strides=1, padding='same', dilation_rate=1,
                 bias_initializer='zeros', kernel_initializer='glorot_uniform',
                 activation='linear', weights=None,
                 border_mode='valid', subsample_length=1,
                 kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None,
                 use_bias=True, input_dim=None, input_length=None, tied_to=None,
                 data_format='channels_last', rank=1, learnedKernel=None, layer_inner=None,
                 **kwargs):
        if border_mode not in {'valid', 'same'}:
            raise ValueError('Invalid border mode for Convolution1D: ' + str(border_mode))
        self.input_spec = [InputSpec(ndim=3)]
        self.input_dim = input_dim
        self.input_length = input_length
        self.tied_to = tied_to
        self.learnedKernel = np.array(learnedKernel) if learnedKernel is not None else None
        #self.tied_to.set_weights([weights, bias])
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.data_format = data_format
        self.filters = filters
        if self.tied_to is not None:
            self.kernel_size = self.tied_to.kernel_size
        else:
            self.kernel_size = kernel_size
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        self.border_mode = border_mode
        self.subsample_length = subsample_length
        self.subsample = (subsample_length, 1)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias

        self.rank = 1

        self.layer_inner = layer_inner

        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)

        #self.layer_inner = kwargs.pop('layer_inner')

        super(Convolution1D_tied, self).__init__(**kwargs)
Example #10
File: LSTMCNN.py Project: jarfo/kchar
    def __init__(self,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):

        self.init = initializers.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(Highway, self).__init__(**kwargs)
Example #11
 def __init__(self,
              initial_batch_size,
              units,
              num_blocks,
              num_units_per_block,
              vocab_size,
              keys,
              activation,
              weights=None,
              initializer='normal',
              bias_initializer='zeros',
              use_bias=True,
              **kwargs):
     super(REN, self).__init__(**kwargs)
     self.units = units
     self._num_blocks = num_blocks
     self._num_units_per_block = num_units_per_block
     self._vocab_size = vocab_size
     self._keys = keys
     self._activation = activation
     # self._activation = activation
     # if activation == 'prelu':
     #     self._activation = prelu
     # else:
     #     self._activation = activations.get(activation)
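     # NOTE: the `initializer` argument above is effectively ignored below; a
     # fixed stddev=0.1 random normal is used instead.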
     self._initializer = initializers.random_normal(stddev=0.1)
     # self.ortho_initializer = tf.orthogonal_initializer(gain=1.0)
     self.initial_batch_size = initial_batch_size
     self.bias_initializer = initializers.get(bias_initializer)
     self.supports_masking = True
     self.use_bias = use_bias
     self.initial_weights = weights
Example #12
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
        """
        self.supports_masking = True
        # self.init = initializations.get('glorot_uniform')
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)
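
A minimal usage sketch for the layer above, assuming a Keras Sequential model and the imports shown (vocabulary size, sequence length, and unit counts are illustrative):

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

model = Sequential()
model.add(Embedding(input_dim=20000, output_dim=128, input_length=50))
model.add(LSTM(64, return_sequences=True))  # outputs (samples, steps, features)
model.add(Attention(step_dim=50))           # collapses steps: (samples, features)
model.add(Dense(1, activation='sigmoid'))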
Example #13
 def add_non_trainable_weight(self,
                              name,
                              shape,
                              dtype=None,
                              initializer=None,
                              regularizer=None,
                              constraint=None):
     '''Adds a weight variable to the layer.
     # Arguments
         name: String, the name for the weight variable.
         shape: The shape tuple of the weight.
         dtype: The dtype of the weight.
         initializer: An Initializer instance (callable).
         regularizer: An optional Regularizer instance.
         trainable: A boolean, whether the weight should
             be trained via backprop or not (assuming
             that the layer itself is also trainable).
         constraint: An optional Constraint instance.
     # Returns
         The created weight variable.
     '''
     initializer = initializers.get(initializer)
     if dtype is None:
         dtype = K.floatx()
     weight = K.variable(initializer(shape), dtype=dtype, name=name)
     if regularizer is not None:
         self.add_loss(regularizer(weight))
     if constraint is not None:
         self.constraints[weight] = constraint
     self._non_trainable_weights.append(weight)
     return weight
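
A hedged sketch of how a method like this is typically invoked from a custom layer's build() (the weight name and shape are illustrative assumptions):

def build(self, input_shape):
    # Frozen running statistics, excluded from backprop.
    self.moving_mean = self.add_non_trainable_weight(
        name='moving_mean',
        shape=(input_shape[-1],),
        initializer='zeros')
    self.built = True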
Example #14
 def __init__(self, num_capsule, dim_capsule, routings=3,
              kernel_initializer='glorot_uniform',
              **kwargs):
     super(CapsuleLayer, self).__init__(**kwargs)
     self.num_capsule = num_capsule
     self.dim_capsule = dim_capsule
     self.routings = routings
     self.kernel_initializer = initializers.get(kernel_initializer)
Example #15
 def __init__(self, a_initializer='ones',
              k_initializer='ones',
              n_initializer='ones',
              z_initializer='zeros',
              a_regularizer=None,
              a_constraint=constraints.NonNeg(),
              k_regularizer=None,
              k_constraint=constraints.NonNeg(),
              n_regularizer=None,
              n_constraint=constraints.NonNeg(),  
              z_regularizer=None,
              z_constraint=constraints.NonNeg(),
              shared_axes=None,
              a_shared=True,
              k_shared=True,
              n_shared=True,
              z_shared=True,
              z_one=False,
              **kwargs):
     super(Hill, self).__init__(**kwargs)
     self.supports_masking = True
     self.a_initializer = initializers.get(a_initializer)
     self.a_regularizer = regularizers.get(a_regularizer)
     self.a_constraint = constraints.get(a_constraint)
     self.k_initializer = initializers.get(k_initializer)
     self.k_regularizer = regularizers.get(k_regularizer)
     self.k_constraint = constraints.get(k_constraint)
     self.n_initializer = initializers.get(n_initializer)
     self.n_regularizer = regularizers.get(n_regularizer)
     self.n_constraint = constraints.get(n_constraint)
     self.z_initializer = initializers.get(z_initializer)
     self.z_regularizer = regularizers.get(z_regularizer)
     self.z_constraint = constraints.get(z_constraint)
     self.a_shared = a_shared
     self.k_shared = k_shared
     self.n_shared = n_shared
     self.z_shared = z_shared
     self.z_one = z_one
     if shared_axes is None:
         self.shared_axes = None
     elif not isinstance(shared_axes, (list, tuple)):
         self.shared_axes = [shared_axes]
     else:
         self.shared_axes = list(shared_axes)
Example #16
    def __init__(self, units, output_dim,
                 activation='tanh',
                 return_probabilities=False,
                 name='AttentionDecoder',
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """
        Implements an AttentionDecoder that takes in a sequence encoded by an
        encoder and outputs the decoded states.
        :param units: dimension of the hidden state and the attention matrices
        :param output_dim: the number of labels in the output space

        references:
            Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio. 
            "Neural machine translation by jointly learning to align and translate." 
            arXiv preprint arXiv:1409.0473 (2014).
        """
        self.units = units
        self.output_dim = output_dim
        self.return_probabilities = return_probabilities
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
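        # NOTE: no separate recurrent_* parameters are exposed; the recurrent
        # weights reuse the kernel regularizer and constraint below.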
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionDecoder, self).__init__(**kwargs)
        self.name = name
        self.return_sequences = True  # must return sequences
Example #17
File: decode.py Project: the-moliver/kfs
    def __init__(self, nb_complex, filter_delays, kernel_initializer='glorot_uniform', kernel_regularizer=None, kernel_constraint=None, **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)

        self.nb_complex = nb_complex
        self.filter_delays = filter_delays
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)

        self.input_spec = [InputSpec(ndim=3)]
        super(SpatioTemporalFilterComplex, self).__init__(**kwargs)
Example #18
    def __init__(self, output_dim, init='glorot_uniform', activation='relu',weights=None,
            W_regularizer=None, b_regularizer=None, activity_regularizer=None,
            W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.W_initializer = initializers.get(init)
        self.b_initializer = initializers.get('zeros')
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(SparseFullyConnectedLayer, self).__init__(**kwargs)
Example #19
    def __init__(self, axis=-1,
                 gamma_init='one', beta_init='zero',
                 gamma_regularizer=None, beta_regularizer=None,
                 epsilon=1e-6,
                 group=32,
                 data_format=None,
                 **kwargs):
        self.beta = None
        self.gamma = None
        super(GroupNormalization, self).__init__(**kwargs)

        self.axis = to_list(axis)
        self.gamma_init = initializers.get(gamma_init)
        self.beta_init = initializers.get(beta_init)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.epsilon = epsilon
        self.group = group
        self.data_format = K.normalize_data_format(data_format)

        self.supports_masking = True
Example #20
 def __init__(self, n_units,
              sig_level=2,
              train_time_lapse=False,
              initial_time_lapse=0.1,
              output_signatures=False,  # whether the output includes the signatures
              use_signatures=True,  # whether each new state can depend on the signature
              kernel_initializer='glorot_uniform',
              recurrent_initializer='he_normal',  # could be good to use 'orthogonal' if not use_signatures
              activation='tanh',  # not applied to signature elements
              **kwargs):
     self.sig_level = sig_level
     self.sigsize = iisignature.siglength(2, sig_level)
     self.n_units = n_units  # like output_dim
     self.units = n_units * (self.sigsize + 1) if output_signatures else n_units
     self.train_time_lapse = train_time_lapse
     self.initial_time_lapse = initial_time_lapse
     self.output_signatures = output_signatures
     self.use_signatures = use_signatures
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.recurrent_initializer = initializers.get(recurrent_initializer)
     self.activation = activations.get(activation)
     super(RecurrentSig, self).__init__(**kwargs)
Example #21
 def __init__(self, alpha_initializer=initializers.constant(0.2),
              beta_initializer=initializers.constant(5.0),
              alpha_regularizer=None,
              alpha_constraint=None,
              beta_regularizer=None,
              beta_constraint=None,
              shared_axes=None,
              **kwargs):
     super(ParametricSoftplus, self).__init__(**kwargs)
     self.supports_masking = True
     self.alpha_initializer = initializers.get(alpha_initializer)
     self.alpha_regularizer = regularizers.get(alpha_regularizer)
     self.alpha_constraint = constraints.get(alpha_constraint)
     self.beta_initializer = initializers.get(beta_initializer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.beta_constraint = constraints.get(beta_constraint)
     if shared_axes is None:
         self.shared_axes = None
     elif not isinstance(shared_axes, (list, tuple)):
         self.shared_axes = [shared_axes]
     else:
         self.shared_axes = list(shared_axes)
Example #22
    def add_weight(self,
                   name,
                   shape,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   trainable=True,
                   constraint=None):
        '''Adds a weight variable to the layer.

        # Arguments
            name: String, the name for the weight variable.
            shape: The shape tuple of the weight.
            dtype: The dtype of the weight.
            initializer: An Initializer instance (callable).
            regularizer: An optional Regularizer instance.
            trainable: A boolean, whether the weight should
                be trained via backprop or not (assuming
                that the layer itself is also trainable).
            constraint: An optional Constraint instance.

        # Returns
            The created weight variable.
        '''
        initializer = initializers.get(initializer)
        if dtype is None:
            dtype = K.floatx()

        # Create Theta_0
        value_0 = initializer(shape)
        theta_0 = tf.Variable(value_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_theta0' % name)
        if isinstance(value_0, np.ndarray):
            theta_0._keras_shape = value_0.shape
        elif hasattr(value_0, 'get_shape'):
            theta_0._keras_shape = tuple(map(int, value_0.get_shape()))
        theta_0._uses_learning_phase = False

        # Call subclass offset creator
        theta_offset = self._make_theta_offset(theta_0.get_shape())

        # Compute final theta to be used in network
        theta = tf.add(theta_0, theta_offset, name=name)
        
        if regularizer is not None:
            self.add_loss(regularizer(theta))
        if constraint is not None:
            self.constraints[theta] = constraint
        self._base_thetas.append(theta_0)
        self._non_trainable_weights.extend([theta_0])
        return theta
Example #23
 def __init__(self, fwh_projector, rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     # embed()
     super(_RProjFWHConv, self).__init__(**kwargs)
     self.fwh_projector = fwh_projector
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
Example #24
    def __init__(self, timesteps, bias=True, simple=False,
                 W_regularizer=None, W_constraint=None,
                 V_regularizer=None, V_constraint=None,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.bias = bias
        self.timesteps = timesteps
        self.simple = simple
        self.W_regularizer = regularizers.get(W_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.V_regularizer = regularizers.get(V_regularizer)
        self.V_constraint = constraints.get(V_constraint)

        super(Attention, self).__init__(**kwargs)
Example #25
    def __init__(self, timesteps, attention_size, bias=True,
                 W_regularizer=regularizers.l1(0.01), W_constraint=None,
                 U_regularizer=regularizers.l1(0.01), U_constraint=None,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.bias = bias
        self.timesteps = timesteps
        self.attention_size = attention_size
        self.W_regularizer = regularizers.get(W_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.U_constraint = constraints.get(U_constraint)

        super(Attention, self).__init__(**kwargs)
Example #26
    def __init__(self,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):

        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention_layer, self).__init__(**kwargs)
Example #27
    def __init__(self,
                 units = 1,
                 output_dim = 1,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True,
                 return_sequence = True,
                 return_attention=False,
                 return_probabilities = False,
                 **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Note: The layer has been tested with Keras 1.x
        Example:
        
            # 1
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
            # next add a Dense layer (for classification/regression) or whatever...
            # 2 - Get the attention scores
            hidden = LSTM(64, return_sequences=True)(words)
            sentence, word_scores = Attention(return_attention=True)(hidden)
        """
        self.supports_masking = True
        self.return_attention = return_attention
        self.init = initializers.get('glorot_uniform')
        self.attention_dim = units
        self.output_dim = output_dim
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionDecoder, self).__init__(**kwargs)
Example #28
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        # self.init = initializations.get('glorot_uniform')
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)
Example #29
    def add_weight(self,
                   name,
                   shape,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   trainable=True,
                   constraint=None):
        '''Version of add_weight that creates a weight theta by instantiating
        theta_0 and then adding to it an offset from the member
        offset_creator.
        '''
        initializer = initializers.get(initializer)
        if dtype is None:
            dtype = K.floatx()

        # Create Theta_0
        value_0 = initializer(shape)
        theta_0 = tf.Variable(value_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_theta0' % name)
        if isinstance(value_0, np.ndarray):
            theta_0._keras_shape = value_0.shape
        elif hasattr(value_0, 'get_shape'):
            theta_0._keras_shape = tuple(map(int, value_0.get_shape()))
        theta_0._uses_learning_phase = False

        # Call offset creator
        theta_offset, non_trainable_weights = self.offset_creator.create_theta_offset(self.weight_basis,
                                                                                      theta_0.get_shape(),
                                                                                      dtype=dtype,
                                                                                      name=name)

        theta = tf.add(theta_0, theta_offset, name=name)

        if regularizer is not None:
            self.add_loss(regularizer(theta))
        if constraint is not None:
            self.constraints[theta] = constraint
        #self._base_thetas.append(theta_0)
        #self._basis_matrices.append(ww)
        #self._non_trainable_weights.extend([theta_0, ww])
        self._non_trainable_weights.extend([theta_0] + non_trainable_weights)
        return theta
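
For context, a minimal sketch of an offset creator compatible with the `create_theta_offset` call above, in the spirit of low-dimensional reparameterization: a fixed random basis plus trainable coordinates, returning the offset tensor and the non-trainable variables it created. The class name and the assumption that `weight_basis` is the subspace dimension d are illustrative, not the original project's API.

import numpy as np
import tensorflow as tf

class DenseOffsetCreator(object):
    def create_theta_offset(self, weight_basis, shape, dtype, name):
        shape = tuple(map(int, shape))
        n = int(np.prod(shape))
        # Fixed random basis P (d x n), never trained.
        P = tf.Variable(
            np.random.randn(weight_basis, n).astype('float32') / np.sqrt(weight_basis),
            trainable=False, name='%s_basis' % name)
        # Trainable low-dimensional coordinates z, zero-initialized so that
        # training starts exactly at theta_0.
        z = tf.Variable(np.zeros((1, weight_basis), dtype='float32'),
                        trainable=True, name='%s_coords' % name)
        theta_offset = tf.reshape(tf.matmul(z, P), shape)
        return theta_offset, [P]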
Example #30
    def __init__(self,
                 return_coefficients=False,
                 W_regularizer=None,
                 u_regularizer=None,
                 b_regularizer=None,
                 W_constraint=None,
                 u_constraint=None,
                 b_constraint=None,
                 bias=True,
                 **kwargs):
        self.supports_masking = True
        self.return_coefficients = return_coefficients
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs)
Example #31
    def __init__(self,
                 nb_classes,
                 frequency_table=None,
                 mode=0,
                 init='glorot_uniform',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 verbose=False,
                 **kwargs):
        '''
        # Arguments:
            nb_classes: Number of classes.
            frequency_table: list. Frequency of each class. More frequent classes get shorter Huffman codes.
            mode: integer. One of [0, 1].
            verbose: boolean. Set to True to log the progress of building the Huffman tree.
        '''
        self.nb_classes = nb_classes
        if frequency_table is None:
            frequency_table = [1] * nb_classes
        self.frequency_table = frequency_table
        self.mode = mode
        self.init = initializers.get(init)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.initial_weights = weights
        self.verbose = verbose
        super(Huffmax, self).__init__(**kwargs)
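
A hypothetical instantiation of the layer above (the class count and frequencies are illustrative):

word_counts = [100, 52, 37, 18, 5]  # one frequency per class
huffmax = Huffmax(nb_classes=5,
                  frequency_table=word_counts,
                  mode=0,
                  verbose=True)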
Example #32
    def add_variable(self, shape, dtype=None, initializer="zeros", name=None):
        """Create an optimizer variable.

        Args:
          shape: A list of integers, a tuple of integers, or a 1-D Tensor of type
            int32. Defaults to scalar if unspecified.
          dtype: The DType of the optimizer variable to be created. Defaults to
            `tf.keras.backend.floatx` if unspecified.
          initializer: string or callable. Initializer instance.
          name: The name of the optimizer variable to be created.

        Returns:
          An optimizer variable, in the format of tf.Variable.

        """
        if isinstance(initializer, str):
            initializer = initializers.get(initializer)
        if dtype is None:
            dtype = backend.floatx()
        if shape is None:
            shape = []
        return tf.Variable(
            initial_value=initializer(shape, dtype), name=name, trainable=False
        )
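
A hedged usage sketch, assuming an optimizer subclass that exposes this method (names are illustrative): one zero-initialized slot variable per trainable weight, e.g. for momentum.

def build(self, var_list):
    self.momentums = [
        self.add_variable(shape=var.shape, dtype=var.dtype,
                          initializer="zeros", name="momentum")
        for var in var_list
    ]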
Example #33
    def __init__(self,
                 first_dim,
                 last_dim,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):

        self.init = initializers.get(init)
        self.activation = activations.get(activation)

        self.input_dim = input_dim
        self.first_dim = first_dim
        self.last_dim = last_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(Dense3D, self).__init__(**kwargs)
Example #34
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 depth_multiplier=1,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 dilation_rate=(1, 1),  # tuple avoids the mutable-default pitfall
                 **kwargs):
        super(DepthwiseConv2D,
              self).__init__(filters=filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding=padding,
                             data_format=data_format,
                             activation=activation,
                             use_bias=use_bias,
                             bias_regularizer=bias_regularizer,
                             activity_regularizer=activity_regularizer,
                             bias_constraint=bias_constraint,
                             **kwargs)

        self.dilation_rate = dilation_rate
        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
Example #35
    def __init__(self,
                 step_dim,
                 W_regularizer=None,
                 b_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
        """
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)
Example #36
File: csr.py Project: zbx911/transcoder
    def build(self, input_shape):
        batch_size = input_shape[0]
        time_steps = input_shape[1]
        pixels_per_step = input_shape[2]
        channels_per_pixel = input_shape[3]

        # Notice that the input_spec changes after __init__ and again after build(), becoming more restrictive
        self.input_spec = keras.engine.InputSpec(shape=(batch_size, time_steps,
                                                        pixels_per_step,
                                                        channels_per_pixel))
        self.state_spec = keras.engine.InputSpec(shape=(batch_size,
                                                        pixels_per_step,
                                                        self.units))

        # TODO: does Keras require self.states?
        self.states = [None]

        self.Wz = self.add_weight(
            (self.filter_size, self.units, self.units),
            name='Wz',
            initializer=initializers.get('glorot_uniform'))
        self.Wr = self.add_weight(
            (self.filter_size, self.units, self.units),
            name='Wr',
            initializer=initializers.get('glorot_uniform'))
        self.Wh = self.add_weight(
            (self.filter_size, self.units, self.units),
            name='Wh',
            initializer=initializers.get('glorot_uniform'))
        self.Uz = self.add_weight(
            (self.filter_size, channels_per_pixel, self.units),
            name='Uz',
            initializer=initializers.get('glorot_uniform'))
        self.Ur = self.add_weight(
            (self.filter_size, channels_per_pixel, self.units),
            name='Ur',
            initializer=initializers.get('glorot_uniform'))
        self.Uh = self.add_weight(
            (self.filter_size, channels_per_pixel, self.units),
            name='Uh',
            initializer=initializers.get('glorot_uniform'))
        self.built = True
Example #37
 def __init__(self,
              axis=-1,
              momentum=0.99,
              epsilon=1e-5,
              center=True,
              scale=True,
              mean_weight_initializer='ones',
              variance_weight_initializer='ones',
              beta_initializer='zeros',
              gamma_initializer='ones',
              moving_mean_initializer='zeros',
              moving_variance_initializer='ones',
              beta_regularizer=None,
              gamma_regularizer=None,
              beta_constraint=None,
              gamma_constraint=None,
              **kwargs):
     super(SwitchableNormalization, self).__init__(**kwargs)
     self.supports_masking = True
     self.axis = axis
     self.momentum = momentum
     self.epsilon = epsilon
     self.center = center
     self.scale = scale
     self.mean_weight_initializer = initializers.get(
         mean_weight_initializer)
     self.variance_weight_initializer = initializers.get(
         variance_weight_initializer)
     self.beta_initializer = initializers.get(beta_initializer)
     self.gamma_initializer = initializers.get(gamma_initializer)
     self.moving_mean_initializer = initializers.get(
         moving_mean_initializer)
     self.moving_variance_initializer = initializers.get(
         moving_variance_initializer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_constraint = constraints.get(beta_constraint)
     self.gamma_constraint = constraints.get(gamma_constraint)
Example #38
 def __init__(self, **kwargs):
     self.init = initializers.get('glorot_uniform')
     super(MyAttentionLayer, self).__init__(**kwargs)
Example #39
 def __init__(self, return_attention=False, **kwargs):
     self.init = initializers.get('uniform')
     self.supports_masking = True
     self.return_attention = return_attention
     super(AttentionWeightedAverage, self).__init__(**kwargs)
Example #40
    def __init__(
            self,
            units,
            memory_order,
            theta,  # relative to dt=1
            measure='legt',
            method='zoh',
            trainable_input_encoders=True,
            trainable_hidden_encoders=True,
            trainable_memory_encoders=True,
            trainable_input_kernel=True,
            trainable_hidden_kernel=True,
            trainable_memory_kernel=True,
            trainable_A=False,
            trainable_B=False,
            input_encoders_initializer='lecun_uniform',
            hidden_encoders_initializer='lecun_uniform',
            memory_encoders_initializer=Constant(0),  # 'lecun_uniform',
            input_kernel_initializer='glorot_normal',
            hidden_kernel_initializer='glorot_normal',
            memory_kernel_initializer='glorot_normal',
            hidden_activation='tanh',
            **kwargs):
        super().__init__(**kwargs)

        self.units = units
        self.memory_order = memory_order
        self.theta = theta
        self.method = method
        self.trainable_input_encoders = trainable_input_encoders
        self.trainable_hidden_encoders = trainable_hidden_encoders
        self.trainable_memory_encoders = trainable_memory_encoders
        self.trainable_input_kernel = trainable_input_kernel
        self.trainable_hidden_kernel = trainable_hidden_kernel
        self.trainable_memory_kernel = trainable_memory_kernel
        self.trainable_A = trainable_A
        self.trainable_B = trainable_B

        self.input_encoders_initializer = initializers.get(
            input_encoders_initializer)
        self.hidden_encoders_initializer = initializers.get(
            hidden_encoders_initializer)
        self.memory_encoders_initializer = initializers.get(
            memory_encoders_initializer)
        self.input_kernel_initializer = initializers.get(
            input_kernel_initializer)
        self.hidden_kernel_initializer = initializers.get(
            hidden_kernel_initializer)
        self.memory_kernel_initializer = initializers.get(
            memory_kernel_initializer)

        self.hidden_activation = activations.get(hidden_activation)

        A, B = transition(measure, memory_order)
        # Construct A and B matrices
        C = np.ones((1, memory_order))
        D = np.zeros((1, ))
        dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D),
                                               dt=1. / theta,
                                               method=method)

        self._A = dA - np.eye(memory_order)  # puts into form: x += Ax
        self._B = dB

        self.state_size = (self.units, self.memory_order)
        self.output_size = self.units
Example #41
    def __init__(
            self,
            units,
            memory_order,
            measure='legt',
            method='zoh',
            max_length=256,
            trainable_input_encoders=True,
            trainable_hidden_encoders=True,
            trainable_memory_encoders=True,
            trainable_input_kernel=True,
            trainable_hidden_kernel=True,
            trainable_memory_kernel=True,
            trainable_A=False,
            trainable_B=False,
            input_encoders_initializer='lecun_uniform',
            hidden_encoders_initializer='lecun_uniform',
            memory_encoders_initializer=Constant(0),  # 'lecun_uniform',
            input_kernel_initializer='glorot_normal',
            hidden_kernel_initializer='glorot_normal',
            memory_kernel_initializer='glorot_normal',
            hidden_activation='tanh',
            gate=False,
            **kwargs):
        super().__init__(**kwargs)

        self.units = units
        self.memory_order = memory_order
        self.method = method
        self.max_length = max_length
        self.trainable_input_encoders = trainable_input_encoders
        self.trainable_hidden_encoders = trainable_hidden_encoders
        self.trainable_memory_encoders = trainable_memory_encoders
        self.trainable_input_kernel = trainable_input_kernel
        self.trainable_hidden_kernel = trainable_hidden_kernel
        self.trainable_memory_kernel = trainable_memory_kernel
        self.trainable_A = trainable_A
        self.trainable_B = trainable_B
        self.gate = gate

        self.input_encoders_initializer = initializers.get(
            input_encoders_initializer)
        self.hidden_encoders_initializer = initializers.get(
            hidden_encoders_initializer)
        self.memory_encoders_initializer = initializers.get(
            memory_encoders_initializer)
        self.input_kernel_initializer = initializers.get(
            input_kernel_initializer)
        self.hidden_kernel_initializer = initializers.get(
            hidden_kernel_initializer)
        self.memory_kernel_initializer = initializers.get(
            memory_kernel_initializer)

        self.hidden_activation = activations.get(hidden_activation)

        A, B = transition(measure, memory_order)
        # Construct A and B matrices

        A_stacked = np.empty((max_length, memory_order, memory_order),
                             dtype=A.dtype)
        B_stacked = np.empty((max_length, memory_order), dtype=B.dtype)
        B = B[:, 0]
        N = memory_order
        for t in range(1, max_length + 1):
            At = A / t
            Bt = B / t
            if method in forward_aliases:
                A_stacked[t - 1] = np.eye(N) + At
                B_stacked[t - 1] = Bt
            elif method in backward_aliases:
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At,
                                                       np.eye(N),
                                                       lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At,
                                                       Bt,
                                                       lower=True)
            elif method in bilinear_aliases:
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2,
                                                       np.eye(N) + At / 2,
                                                       lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2,
                                                       Bt,
                                                       lower=True)
            elif method in zoh_aliases:
                A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
                B_stacked[t - 1] = la.solve_triangular(A,
                                                       A_stacked[t - 1] @ B -
                                                       B,
                                                       lower=True)
        B_stacked = B_stacked[:, :, None]

        A_stacked -= np.eye(memory_order)  # puts into form: x += Ax
        self._A = A_stacked
        self._B = B_stacked

        self.state_size = (self.units, self.memory_order, 1)
        self.output_size = self.units
Example #42
 def __init__(self, data_format=None, **kwargs):
     super(NoisyAndPooling2D, self).__init__(**kwargs)
     self.data_format = K.normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=4)
     self.initializer = initializers.get('zeros')
     self.a = 10
Example #43
    def __init__(self,
                 num_filters=1,
                 kernel_size=(1, 1),
                 bin_sizes=(1, 2, 3, 6),  # tuple avoids the mutable-default pitfall
                 pool_mode='avg',
                 padding='valid',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """
        Initialize a new Pyramid Pooling Module.

        Args:
            num_filters: the number of filters per convolutional unit
            kernel_size: the size of the convolution kernel
            bin_sizes: sizes for pooling bins
            pool_mode: pooling mode to use
            padding: One of `"valid"` or `"same"` (case-insensitive).
            activation: Activation function to use
            use_bias: whether layer uses a bias vector
            kernel_initializer: Initializer for kernel weights
            bias_initializer: Initializer for bias vector
            kernel_regularizer: Regularizer function applied to kernel weights
            bias_regularizer: Regularizer function applied to bias vector
            activity_regularizer: Regularizer function applied to output
            kernel_constraint: Constraint function applied to kernel
            bias_constraint: Constraint function applied to bias vector
            kwargs: keyword arguments for Layer super constructor

        Returns:
            None

        """
        if padding != 'same' and any(x > 1 for x in kernel_size):
            raise ValueError(
                "padding should be 'same' if the kernel size is larger than (1, 1)"
            )
        # setup instance variables
        self.input_spec = InputSpec(ndim=4)
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.bin_sizes = bin_sizes
        self.pool_mode = pool_mode
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = 'channels_last'
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # initialize the kernels and biases
        self.kernels = None
        self.biases = None
        # call the super constructor
        super(PyramidPoolingModule, self).__init__(**kwargs)
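The `call` method is not part of this excerpt; the forward pass such a module conventionally implements (PSPNet-style) pools the input at each bin size, projects with a 1x1 convolution, upsamples, and concatenates with the input. A hedged TensorFlow sketch of that computation:

import tensorflow as tf

def pyramid_pooling(x, bin_sizes=(1, 2, 3, 6), num_filters=1):
    # Pool to each bin size, project with a 1x1 conv, upsample, concatenate.
    h, w = x.shape[1], x.shape[2]
    outputs = [x]
    for bin_size in bin_sizes:
        y = tf.keras.layers.AveragePooling2D(pool_size=(h // bin_size, w // bin_size))(x)
        y = tf.keras.layers.Conv2D(num_filters, (1, 1))(y)
        y = tf.image.resize(y, (h, w))
        outputs.append(y)
    return tf.keras.layers.Concatenate(axis=-1)(outputs)

x = tf.random.normal([1, 24, 24, 8])
print(pyramid_pooling(x).shape)  # (1, 24, 24, 8 + len(bin_sizes) * num_filters)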
Example #44
 def test_default_random_normal(self):
   rn = initializers.get('normal')
   self.assertEqual(rn.mean, 0.0)
   self.assertEqual(rn.stddev, 0.05)
Example #45
 def test_default_random_uniform(self):
   ru = initializers.get('uniform')
   self.assertEqual(ru.minval, -0.05)
   self.assertEqual(ru.maxval, 0.05)
Example #46
def sanitizedInitGet(init):
    if init in ["sqrt_init"]:
        return sqrt_init
    else:
        return initializers.get(init)
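`sanitizedInitGet` routes the custom key 'sqrt_init' to a hand-written initializer and defers everything else to Keras. `sqrt_init` is defined elsewhere in that codebase; the constant-1/sqrt(2) body below is an assumption (it matches the deep-complex-networks layers this helper usually ships with):

import numpy as np
from keras import backend as K

def sqrt_init(shape, dtype=None):
    # Assumed definition: a constant tensor of 1/sqrt(2).
    return K.constant(1 / np.sqrt(2), shape=shape, dtype=dtype)

print(sanitizedInitGet('sqrt_init'))       # the custom function above
print(sanitizedInitGet('glorot_uniform'))  # a keras.initializers instance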
Example #47
    def __init__(self,
                 units,
                 num_experts,
                 num_tasks,
                 use_expert_bias=True,
                 use_gate_bias=True,
                 expert_activation='relu',
                 gate_activation='softmax',
                 expert_bias_initializer='zeros',
                 gate_bias_initializer='zeros',
                 expert_bias_regularizer=None,
                 gate_bias_regularizer=None,
                 expert_bias_constraint=None,
                 gate_bias_constraint=None,
                 expert_kernel_initializer='VarianceScaling',
                 gate_kernel_initializer='VarianceScaling',
                 expert_kernel_regularizer=None,
                 gate_kernel_regularizer=None,
                 expert_kernel_constraint=None,
                 gate_kernel_constraint=None,
                 activity_regularizer=None,
                 **kwargs):
        """
        :param units: Number of hidden units
        :param num_experts: Number of experts
        :param num_tasks: Number of tasks
        :param use_expert_bias: Boolean to indicate the usage of bias in the expert weights
        :param use_gate_bias: Boolean to indicate the usage of bias in the gate weights
        :param expert_activation: Activation function of the expert weights
        :param gate_activation: Activation function of the gate weights
        :param expert_bias_initializer: Initializer for the expert bias
        :param gate_bias_initializer: Initializer for the gate bias
        :param expert_bias_regularizer: Regularizer for the expert bias
        :param gate_bias_regularizer: Regularizer for the gate bias
        :param expert_bias_constraint: Constraint for the expert bias
        :param gate_bias_constraint: Constraint for the gate bias
        :param expert_kernel_initializer: Initializer for the expert weights
        :param gate_kernel_initializer: Initializer for the gate weights
        :param expert_kernel_regularizer: Regularizer for the expert weights
        :param gate_kernel_regularizer: Regularizer for the gate weights
        :param expert_kernel_constraint: Constraint for the expert weights
        :param gate_kernel_constraint: Constraint for the gate weights
        :param activity_regularizer: Regularizer for the activity
        :param kwargs: Additional keyword arguments for the Layer class
        """
        self.units = units
        self.num_experts = num_experts
        self.num_tasks = num_tasks
        # Weight parameter
        self.expert_kernels = None
        self.gate_kernels = None
        self.expert_kernel_initializer = initializers.get(
            expert_kernel_initializer)
        self.gate_kernel_initializer = initializers.get(
            gate_kernel_initializer)
        self.expert_kernel_regularizer = regularizers.get(
            expert_kernel_regularizer)
        self.gate_kernel_regularizer = regularizers.get(
            gate_kernel_regularizer)
        self.expert_kernel_constraint = constraints.get(
            expert_kernel_constraint)
        self.gate_kernel_constraint = constraints.get(gate_kernel_constraint)

        # Activation parameter
        self.expert_activation = activations.get(expert_activation)
        self.gate_activation = activations.get(gate_activation)
        # Bias parameter
        self.expert_bias = None
        self.gate_bias = None
        self.use_expert_bias = use_expert_bias
        self.use_gate_bias = use_gate_bias
        self.expert_bias_initializer = initializers.get(
            expert_bias_initializer)
        self.gate_bias_initializer = initializers.get(gate_bias_initializer)
        self.expert_bias_regularizer = regularizers.get(
            expert_bias_regularizer)
        self.gate_bias_regularizer = regularizers.get(gate_bias_regularizer)
        self.expert_bias_constraint = constraints.get(expert_bias_constraint)
        self.gate_bias_constraint = constraints.get(gate_bias_constraint)
        # Activity parameter
        self.activity_regularizer = regularizers.get(activity_regularizer)
        # Keras parameter
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True

        super(MMOE, self).__init__(**kwargs)
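The layer's `build`/`call` are not shown; under the usual MMoE formulation (Ma et al., 2018), each task mixes shared expert outputs with its own softmax gate. A NumPy sketch of that computation for a single task (random stand-in weights):

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

batch, input_dim, units, num_experts = 4, 10, 16, 8
x = np.random.randn(batch, input_dim)
expert_kernels = np.random.randn(input_dim, units, num_experts)
gate_kernel = np.random.randn(input_dim, num_experts)

expert_outputs = np.maximum(np.einsum('bi,iue->bue', x, expert_kernels), 0.0)  # relu experts
gate = softmax(x @ gate_kernel)                               # (batch, num_experts)
task_output = np.einsum('bue,be->bu', expert_outputs, gate)   # (batch, units)
print(task_output.shape)  # (4, 16)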
Example #48
 def __init__(self, context_dim, regularizer=None):
     self.context_dim = context_dim
     self.regularizer = regularizer
     self.init = initializers.get('normal')
     self.supports_masking = True
     super(AttentionLayer, self).__init__()

 def __init__(self, **kwargs):
     self.init = initializers.get('normal')
     self.supports_masking = True
     super(AlignmentAttentionLayer, self).__init__(**kwargs)
Example #50
 def __init__(self, class_num=8631, s=64, m=0.5, **kwargs):
     self.init = initializers.get('glorot_uniform')
     self.class_num = class_num
     self.s = s  # feature scale
     self.m = m  # angular margin
     super(ArcFaceLoss, self).__init__(**kwargs)
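The defaults match the ArcFace paper (scale s = 64, additive angular margin m = 0.5). Assuming this layer applies the standard ArcFace margin, the target-class logit becomes s * cos(theta + m); a small NumPy sketch:

import numpy as np

def arcface_logit(cos_theta, s=64.0, m=0.5):
    # Penalize the target class by adding margin m to its angle before scaling.
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    return s * np.cos(theta + m)

print(arcface_logit(0.8))  # ~26.5, versus s * 0.8 = 51.2 without the margin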
Example #51
 def __init__(self, weights=None, axis=-1, gamma_init='zero', **kwargs):
     self.axis = axis
     self.gamma_init = initializers.get(gamma_init)
     self.initial_weights = weights
     super(Scale, self).__init__(**kwargs)
Example #52
 def test_default_truncated_normal(self):
   tn = initializers.get('truncated_normal')
   self.assertEqual(tn.mean, 0.0)
   self.assertEqual(tn.stddev, 0.05)
Example #53
 def __init__(self, attention_dim):
     self.init = initializers.get('normal')
     self.supports_masking = True
     self.attention_dim = attention_dim
     super(AttLayer, self).__init__()

 def __init__(self, **kwargs):
     self.init = initializers.get('normal')
     #self.input_spec = [InputSpec(ndim=3)]
     super(AttLayer, self).__init__(**kwargs)
Example #55
 def __init__(self, kernel_initializer='glorot_uniform', **kwargs):
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_size = conv_utils.normalize_tuple(1, 1, 'kernel_size')
     super(SortLayer, self).__init__(**kwargs)
Example #56
    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
        name=None,
        conv_op=None,
        **kwargs,
    ):
        super().__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs,
        )
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        if filters is not None and filters <= 0:
            raise ValueError("Invalid value for argument `filters`. "
                             "Expected a strictly positive value. "
                             f"Received filters={filters}.")
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      "kernel_size")
        self.strides = conv_utils.normalize_tuple(strides,
                                                  rank,
                                                  "strides",
                                                  allow_zero=True)
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, "dilation_rate")

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        self._validate_init()
        self._is_causal = self.padding == "causal"
        self._channels_first = self.data_format == "channels_first"
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
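The `conv_utils.normalize_*` helpers used throughout these constructors canonicalize scalar-or-tuple arguments and string flags; a few illustrative calls (the import path varies across Keras/TensorFlow versions):

from keras.utils import conv_utils  # location differs by Keras version

print(conv_utils.normalize_tuple(3, 2, 'kernel_size'))   # (3, 3)
print(conv_utils.normalize_tuple((1, 2), 2, 'strides'))  # (1, 2)
print(conv_utils.normalize_padding('SAME'))              # 'same'
print(conv_utils.normalize_data_format(None))            # backend default, e.g. 'channels_last'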
Example #57
    def build(self, input_shape):
        input_dim = input_shape[-1]

        self.input_encoders = self.add_weight(
            name='input_encoders',
            shape=(input_dim, 1),
            initializer=self.input_encoders_initializer,
            trainable=self.trainable_input_encoders)

        self.hidden_encoders = self.add_weight(
            name='hidden_encoders',
            shape=(self.units, 1),
            initializer=self.hidden_encoders_initializer,
            trainable=self.trainable_hidden_encoders)

        self.memory_encoders = self.add_weight(
            name='memory_encoders',
            shape=(self.memory_order, 1),
            initializer=self.memory_encoders_initializer,
            trainable=self.trainable_memory_encoders)

        self.input_kernel = self.add_weight(
            name='input_kernel',
            shape=(input_dim, self.units),
            initializer=self.input_kernel_initializer,
            trainable=self.trainable_input_kernel)

        if self.trainable_hidden_kernel:
            self.hidden_kernel = self.add_weight(
                name='hidden_kernel',
                shape=(self.units, self.units),
                initializer=self.hidden_kernel_initializer,
                trainable=self.trainable_hidden_kernel)
        else:
            self.hidden_kernel = self.add_weight(name='hidden_kernel',
                                                 shape=(self.units,
                                                        self.units),
                                                 initializer=Constant(0.),
                                                 trainable=False)

        self.memory_kernel = self.add_weight(
            name='memory_kernel',
            shape=(self.memory_order, self.units),
            initializer=self.memory_kernel_initializer,
            trainable=self.trainable_memory_kernel)

        self.A = self.add_weight(
            name='A',
            shape=(self.max_length, self.memory_order, self.memory_order),
            initializer=Constant(self._A),  # note: transposed
            trainable=self.trainable_A)

        self.B = self.add_weight(
            name='B',
            shape=(self.max_length, self.memory_order, 1),  # system is SISO
            initializer=Constant(self._B),  # note: transposed
            trainable=self.trainable_B)

        if self.gate:
            self.W_gate = self.add_weight(
                name='gate',
                shape=(self.units + self.memory_order, self.units),
                initializer=initializers.get('glorot_normal'),
                trainable=True)

        self.built = True
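Since `_A` was stored in the `x += Ax` form, the memory update at step t is a residual linear map of the previous memory plus the encoded scalar input. A NumPy sketch of that recurrence (toy random matrices standing in for the precomputed `A_stacked`/`B_stacked`, and ignoring the transposition the layer notes for batched multiplication):

import numpy as np

memory_order, max_length = 4, 10
A = np.random.randn(max_length, memory_order, memory_order) * 0.01
B = np.random.randn(max_length, memory_order, 1) * 0.01

m = np.zeros((memory_order, 1))
for t in range(max_length):
    u = np.random.randn(1, 1)    # scalar encoded input (the system is SISO)
    m = m + A[t] @ m + B[t] @ u  # x += Ax + Bu
print(m.shape)  # (4, 1)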
Example #58
    def __init__(self, filters,
                 kernel_size,
                 retention_ratio=None, 
                 learn_retention_ratio=False, 
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(ConvlpRNN2DCell, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        if K.backend() == 'theano' and (dropout or recurrent_dropout):
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size = (self.filters, self.filters)
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
        self._learn_retention_ratio = learn_retention_ratio
        self._num_units = filters
        self.retention_ratio = retention_ratio
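A hypothetical instantiation (the cell's `build`/`call` live elsewhere in the class); convolutional cells like this are normally driven by Keras's `ConvRNN2D` wrapper rather than the plain `RNN` layer:

# Hypothetical usage; the shapes and the ConvRNN2D wrapping are assumptions.
cell = ConvlpRNN2DCell(filters=32, kernel_size=(3, 3), padding='same',
                       retention_ratio=0.9)
# layer = ConvRNN2D(cell)  # would consume (batch, time, rows, cols, channels)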
Example #60
 def __init__(self, name: str, initialization: str = 'glorot_uniform', activation: str = 'linear'):
     self.name = name
     self.init = initializers.get(initialization)
     self.activation = activations.get(activation)