def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     """Configure a 1D convolution layer.

     Scalar `kernel_size`, `strides` and `dilation_rate` are stored as
     1-tuples; string identifiers for the activation, initializers,
     regularizers and constraints are resolved via their `get` helpers.
     """
     super(MyConv1D, self).__init__(**kwargs)
     self.supports_masking = True
     self.filters = filters
     # Scalars normalised to 1-tuples (rank-1 convolution).
     self.kernel_size = (kernel_size, )
     self.strides = (strides, )
     self.padding = padding
     self.dilation_rate = (dilation_rate, )
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     # Inputs are required to be rank-3 tensors.
     self.input_spec = InputSpec(ndim=3)
# Example 2
# 0
    def build(self, input_shape):
        """Create the kernel (clipped to [-H, H]) and the optional bias.

        When `self.H` or `self.kernel_lr_multiplier` is the string
        'Glorot', it is replaced by a Glorot-style value derived from the
        layer's fan-in/fan-out before the kernel is created.
        """
        assert len(input_shape) >= 2
        # NOTE(review): reads axis 1 but the InputSpec below pins axis -1;
        # these coincide only for rank-2 input — confirm intent.
        input_dim = input_shape[1]

        if self.H == 'Glorot':
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

        # Constraint and initializer are derived from the resolved H.
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                     initializer=self.kernel_initializer,
                                     name='kernel',
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            # NOTE(review): bias is sized by self.output_dim while the
            # kernel uses self.units — confirm these are the same quantity.
            self.bias = self.add_weight(shape=(self.output_dim,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
# Example 3
# 0
    def build(self, input_shape):
        """Create the optional `gamma` (scale) and `beta` (center) weights.

        Validates that `axis` is usable for the given input rank, then
        registers one parameter tensor per enabled affine term.
        """
        ndim = len(input_shape)
        if self.axis == 0:
            raise ValueError('Axis cannot be zero')
        if (self.axis is not None) and (ndim == 2):
            raise ValueError('Cannot specify axis for rank 1 tensor')

        self.input_spec = InputSpec(ndim=ndim)

        # A single shared scalar when no axis is given, otherwise one
        # value per entry along the selected axis.
        shape = (1,) if self.axis is None else (input_shape[self.axis],)

        self.gamma = self.add_weight(
            shape=shape,
            name='gamma',
            initializer=self.gamma_initializer,
            regularizer=self.gamma_regularizer,
            constraint=self.gamma_constraint,
        ) if self.scale else None

        self.beta = self.add_weight(
            shape=shape,
            name='beta',
            initializer=self.beta_initializer,
            regularizer=self.beta_regularizer,
            constraint=self.beta_constraint,
        ) if self.center else None

        self.built = True
# Example 4
# 0
    def build(self, input_shape):
        """
        Builds Tensorflow DAG for this layer.

        Derives kernel and bias from `self.tied_layer` instead of creating
        new trainable weights.
        :param input_shape: input tensor shape
        :return: none
        """
        # Set the input dimensions as the output dimension of the conv layer
        assert len(input_shape) >= 2
        self.input_dim = self.tied_layer.output_shape[-1]
        self.input_spec = [InputSpec(min_ndim=2, axes={-1: self.input_dim})]

        # Set kernel from the tied layer as flipped
        # (reversed along axis 0, i.e. the kernel-length axis).
        self.kernel = K.reverse(self.tied_layer.kernel, axes=0)

        # NOTE(review): this is a reshape, not a transpose — the element
        # order is reinterpreted into (kernel_size, tied output channels,
        # tied input channels). Confirm this ordering is intended.
        self.kernel = K.reshape(
            self.kernel,
            (
                self.kernel_size[0],
                self.tied_layer.output_shape[2],
                self.tied_layer.input_shape[-1],
            ),
        )

        # Set bias from the tied layer
        if self.tied_layer.use_bias is True:
            self.bias = self.tied_layer.bias
        else:
            self.bias = None

        # Have to set build to True
        self.built = True
# Example 5
# 0
    def build(self, input_shape):
        """Create the phase kernel `theta` and derive real/imag kernels.

        Expects rank-2 input whose last axis is even (presumably two
        halves of a complex representation — TODO confirm in call()).
        Raises ValueError unless kernel_initializer is 'complex'.
        """
        assert len(input_shape) == 2
        assert input_shape[-1] % 2 == 0
        input_dim = input_shape[-1] // 2
        kernel_shape = (input_dim, self.units)
        # (removed) dead code: a He/Glorot scale `s` was computed here via
        # initializers._compute_fans but never used anywhere below.

        rng = RandomStreams(seed=self.seed)

        def init_theta(shape, dtype=None):
            # NOTE: deliberately ignores `shape`/`dtype`; always samples
            # uniform phases in [0, 6) with the layer's kernel shape.
            return rng.uniform(size=kernel_shape, low=0, high=6)

        if self.kernel_initializer in {'complex'}:
            theta_init = init_theta
        else:
            raise ValueError('Not recognized choice of initialization!')

        # Defining layer parameters (Codebook):
        self.theta = self.add_weight(shape=kernel_shape,
                                     initializer=theta_init,
                                     name='theta_kernel',
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)

        # Unit-modulus (up to 1/scale) kernels derived from the phases.
        self.real_kernel = (1 / self.scale) * K.cos(self.theta)
        self.imag_kernel = (1 / self.scale) * K.sin(self.theta)

        self.input_spec = InputSpec(ndim=2, axes={-1: 2 * input_dim})
        self.built = True
    def build(self, input_shape):
        """Create the four per-feature parameter tensors (t/a, left/right).

        Axes listed in `shared_axes` are collapsed to size 1 and marked
        broadcastable; the remaining axes are pinned in the input spec.
        """
        param_shape = list(input_shape[1:])
        self.param_broadcast = [False] * len(param_shape)
        if self.shared_axes is not None:
            for ax in self.shared_axes:
                param_shape[ax - 1] = 1
                self.param_broadcast[ax - 1] = True

        param_shape = tuple(param_shape)

        # Register the parameters in a fixed order so saved weights stay
        # compatible: t_left, a_left, t_right, a_right.
        for attr_name, init in (('t_left', self.t_left_initializer),
                                ('a_left', self.a_left_initializer),
                                ('t_right', self.t_right_initializer),
                                ('a_right', self.a_right_initializer)):
            setattr(self, attr_name,
                    self.add_weight(shape=param_shape,
                                    name=attr_name,
                                    initializer=init))

        # Pin every non-shared axis to its concrete input size.
        axes = {}
        if self.shared_axes:
            axes = {i: input_shape[i]
                    for i in range(1, len(input_shape))
                    if i not in self.shared_axes}
        self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
        self.built = True
# Example 7
# 0
 def __init__(self,
              ch_j,
              n_j,
              kernel_size=(3, 3),
              strides=(1, 1),
              r_num=1,
              b_alphas=[8, 8, 8],
              padding='same',
              data_format='channels_last',
              dilation_rate=(1, 1),
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              **kwargs):
     """Configure a 2D convolutional capsule layer.

     NOTE(review): `b_alphas` uses a mutable list default (shared across
     instances), and the `dilation_rate` argument is ignored — the
     attribute is hard-coded to (1, 1) below. Confirm both are intended.
     """
     super(Conv2DCaps, self).__init__(**kwargs)
     rank = 2
     self.ch_j = ch_j  # Number of capsules in layer J
     self.n_j = n_j  # Number of neurons in a capsule in J
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.r_num = r_num  # routing iteration count (per its name)
     self.b_alphas = b_alphas
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = K.normalize_data_format(data_format)
     # Hard-coded; the `dilation_rate` constructor argument is not used.
     self.dilation_rate = (1, 1)
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     # rank + 3 = 5: one more axis than a plain conv (capsule dimension).
     self.input_spec = InputSpec(ndim=rank + 3)
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None,
                 U_regularizer=None,
                 b_regularizer=None,
                 first=False,
                 disable_shots=False,
                 dropout_W=.0,
                 dropout_U=.0,
                 **kwargs):
        """Configure the LSTM variant.

        String identifiers for initializations, activations and
        regularizers are resolved through the Keras `get` helpers before
        being stored.
        """
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        # Bug fix: the original re-assigned W_regularizer/U_regularizer to
        # the raw constructor arguments right after the lines above,
        # clobbering the objects resolved by regularizers.get() (wrong
        # whenever a string identifier such as 'l2' was passed).
        self.first = first
        self.disable_shots = disable_shots
        self.dropout_W, self.dropout_U = dropout_W, dropout_U
        # Dropout makes train/test behaviour differ.
        self.uses_learning_phase = True

        self.input_dim = None
        self.input_spec = [InputSpec(ndim=3)]

        super(BA_LSTM, self).__init__(**kwargs)
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 activation='relu',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 input_dim=None,
                 **kwargs):
        """Configure a fully connected layer over rank-2 input.

        `weights` is only stored here (as `initial_weights`); how it is
        applied is not visible in this method.
        """
        self.W_initializer = initializers.get(init)
        # Bias is always zero-initialised, regardless of `init`.
        self.b_initializer = initializers.get('zeros')
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        # Propagate an explicit input shape to the base Layer when given.
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(SparseFullyConnectedLayer, self).__init__(**kwargs)
# Example 10
# 0
    def build(self, input_shape):
        """Create the kernel and optional bias for a 4-component
        (quaternion-style) dense layer.

        The last input axis is divided into 4 components below, so it
        must be divisible by 4.
        """
        assert len(input_shape) == 2
        # Bug fix: was `% 2 == 0`, but the dimension is split by 4
        # (`// 4` and `4 * input_dim` in the InputSpec), so an input
        # width of e.g. 6 would pass the old assert and then fail later.
        assert input_shape[-1] % 4 == 0
        input_dim = input_shape[-1] // 4
        # (removed) dead code: `data_format = K.image_data_format()` was
        # never used.
        kernel_shape = (input_dim, self.units)
        init_shape = (input_dim, self.q_units)

        kern_init = qdense_init(init_shape, self.init_criterion)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=kern_init,
                                      name='r',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units, ),
                                        initializer='zeros',
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=2, axes={-1: 4 * input_dim})
        self.built = True
# Example 11
# 0
    def __init__(self, output_dim, window_size=3, stride=1,
                 kernel_initializer='uniform', bias_initializer='zero',
                 activation='linear', activity_regularizer=None,
                 kernel_regularizer=None, bias_regularizer=None,
                 kernel_constraint=None, bias_constraint=None,
                 use_bias=True, input_dim=None, input_length=None, **kwargs):
        """Configure a gated convolutional (GCNN) layer over rank-3 input."""
        self.output_dim = output_dim
        self.window_size = window_size
        # Stored as a 2-tuple; the second axis is never strided.
        self.strides = (stride, 1)

        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        # Inputs must be rank-3 tensors.
        self.input_spec = [InputSpec(ndim=3)]
        self.input_dim = input_dim
        self.input_length = input_length
        # Propagate a fully specified input shape to the base Layer.
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(GCNN, self).__init__(**kwargs)
# Example 12
# 0
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              init_criterion='he',
              kernel_initializer=qdense_init,
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              seed=None,
              **kwargs):
     """Configure a quaternion dense layer (`units` split into 4 parts).

     NOTE(review): the `kernel_initializer` argument is accepted but never
     stored on the instance; build() presumably calls `qdense_init`
     directly — confirm.
     """
     # Accept Keras' `input_dim` shorthand.
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(QuaternionDense, self).__init__(**kwargs)
     self.units = units
     # Each of the 4 quaternion components holds units // 4 values.
     self.q_units = units // 4
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.init_criterion = init_criterion
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     if seed is None:
         # Random default seed (10e6 is a float; numpy accepts it here).
         self.seed = np.random.randint(1, 10e6)
     else:
         self.seed = seed
     self.input_spec = InputSpec(ndim=2)
     self.supports_masking = True
 def __init__(self, downsampling_factor=10, kernel_initializer='glorot_uniform', kernel_regularizer=None, kernel_constraint=None, **kwargs):
     """Configure a layer whose kernel spans the downsampled spatial grid
     of a rank-4 input (see build())."""
     super().__init__(**kwargs)
     self.downsampling_factor = downsampling_factor
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.input_spec = [InputSpec(ndim=4)]
    def build(self, input_shape):
        """Create a bias-free kernel sized to the downsampled spatial dims."""
        # Axes 1 and 2 of the rank-4 input, each divided by the factor.
        downsampled_dims = [dim // self.downsampling_factor
                            for dim in input_shape[1:3]]
        self.kernel = self.add_weight(shape=downsampled_dims,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        # This layer has no bias term.
        self.bias = None
        self.input_spec = InputSpec(ndim=4)
        self.built = True
    def build(self, input_shape):
        """Build the wrapped layer, then create the attention parameters.

        Creates W1 (input->input), W2 (wrapped-output->input),
        W3 (2*input->input), biases b2/b3, and the scoring vector V.
        """
        self._validate_input_shape(input_shape)

        self.input_spec = InputSpec(shape=input_shape)

        # Build the inner layer first so its output shape is available.
        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True

        input_dim = input_shape[-1]

        output_dim = self.layer.compute_output_shape(input_shape)[-1]

        self._W1 = self.add_weight(shape=(input_dim, input_dim),
                                   name="{}_W1".format(self.name),
                                   initializer=self.weight_initializer)
        self._W2 = self.add_weight(shape=(output_dim, input_dim),
                                   name="{}_W2".format(self.name),
                                   initializer=self.weight_initializer)
        self._W3 = self.add_weight(shape=(2 * input_dim, input_dim),
                                   name="{}_W3".format(self.name),
                                   initializer=self.weight_initializer)
        self._b2 = self.add_weight(shape=(input_dim, ),
                                   name="{}_b2".format(self.name),
                                   initializer=self.weight_initializer)
        self._b3 = self.add_weight(shape=(input_dim, ),
                                   name="{}_b3".format(self.name),
                                   initializer=self.weight_initializer)
        self._V = self.add_weight(shape=(input_dim, 1),
                                  name="{}_V".format(self.name),
                                  initializer=self.weight_initializer)

        # NOTE(review): called without input_shape — relies on the parent
        # class' build() accepting no argument (Keras Wrapper.build
        # defaults input_shape to None); confirm the base class.
        super(AttentionRNNWrapper, self).build()
# Example 16
# 0
 def build(self, input_shape):
     """Create the conv kernel and optional bias.

     The kernel's input-channel dimension is widened by 2 (or 3 when
     `with_r` is set) — presumably appended coordinate channels; confirm
     in call().
     """
     channel_axis = 1 if self.data_format == 'channels_first' else -1
     input_dim = input_shape[channel_axis]
     if input_dim is None:
         raise ValueError('The channel dimension of the inputs '
                          'should be defined. Found `None`.')

     # Extra input channels beyond the data itself.
     extra_channels = 3 if self.with_r else 2
     kernel_shape = self.kernel_size + (input_dim + extra_channels,
                                        self.filters)
     self.kernel = self.add_weight(shape=kernel_shape,
                                   initializer=self.kernel_initializer,
                                   name='kernel',
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)

     self.bias = (self.add_weight(shape=(self.filters, ),
                                  initializer=self.bias_initializer,
                                  name='bias',
                                  regularizer=self.bias_regularizer,
                                  constraint=self.bias_constraint)
                  if self.use_bias else None)

     # Pin the channel axis for subsequent calls.
     self.input_spec = InputSpec(ndim=self.rank + 2,
                                 axes={channel_axis: input_dim})
     self.built = True
# Example 17
# 0
# File: qrnn.py  Project: xuqy1981/-
    def build(self, input_shape):
        """Create the QRNN weights W (and optional bias b).

        W packs the three gates along its last axis (output_dim * 3).
        """
        if self.stateful:
            self.reset_states()
        else:
            # initial states: all-zero tensor of shape (output_dim)
            self.states = [None]

        input_dim = input_shape[2]
        self.input_spec = [InputSpec(shape=input_shape)]
        self.W_shape = (self.window_size, 1, input_dim, self.output_dim * 3)

        self.W = self.add_weight(self.W_shape,
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight((self.output_dim * 3, ),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            # Bug fix: previously self.b was left undefined when bias is
            # disabled, causing an AttributeError on first access; mirror
            # the `self.bias = None` pattern used elsewhere.
            self.b = None

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
# Example 18
# 0
    def add(self, layer):
        '''Add a layer to the container's inner model.

        # Arguments:
        layer: Layer instance. RNNCell or a normal layer such as Dense.
        '''
        self.model.add(layer)
        # Learning phase is used if forced (_truth_tensor) or if any
        # contained layer uses it.
        self.uses_learning_phase = self._truth_tensor or any(
            [l.uses_learning_phase for l in self.model.layers])
        if len(self.model.layers) == 1:
            # First layer added: derive this container's batch input shape
            # from it.
            if layer.input_spec is not None:
                shape = layer.input_spec[0].shape
            else:
                shape = layer.input_shape
            if not self.decode:
                # Insert the time dimension after the batch axis.
                shape = (shape[0], self.input_length) + shape[1:]
            self.batch_input_shape = shape
            self.input_spec = [InputSpec(shape=shape)]
        if _isRNN(layer) and self.state_sync:
            # In state-synchronized mode every cell must expose the same
            # number of states; the first RNN cell fixes the count.
            if not hasattr(self, 'nb_states'):
                self.nb_states = len(layer.states)
            else:
                assert len(
                    layer.states
                ) == self.nb_states, 'Incompatible layer. In a state synchronized recurrent container, all the cells should have the same number of states.'
        if self.stateful:
            self.reset_states()
    def build(self, input_shape):
        """Create the per-feature `alpha` and `beta` parameter tensors.

        Axes listed in `shared_axes` are collapsed to size 1 and marked
        broadcastable; the remaining axes are pinned in the input spec.
        """
        shape = list(input_shape[1:])
        self.param_broadcast = [False] * len(shape)
        if self.shared_axes is not None:
            for ax in self.shared_axes:
                shape[ax - 1] = 1
                self.param_broadcast[ax - 1] = True

        shape = tuple(shape)

        # Initialised as ones to emulate the default ELU
        self.alpha = self.add_weight(shape=shape,
                                     name='alpha',
                                     initializer=self.alpha_initializer,
                                     regularizer=self.alpha_regularizer,
                                     constraint=self.alpha_constraint)
        self.beta = self.add_weight(shape=shape,
                                    name='beta',
                                    initializer=self.beta_initializer,
                                    regularizer=self.beta_regularizer,
                                    constraint=self.beta_constraint)

        # Pin every non-shared axis to its concrete input size.
        fixed_axes = {}
        if self.shared_axes:
            fixed_axes = {dim: input_shape[dim]
                          for dim in range(1, len(input_shape))
                          if dim not in self.shared_axes}
        self.input_spec = InputSpec(ndim=len(input_shape), axes=fixed_axes)
        self.built = True
# Example 20
# 0
    def build(self, input_shape):
        """Create kernel, optional bias, and a per-unit `factor` weight
        for a multi-channel dense layer."""
        assert len(input_shape) >= 2
        feature_dim = input_shape[-1]

        self.kernel = self.add_weight(
            shape=(self.units, feature_dim, self.channels),
            initializer=self.kernel_initializer,
            name='kernel',
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint)

        self.bias = self.add_weight(
            shape=(self.units, self.channels),
            initializer=self.bias_initializer,
            name='bias',
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint) if self.use_bias else None

        # NOTE: `factor` reuses the *bias* regularizer/constraint, exactly
        # as in the original implementation.
        self.factor = self.add_weight(
            shape=(self.units, self.channels),
            initializer='ones',
            name='factor',
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint)

        self.input_spec = InputSpec(min_ndim=2, axes={-1: feature_dim})
        self.built = True
# Example 21
# 0
 def __init__(self, data_format=None, **kwargs):
     """Configure the layer, normalising `data_format` with whichever
     helper the active backend provides."""
     super().__init__(**kwargs)
     if get_backend() == "amd":
         # The AMD/PlaidML backend exposes the normaliser on K itself.
         self.data_format = K.normalize_data_format(data_format)  # pylint:disable=no-member
     else:
         self.data_format = conv_utils.normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=4)
 def __init__(self, padding, data_format='channels_last', **kwargs):
     """Shared base initializer for zero-padding layers of any rank.

     `padding` has one entry per spatial dimension; its length fixes the
     layer's rank.
     """
     # self.rank is 1 for ZeroPadding1D, 2 for ZeroPadding2D.
     self.rank = len(padding)
     self.padding = padding
     self.data_format = K.normalize_data_format(data_format)
     # Expected input rank: spatial dims plus batch and channel axes.
     self.input_spec = InputSpec(ndim=self.rank + 2)
     super(_ZeroPadding, self).__init__(**kwargs)
# Example 23
# 0
    def __init__(self,
                 units,
                 activation=None,
                 init_criterion='he',
                 kernel_initializer='complex',
                 kernel_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 seed=None,
                 scale=1,
                 **kwargs):
        """Configure a complex-valued fully connected layer.

        `kernel_initializer` may be one of the custom strings 'complex' /
        'constant' (kept verbatim, resolved later — e.g. in build()) or
        any standard Keras initializer identifier.
        """
        # Accept Keras' `input_dim` shorthand.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super(CompFC, self).__init__(**kwargs)
        self.units = units
        self.scale = scale
        self.activation = activations.get(activation)
        self.init_criterion = init_criterion
        if kernel_initializer in {'complex', 'constant'}:
            # Custom identifiers stay as strings for later dispatch.
            self.kernel_initializer = kernel_initializer
        else:
            self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        if seed is None:
            # Random default seed (10e6 is a float; numpy accepts it here).
            self.seed = np.random.randint(1, 10e6)
        else:
            self.seed = seed
        self.input_spec = InputSpec(ndim=2)
        self.supports_masking = True
    def build(self, input_shape):
        """Create the conv kernel, an auxiliary `u` vector, and optional
        bias."""
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        # NOTE(review): non-trainable random-normal vector named 'sn',
        # shaped (1, filters) — presumably the power-iteration vector for
        # spectral normalization; confirm in call().
        self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),
                         initializer=initializers.RandomNormal(0, 1),
                         name='sn',
                         trainable=False)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
# Example 25
# 0
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              tied_to=None,
              **kwargs):
     """Dense-style layer that can share weights with another layer.

     `tied_to` is only stored here; how the tied layer's weights are
     used is not visible in this method.
     """
     self.tied_to = tied_to
     # Accept Keras' `input_dim` shorthand.
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super().__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
# Example 26
# 0
    def build(self, input_shape):
        """Create a kernel, resolve 'Glorot' hyper-parameters, and compute
        a binarized copy of the kernel.

        NOTE(review): when use_bias is True this method creates no bias
        weight and never sets lr_multipliers (the branch is `pass`) —
        confirm callers always use use_bias=False. Also the kernel is
        created *before* self.H is resolved from 'Glorot'.
        """
        # input_shape (None,40)
        print(input_shape)

        input_dim = input_shape[1]
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.H == 'Glorot':
            self.H = np.float32(np.sqrt(1.5 / (int(input_dim) + self.units)))
            # print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(
                1. / np.sqrt(1.5 / (int(input_dim) + self.units)))
            # print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

        if self.use_bias:
            pass
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
        # One-off binarized snapshot of the kernel taken at build time.
        self.binary = binarize(self.kernel, H=self.H)
# Example 27
# 0
 def build(self, input_shape):
     """Create the 1x1 f/g/h kernels, their biases, and the mixing
     scalar `gamma` for self-attention."""
     # 1x1 kernels mapping `channels` to the f/g (and h) filter counts.
     kernel_shape_f_g = (1, 1) + (self.channels, self.filters_f_g)
     print(kernel_shape_f_g)
     kernel_shape_h = (1, 1) + (self.channels, self.filters_h)
     # Number of positions over the first two non-batch axes.
     self.N = input_shape[1] * input_shape[2]
     # Create a trainable weight variable for this layer:
     # gamma starts at zero so attention initially contributes nothing.
     self.gamma = self.add_weight(name='gamma',
                                  shape=[1],
                                  initializer='zeros',
                                  trainable=True)
     self.kernel_f = self.add_weight(shape=kernel_shape_f_g,
                                     initializer='glorot_uniform',
                                     name='kernel_f')
     self.kernel_g = self.add_weight(shape=kernel_shape_f_g,
                                     initializer='glorot_uniform',
                                     name='kernel_g')
     self.kernel_h = self.add_weight(shape=kernel_shape_h,
                                     initializer='glorot_uniform',
                                     name='kernel_h')
     self.bias_f = self.add_weight(shape=(self.filters_f_g, ),
                                   initializer='zeros',
                                   name='bias_F')
     self.bias_g = self.add_weight(shape=(self.filters_f_g, ),
                                   initializer='zeros',
                                   name='bias_g')
     self.bias_h = self.add_weight(shape=(self.filters_h, ),
                                   initializer='zeros',
                                   name='bias_h')
     super(SelfAttention, self).build(input_shape)
     # Set input spec.
     self.input_spec = InputSpec(ndim=4, axes={3: input_shape[-1]})
     self.built = True
Esempio n. 28
0
    def build(self, input_shape):
        """Create the CRF layer's trainable weights.

        Builds the emission kernel, the chain (tag-transition) kernel, an
        optional bias, and optional left/right boundary energy vectors.

        Args:
            input_shape: Shape tuple; the last axis is the feature dim.
        """
        self.input_spec = [InputSpec(shape=input_shape)]
        self.input_dim = input_shape[-1]

        add = self.add_weight

        # Emission weights: map input features to per-tag scores.
        self.kernel = add(name='kernel',
                          shape=(self.input_dim, self.units),
                          initializer=self.kernel_initializer,
                          regularizer=self.kernel_regularizer,
                          constraint=self.kernel_constraint)
        # Transition weights between consecutive tags.
        self.chain_kernel = add(name='chain_kernel',
                                shape=(self.units, self.units),
                                initializer=self.chain_initializer,
                                regularizer=self.chain_regularizer,
                                constraint=self.chain_constraint)

        if self.use_bias:
            self.bias = add(name='bias',
                            shape=(self.units,),
                            initializer=self.bias_initializer,
                            regularizer=self.bias_regularizer,
                            constraint=self.bias_constraint)
        else:
            # Scalar zero keeps any downstream "+ self.bias" arithmetic valid.
            self.bias = 0

        if self.use_boundary:
            # Energies added at the sequence start / end positions.
            for attr in ('left_boundary', 'right_boundary'):
                setattr(self, attr,
                        add(name=attr,
                            shape=(self.units,),
                            initializer=self.boundary_initializer,
                            regularizer=self.boundary_regularizer,
                            constraint=self.boundary_constraint))
        self.built = True
Esempio n. 29
0
    def build(self, input_shape):
        """Create the dense layer's kernel and optional bias.

        Args:
            input_shape: Shape tuple with at least 2 dimensions; the last
                axis is the input feature dimension.
        """
        ndim = len(input_shape)
        assert ndim >= 2
        input_dim = input_shape[-1]
        self.input_dim = input_dim
        self.input_spec = [InputSpec(dtype=K.floatx(),
                                     ndim=ndim)]

        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='{}_W'.format(self.name),
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            # 'zeros' is the registered Keras initializer alias; the original
            # 'zero' is not recognized by modern Keras and raises a ValueError.
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer='zeros',
                                        name='{}_b'.format(self.name),
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        # Load weights passed to __init__ now that shapes exist, then drop
        # the reference so they are not serialized twice.
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True
 def build(self, input_shape):
     """Create the per-channel scale variable ``gamma``.

     ``gamma`` starts at ``self.scale`` for every channel along
     ``self.axis`` and is registered as this layer's only trainable weight.
     """
     self.input_spec = InputSpec(shape=input_shape)
     n_channels = input_shape[self.axis]
     gamma_init = np.ones((n_channels,)) * self.scale
     self.gamma = K.variable(gamma_init, name='{}_gamma'.format(self.name))
     self.trainable_weights = [self.gamma]
     super(Normalize, self).build(input_shape)