Example #1
 def __init__(self,
              units,
              num_sampled,
              num_true=1,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(NCE, self).__init__(**kwargs)
     self.units = units
     self.num_sampled = num_sampled
     if self.num_sampled > self.units:
         raise ValueError('num_sampled: {} cannot be greater than units: {}'
                          .format(num_sampled, units))
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = [InputSpec(min_ndim=2), InputSpec(min_ndim=1)]
     self.supports_masking = True
    def build(self, input_shape):
        # Note input_shape will be list of shapes of initial states and
        # constants if these are passed in __call__.
        if self._num_constants is not None:
            constants_shape = input_shape[-self._num_constants:]
        else:
            constants_shape = None

        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        batch_size = input_shape[0] if self.stateful else None
        self.input_spec[0] = InputSpec(shape=(batch_size, None) +
                                       input_shape[2:6])

        # Allow the cell (if it is a layer) to build before we set or validate
        # state_spec. (It may need building even when it is not a Layer.)
        if isinstance(self.cell, Layer):
            step_input_shape = (input_shape[0], ) + input_shape[2:]
            if constants_shape is not None:
                self.cell.build([step_input_shape] + constants_shape)
            else:
                self.cell.build(step_input_shape)

        # set or validate state_spec
        if hasattr(self.cell.state_size, '__len__'):
            state_size = list(self.cell.state_size)
        else:
            state_size = [self.cell.state_size]

        if self.state_spec is not None:
            # initial_state was passed in call, check compatibility
            if self.cell.data_format == 'channels_first':
                ch_dim = 1
            elif self.cell.data_format == 'channels_last':
                ch_dim = 4
            if not [spec.shape[ch_dim]
                    for spec in self.state_spec] == state_size:
                raise ValueError(
                    'An initial_state was passed that is not compatible with '
                    '`cell.state_size`. Received `state_spec`={}; '
                    'However `cell.state_size` is '
                    '{}'.format([spec.shape for spec in self.state_spec],
                                self.cell.state_size))
        else:
            if self.cell.data_format == 'channels_first':
                self.state_spec = [
                    InputSpec(shape=(None, dim, None, None, None))
                    for dim in state_size
                ]
            elif self.cell.data_format == 'channels_last':
                self.state_spec = [
                    InputSpec(shape=(None, None, None, None, dim))
                    for dim in state_size
                ]
        if self.stateful:
            self.reset_states()
        self.built = True
    def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
        inputs, initial_state, constants = _standardize_args(
            inputs, initial_state, constants, self._num_constants)

        if initial_state is None and constants is None:
            return super(ConvRNN3D, self).__call__(inputs, **kwargs)

        # If any of `initial_state` or `constants` are specified and are Keras
        # tensors, then add them to the inputs and temporarily modify the
        # input_spec to include them.

        additional_inputs = []
        additional_specs = []
        if initial_state is not None:
            kwargs['initial_state'] = initial_state
            additional_inputs += initial_state
            self.state_spec = []
            for state in initial_state:
                try:
                    shape = K.int_shape(state)
                # Fix for Theano
                except TypeError:
                    shape = tuple(None for _ in range(K.ndim(state)))
                self.state_spec.append(InputSpec(shape=shape))

            additional_specs += self.state_spec
        if constants is not None:
            kwargs['constants'] = constants
            additional_inputs += constants
            self.constants_spec = [
                InputSpec(shape=K.int_shape(constant))
                for constant in constants
            ]
            self._num_constants = len(constants)
            additional_specs += self.constants_spec
        # at this point additional_inputs cannot be empty
        for tensor in additional_inputs:
            if K.is_keras_tensor(tensor) != K.is_keras_tensor(
                    additional_inputs[0]):
                raise ValueError('The initial state or constants of an RNN'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors')

        if K.is_keras_tensor(additional_inputs[0]):
            # Compute the full input spec, including state and constants
            full_input = [inputs] + additional_inputs
            full_input_spec = self.input_spec + additional_specs
            # Perform the call with temporarily replaced input_spec
            original_input_spec = self.input_spec
            self.input_spec = full_input_spec
            output = super(ConvRNN3D, self).__call__(full_input, **kwargs)
            self.input_spec = original_input_spec
            return output
        else:
            return super(ConvRNN3D, self).__call__(inputs, **kwargs)
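The loop over `additional_inputs` above enforces an all-or-nothing rule: either every extra input (states and constants) is a symbolic Keras tensor, or none of them is. A minimal standalone sketch of that rule, with a stand-in predicate instead of `K.is_keras_tensor`:

    def check_no_mix(tensors, is_symbolic):
        # All additional inputs must agree with the first one on whether
        # they are symbolic (graph) tensors or plain values.
        first = is_symbolic(tensors[0])
        for t in tensors[1:]:
            if is_symbolic(t) != first:
                raise ValueError('The initial state or constants of an RNN'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors')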
Example #4
 def set_config(self, config_in):
     self.rank = 2
     self.filters = config_in['filters']
     self.kernel_size = conv_utils.normalize_tuple(config_in['kernel_size'],
                                                   self.rank, 'kernel_size')
     self.strides = conv_utils.normalize_tuple(config_in['strides'],
                                               self.rank, 'strides')
     self.padding = conv_utils.normalize_padding(config_in['padding'])
     self.data_format = K.normalize_data_format(config_in['data_format'])
     self.dilation_rate = conv_utils.normalize_tuple(
         config_in['dilation_rate'], self.rank, 'dilation_rate')
     self.activation = activations.get(config_in['activation'])
     self.use_bias = config_in['use_bias']
     self.kernel_initializer = initializers.get(
         config_in['kernel_initializer'])
     self.bias_initializer = initializers.get(config_in['bias_initializer'])
     self.kernel_regularizer = regularizers.get(
         config_in['kernel_regularizer'])
     self.bias_regularizer = regularizers.get(config_in['bias_regularizer'])
     self.activity_regularizer = regularizers.get(
         config_in['activity_regularizer'])
     self.kernel_constraint = constraints.get(
         config_in['kernel_constraint'])
     self.bias_constraint = constraints.get(config_in['bias_constraint'])
     self.input_spec = InputSpec(ndim=self.rank + 2)
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        kernel_norm = tf.reduce_sum(tf.square(self.kernel), axis=(0, 1, 2),
                                    keepdims=False)
        self.gain = tf.Variable(tf.ones_like(kernel_norm), name='kernel_gain',
                                trainable=False)
        self._non_trainable_weights.append(self.gain)

        self.kernel = self.gain * self.kernel / (tf.sqrt(kernel_norm) + 1e-12)

        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
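The `build` above re-parameterizes the kernel as `gain * V / ||V||`, where the sum of squares is taken over everything except the output-filter axis, so each filter ends up with (approximately) unit L2 norm. A small NumPy sketch of the same arithmetic (shapes are illustrative):

    import numpy as np

    V = np.random.randn(3, 3, 16, 32)             # (kh, kw, in, filters)
    sq_norm = np.sum(V ** 2, axis=(0, 1, 2))      # one value per output filter
    gain = np.ones_like(sq_norm)                  # non-trainable gain, as above
    W = gain * V / (np.sqrt(sq_norm) + 1e-12)     # effective kernel
    assert np.allclose(np.sum(W ** 2, axis=(0, 1, 2)), 1.0, atol=1e-6)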
Example #6
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              rank=2,
              padding='valid',
              data_format=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(Conv2D121, self).__init__(**kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     # normalize_padding validates the padding value; only 'valid', 'same'
     # and 'causal' are accepted.
     self.padding = conv_utils.normalize_padding(padding)
     # normalize and validate the data format
     self.data_format = normalize_data_format(data_format)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
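For reference, `conv_utils.normalize_tuple` broadcasts a scalar to a tuple of the requested rank and validates tuples of that length, and `normalize_padding` checks the padding string. A quick sketch, assuming the Keras 2.x `conv_utils` module these snippets use:

    from keras.utils import conv_utils

    conv_utils.normalize_tuple(3, 2, 'kernel_size')       # -> (3, 3)
    conv_utils.normalize_tuple((3, 5), 2, 'kernel_size')  # -> (3, 5)
    conv_utils.normalize_padding('same')                  # -> 'same'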
Example #7
    def build(self, input_shape):
        assert len(input_shape) >= 2
        self.input_dim = input_shape[-1]

        # trainable parameters: mu_w, mu_b, sigma_w, sigma_b

        self.mu_w = self.add_weight(shape=(self.input_dim, self.units),
                                    initializer=self.kernel_initializer,
                                    name='mu_w',
                                    regularizer=None,
                                    constraint=None)

        self.mu_b = self.add_weight(shape=(self.units, ),
                                    initializer=self.bias_initializer,
                                    name='mu_b',
                                    regularizer=None,
                                    constraint=None)

        self.sigma_w = self.add_weight(
            shape=(self.input_dim, self.units),
            initializer=initializers.Constant(0.01),  #constant initialization
            name='sigma_w',
            regularizer=None,
            constraint=None)

        self.sigma_b = self.add_weight(
            shape=(self.units, ),
            initializer=initializers.Constant(0.01),  #constant initialization
            name='sigma_b',
            regularizer=None,
            constraint=None)

        self.input_spec = InputSpec(min_ndim=2, axes={-1: self.input_dim})
        self.built = True
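The layer's `call` is not part of this snippet, but with these four weights a noisy dense forward pass is conventionally `y = x @ (mu_w + sigma_w * eps_w) + (mu_b + sigma_b * eps_b)` with fresh Gaussian noise each pass (NoisyNet style). A hedged sketch of what such a `call` could look like; this is an assumption, not the original method:

    from keras import backend as K

    def call(self, inputs):
        # Sample fresh noise on every forward pass (assumed formulation).
        eps_w = K.random_normal(shape=(self.input_dim, self.units))
        eps_b = K.random_normal(shape=(self.units,))
        kernel = self.mu_w + self.sigma_w * eps_w
        bias = self.mu_b + self.sigma_b * eps_b
        return K.dot(inputs, kernel) + bias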
Example #8
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              kernel_mask_val=None,
              Masked=False,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(MaskedDense, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
     self.Masked = Masked
     self.kernel_mask_val = kernel_mask_val
Example #9
 def __init__(self, size, data_format=None, **kwargs):
     # self.rank is 1 for UpSampling1D, 2 for UpSampling2D.
     self.rank = len(size)
     self.size = size
     self.data_format = K.normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=self.rank + 2)
     super(_UpSampling, self).__init__(**kwargs)
Example #10
    def build(self, input_shape):

        #Ordinary Conv2D kernel
        channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
Example #11
    def __init__(self,
                 units=14,
                 name='rbmhidden',
                 return_sequences=False,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(RBMhidden, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)

        self.units = int(units) if not isinstance(units, int) else units
        self.return_sequences = return_sequences
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True
Example #12
    def build(self, input_shape):
        """
        Re-implement for free param kappa.

        For more info, see: https://elib.dlr.de/116408/1/WACV2018.pdf
        """
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.kappa = self.add_weight(
            shape=(1, ),
            initializer=initializers.Constant(value=1.),
            name="kappa",
            regularizer=regularizers.l2(1e-1),
            constraint=constraints.NonNeg())
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
Example #13
 def __init__(self, units,
              activation='tanh',
              use_bias=True,
              recurrent_activation='hard_sigmoid',
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(GatedPCL, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.recurrent_activation = activations.get(recurrent_activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
     self.mask = PCLMatrix.matrix128_256
Example #14
    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        self.kernel = self.add_weight(shape=(input_dim, self.units * 4),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units * 4,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})

        self.kernel_i = self.kernel[:, :self.units] * self.mask
        print(self.kernel_i)
        print('kernel_i', self.kernel_i.shape, self.mask.shape)
        self.kernel_c = self.kernel[:, self.units * 2: self.units * 3] * self.mask
        self.kernel_o = self.kernel[:, self.units * 3:] * self.mask

        if self.use_bias:
            self.bias_i = self.bias[:self.units]
            self.bias_c = self.bias[self.units * 2: self.units * 3]
            self.bias_o = self.bias[self.units * 3:]
        else:
            self.bias_i = None
            self.bias_c = None
            self.bias_o = None

        self.built = True
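Note the slice layout: the kernel packs four blocks of width `units`, of which this `build` masks and keeps the input block `[:units]`, the candidate block `[2*units:3*units]`, and the output block `[3*units:]`; the `[units:2*units]` (forget-gate) block is allocated but left unused here. A tiny NumPy illustration of the same slicing:

    import numpy as np

    units = 4
    kernel = np.arange(2 * 4 * units).reshape(2, 4 * units)  # (input_dim, 4*units)
    k_i = kernel[:, :units]                 # input block
    k_f = kernel[:, units:units * 2]        # forget block (unused above)
    k_c = kernel[:, units * 2:units * 3]    # candidate block
    k_o = kernel[:, units * 3:]             # output block
    assert k_i.shape == k_f.shape == k_c.shape == k_o.shape == (2, units)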
Example #15
    def build(self, input_shape):

        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        ''' The original Dense kernel has too many dimensions here; a
        (units, 1) kernel is used instead of:
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        '''
        self.kernel = self.add_weight(shape=(self.units, 1),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
Example #16
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel_mask = tf.Variable(initial_value=self.kernel_mask_val,
                                       trainable=False)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        binary_kernel_shape = self.kernel_size + (input_dim, input_dim)
        self.binary_kernel = make_binary_kernel(binary_kernel_shape,
                                                self.sparsity)

        self.kernel = self.add_weight(shape=(1, 1, input_dim, self.filters),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            raise NotImplementedError
        else:
            self.bias = None

        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
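`make_binary_kernel` is not defined in this snippet. A plausible reading, given the `sparsity` argument, is a fixed random {0, 1} tensor; the sketch below is hypothetical (both the semantics of `sparsity` and the return type are assumptions):

    import numpy as np
    import tensorflow as tf

    def make_binary_kernel(shape, sparsity):
        # Hypothetical helper: a non-trainable binary mask where `sparsity`
        # is assumed to be the fraction of entries set to zero.
        mask = (np.random.rand(*shape) >= sparsity).astype('float32')
        return tf.constant(mask)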
Example #18
    def build(self, input_shape):
        assert len(input_shape) == 2
        input_dim_1 = input_shape[-1]
        input_dim_2 = input_dim_1 * input_dim_1

        self.kernel1 = self.add_weight(shape=(input_dim_1, self.units),
                                       initializer=self.kernel_initializer,
                                       name='kernel1',
                                       regularizer=self.kernel_regularizer,
                                       constraint=self.kernel_constraint)
        self.kernel2 = self.add_weight(shape=(input_dim_2, self.units),
                                       initializer=self.kernel_initializer,
                                       name='kernel2',
                                       regularizer=self.kernel_regularizer,
                                       constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim_1})
        self.built = True
    def build(self, input_shape):
        # If a singleton shape tuple is passed in, use the vanilla TimeDistributed
        if not isinstance(input_shape, list):
            super(TimeDistributedMultiInput, self).build(input_shape)
            return

        assert all([len(shape) >= 3 for shape in input_shape if shape is not None])
        # We need to verify that the inputs have the same or broadcastable batch
        # and time dimensions
        batch_sizes = [shape[0] for shape in input_shape if shape is not None]
        batch_sizes = set(batch_sizes)
        batch_sizes -= set([None])
        # Allow batch size of 1 if an input is to be broadcasted
        batch_sizes -= set([1])

        if len(batch_sizes) > 1:
            raise ValueError('Received tensors with incompatible batch sizes. '
                             'Got tensors with shapes: ' + str(input_shape))
        timesteps = [shape[1] for shape in input_shape if shape is not None]
        timesteps = set(timesteps)
        timesteps -= set([None])
        # Allow 1 timestep if an input is to be broadcasted
        timesteps -= set([1])

        if len(timesteps) > 1:
            raise ValueError('Received tensors with incompatible numbers of '
                             'timesteps. Got tensors with shapes: ' +
                             str(input_shape))
        self.timesteps = timesteps.pop() if len(timesteps) == 1 else None
        self.input_spec = [InputSpec(shape=s) for s in input_shape]
        child_input_shapes = [(shape[0],) + shape[2:] for shape in input_shape]
        if not self.layer.built:
            self.layer.build(child_input_shapes)
            self.layer.built = True

        Wrapper.build(self)
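The set arithmetic above treats `None` (unknown) and `1` (broadcastable) as compatible with everything, so only genuinely conflicting sizes survive the subtraction. For example:

    compatible = set([32, None, 1, 32]) - set([None]) - set([1])
    assert compatible == set([32])   # one concrete size left: OK

    conflicting = set([32, 16]) - set([None]) - set([1])
    assert len(conflicting) > 1      # two concrete sizes: would raise ValueError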
    def __init__(self,
                 kernel_size=3,
                 strides=1,
                 padding='valid',
                 dilation_rate=1,
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """

        :param kernel_size: an odd integer, or a tuple/list of two odd integers
        :param strides:
        :param padding:
        :param dilation_rate:
        :param activation:
        :param kernel_initializer:
        :param bias_initializer:
        :param kernel_regularizer:
        :param bias_regularizer:
        :param kernel_constraint:
        :param bias_constraint:
        :param kwargs:
        """
        super(DeformConv2d, self).__init__(**kwargs)

        # H_kernel = kernel_size[0]
        # W_kernel = kernel_size[1]
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
                                                      'kernel_size')
        assert len(self.kernel_size) == 2, 'kernel_size must have length 2'
        assert self.kernel_size[0] % 2 == 1 and self.kernel_size[1] % 2 == 1, \
            'the kernel height and width must be odd'

        # N = H_kernel * W_kernel
        # The offset convolution outputs 2 * N channels, i.e. separate
        # x-axis and y-axis coordinate offsets for each sampling location.
        self.filters = 2 * self.kernel_size[0] * self.kernel_size[1]
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=4)

        self.kernel = None
        self.bias = None
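With the default 3 x 3 kernel this gives N = 9 sampling locations, so the offset convolution outputs 2 * N = 18 channels, one x-offset and one y-offset per location:

    kernel_size = (3, 3)
    offset_channels = 2 * kernel_size[0] * kernel_size[1]
    assert offset_channels == 18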
Example #21
 def build(self, input_shape):
     if self.data_format == 'channels_last':
         input_row, input_col = input_shape[1:-1]
         input_filter = input_shape[3]
     else:
         input_row, input_col = input_shape[2:]
         input_filter = input_shape[1]
     if input_row is None or input_col is None:
         raise ValueError('The spatial dimensions of the inputs to '
                          'a LocallyConnected2D layer '
                          'should be fully-defined, but layer received '
                          'the inputs shape ' + str(input_shape))
     output_row = conv_utils.conv_output_length(input_row,
                                                self.kernel_size[0],
                                                self.padding,
                                                self.strides[0])
     output_col = conv_utils.conv_output_length(input_col,
                                                self.kernel_size[1],
                                                self.padding,
                                                self.strides[1])
     self.output_row = output_row
     self.output_col = output_col
     self.kernel_shape = (output_row, self.kernel_size[0],
                          self.kernel_size[1], input_filter, self.filters)
     self.kernel = self.add_weight(shape=self.kernel_shape,
                                   initializer=self.kernel_initializer,
                                   name='kernel',
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
     if self.use_bias:
         self.bias = self.add_weight(shape=(output_row, 1, self.filters),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
     else:
         self.bias = None
     if self.data_format == 'channels_first':
         self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
     else:
         self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
     self.built = True
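`conv_utils.conv_output_length` follows the usual convolution arithmetic. For instance, a 28-wide axis with a size-3 kernel gives:

    from keras.utils import conv_utils

    assert conv_utils.conv_output_length(28, 3, 'valid', 1) == 26
    assert conv_utils.conv_output_length(28, 3, 'same', 1) == 28
    assert conv_utils.conv_output_length(28, 3, 'valid', 2) == 13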
Example #22
    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      constraint=self.kernel_constraint)

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        super(Projection, self).build(input_shape)
Example #23
    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        self.bias = self.add_weight(shape=(input_dim, ),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
Example #24
 def __init__(self,
              symmetric,
              input_is_revcomp_conv,
              smoothness_penalty=None,
              bias=False,
              **kwargs):
     super(WeightedSum1D, self).__init__(**kwargs)
     self.symmetric = symmetric
     self.input_is_revcomp_conv = input_is_revcomp_conv
     self.smoothness_penalty = smoothness_penalty
     self.bias = bias
     self.input_spec = [InputSpec(ndim=3)]
 def __init__(self,
              cell,
              return_sequences=False,
              return_state=False,
              go_backwards=False,
              stateful=False,
              unroll=False,
              **kwargs):
     super(ConvRNN3D,
           self).__init__(cell, return_sequences, return_state,
                          go_backwards, stateful, unroll, **kwargs)
     self.input_spec = [InputSpec(ndim=6)]
 def build(self, input_shape):
     assert len(input_shape) >= 2
     input_dim = input_shape[-1]
     w = self.add_weight(shape=(input_dim, self.units),
                         initializer=self.kernel_initializer,
                         name='kernel',
                         regularizer=self.kernel_regularizer,
                         constraint=self.kernel_constraint)
     self.kernel = w / (K.epsilon() +
                        K.sqrt(K.sum(K.square(w), axis=0, keepdims=True)))
     self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
     self.built = True
Example #27
 def __init__(self,
              bias_initializer='zeros',
              bias_regularizer=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(Bias, self).__init__(**kwargs)
     self.bias_initializer = initializers.get(bias_initializer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
Example #28
    def build(self, input_shape):
        """
        Build the layer for the given input shape.

        Args:
            input_shape: the shape to build the layer with

        Returns:
            None

        """
        # determine which axis contains channel data
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        # if the channel dimension is not defined, raise an error
        if input_shape[channel_axis] is None:
            raise ValueError(
                'The channel dimension of the inputs should be defined. '
                'Found `None`. ')

        # get the input channels from the input shape
        input_dim = input_shape[channel_axis]
        # create the shape for the 1 x 1 convolution kernels
        kernel_shape = (*self.kernel_size, input_dim, self.num_filters)

        # initialize the kernels and biases as empty lists
        self.kernels = len(self.bin_sizes) * [None]
        self.biases = len(self.bin_sizes) * [None]
        # iterate over the levels in the pyramid
        for (level, bin_size) in enumerate(self.bin_sizes):
            # create the kernel weights for this level
            self.kernels[level] = self.add_weight(
                shape=kernel_shape,
                initializer=self.kernel_initializer,
                name='kernel_{}'.format(bin_size),
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint)
            # if using bias, create the bias weights for this level
            if self.use_bias:
                self.biases[level] = self.add_weight(
                    shape=(self.num_filters, ),
                    initializer=self.bias_initializer,
                    name='bias_{}'.format(bin_size),
                    regularizer=self.bias_regularizer,
                    constraint=self.bias_constraint)

        # set the input specification for the layer
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        self.built = True
Example #29
    def build(self, input_shape):
        assert len(input_shape) >= 2

        self.input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(self.dendrites, self.input_dim,
                                             self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.alpha_L = 0.5
        self.alpha_U = 0.5
        self.b_U = 0  # Might make these trainable
        self.b_L = 1  # Might make these trainable
        self.a_d = 1

        self.c_d = 0.5
        self.b_d = 0.5

        # self.b_d = self.add_weight(shape = 1,
        #                            initializer = self.bias_initializer,
        #                            name='b_d',
        #                            regularizer=self.bias_regularizer,
        #                            constraint=self.bias_constraint)
        # self.c_d = self.add_weight(shape = 1,
        #                            initializer = self.bias_initializer,
        #                            name='c_d',
        #                            regularizer=self.bias_regularizer,
        #                            constraint=self.bias_constraint)
        #self.dendriteInput = np.zeros((self.dendrites, hself.units))
        self.dendriteInput = np.zeros((self.dendrites, self.units))
        self.dendriteActivations = np.zeros(self.dendrites)
        self.preoutput = 0
        if self.use_bias:
            # Use a distinct weight name so it does not collide with 'bias' below.
            self.dendriteBias = self.add_weight(
                shape=(self.units * self.dendrites, ),
                initializer=self.bias_initializer,
                name='dendrite_bias',
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint)

            self.bias = self.add_weight(shape=(self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: self.input_dim})
        #  self.built = True
        super(Dendritic, self).build(input_shape)
Example #30
    def __init__(self,
                 filters,
                 kernel_size,
                 conv_size,
                 conv_first=True,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        # from _Conv
        super(PartialConv2D, self).__init__(**kwargs)
        self.rank = 2
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, self.rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, self.rank,
                                                  'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = K.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, self.rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        # custom
        self.conv_size = tuple(conv_size)
        self.conv_first = conv_first
        self.num_conv_features = int(np.prod(self.conv_size))
        self.input_spec = InputSpec(ndim=2)

        # defined in class methods
        self.kernel = None
        self.bias = None
        self.built = False