コード例 #1
0
ファイル: local.py プロジェクト: Shinepans/tensorflow-2
    def build(self, input_shape):
        """Create the locally-connected kernel and bias for 1D input.

        Expects a rank-3 input shape; which axes hold the channel and
        temporal dimensions depends on `self.data_format`.
        """
        # Channel axis is 1 for channels_first, otherwise the last axis.
        if self.data_format == 'channels_first':
            input_dim, input_length = input_shape[1], input_shape[2]
        else:
            input_dim, input_length = input_shape[2], input_shape[1]

        if input_dim is None:
            raise ValueError(
                'Axis 2 of input should be fully-defined. '
                'Found shape:', input_shape)
        # Number of spatial output positions; each position owns its own
        # (unshared) kernel slice, hence the leading output_length dim below.
        output_length = conv_utils.conv_output_length(input_length,
                                                      self.kernel_size[0],
                                                      self.padding,
                                                      self.strides[0])
        self.kernel_shape = (output_length, self.kernel_size[0] * input_dim,
                             self.filters)
        self.kernel = self.add_weight(shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            # One bias per output position and filter (not shared spatially).
            self.bias = self.add_weight(shape=(output_length, self.filters),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        # Pin the channel axis so later calls are shape-checked.
        if self.data_format == 'channels_first':
            self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
        else:
            self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
        self.built = True
コード例 #2
0
    def build(self, input_shape):
        """Build the convolutional RNN: input spec, cell weights, state specs.

        `input_shape` may be a list when initial states and/or constants
        were passed to `__call__`; the first entry is then the main input.
        """
        # Note input_shape will be list of shapes of initial states and
        # constants if these are passed in __call__.
        if self._num_constants is not None:
            constants_shape = input_shape[-self._num_constants:]  # pylint: disable=E1130
        else:
            constants_shape = None

        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        # Stateful layers need a fixed batch size baked into the spec.
        batch_size = input_shape[0] if self.stateful else None
        self.input_spec[0] = InputSpec(shape=(batch_size, None) +
                                       input_shape[2:5])

        # allow cell (if layer) to build before we set or validate state_spec
        if isinstance(self.cell, Layer):
            # Per-timestep shape: drop the time axis (axis 1).
            step_input_shape = (input_shape[0], ) + input_shape[2:]
            if constants_shape is not None:
                self.cell.build([step_input_shape] + constants_shape)
            else:
                self.cell.build(step_input_shape)

        # set or validate state_spec
        if hasattr(self.cell.state_size, '__len__'):
            state_size = list(self.cell.state_size)
        else:
            state_size = [self.cell.state_size]

        if self.state_spec is not None:
            # initial_state was passed in call, check compatibility
            if self.cell.data_format == 'channels_first':
                ch_dim = 1
            elif self.cell.data_format == 'channels_last':
                ch_dim = 3
            if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
                raise ValueError(
                    'An initial_state was passed that is not compatible with '
                    '`cell.state_size`. Received `state_spec`={}; '
                    'However `cell.state_size` is '
                    '{}'.format([spec.shape for spec in self.state_spec],
                                self.cell.state_size))
        else:
            # No initial_state given: derive specs from the cell's state size.
            if self.cell.data_format == 'channels_first':
                self.state_spec = [
                    InputSpec(shape=(None, dim, None, None))
                    for dim in state_size
                ]
            elif self.cell.data_format == 'channels_last':
                self.state_spec = [
                    InputSpec(shape=(None, None, None, dim))
                    for dim in state_size
                ]
        if self.stateful:
            self.reset_states()
        self.built = True
コード例 #3
0
    def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
        """Call the RNN, optionally with explicit initial state / constants.

        When the extra tensors are Keras tensors they are appended to the
        inputs and `input_spec` is temporarily extended to cover them, so
        the functional API traces them as real graph inputs.
        """
        inputs, initial_state, constants = _standardize_args(
            inputs, initial_state, constants, self._num_constants)

        # Fast path: plain call with no extra tensors.
        if initial_state is None and constants is None:
            return super(ConvRNN2D, self).__call__(inputs, **kwargs)

        # If any of `initial_state` or `constants` are specified and are Keras
        # tensors, then add them to the inputs and temporarily modify the
        # input_spec to include them.

        additional_inputs = []
        additional_specs = []
        if initial_state is not None:
            kwargs['initial_state'] = initial_state
            additional_inputs += initial_state
            self.state_spec = []
            for state in initial_state:
                shape = K.int_shape(state)
                self.state_spec.append(InputSpec(shape=shape))

            additional_specs += self.state_spec
        if constants is not None:
            kwargs['constants'] = constants
            additional_inputs += constants
            self.constants_spec = [
                InputSpec(shape=K.int_shape(constant))
                for constant in constants
            ]
            self._num_constants = len(constants)
            additional_specs += self.constants_spec
        # at this point additional_inputs cannot be empty
        # All extras must uniformly be Keras tensors or uniformly not; a mix
        # cannot be represented in a single graph call.
        for tensor in additional_inputs:
            if K.is_keras_tensor(tensor) != K.is_keras_tensor(
                    additional_inputs[0]):
                raise ValueError('The initial state or constants of an RNN'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors')

        if K.is_keras_tensor(additional_inputs[0]):
            # Compute the full input spec, including state and constants
            full_input = [inputs] + additional_inputs
            full_input_spec = self.input_spec + additional_specs
            # Perform the call with temporarily replaced input_spec
            original_input_spec = self.input_spec
            self.input_spec = full_input_spec
            output = super(ConvRNN2D, self).__call__(full_input, **kwargs)
            self.input_spec = original_input_spec
            return output
        else:
            # Non-Keras tensors are consumed via kwargs only.
            return super(ConvRNN2D, self).__call__(inputs, **kwargs)
コード例 #4
0
    def build(self, input_shape):
        """Build the wrapped layer plus the Concrete-Dropout parameters.

        Creates a trainable logit for the dropout probability `p` and
        registers the weight + dropout regularization terms as a layer loss.
        """
        self.input_spec = InputSpec(shape=input_shape)
        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True
        super(ConcreteDropout, self).build()

        # initialise p
        self.p_logit = self.layer.add_weight(
            name="p_logit",
            shape=(1, ),
            initializer=initializers.RandomUniform(self.init_min,
                                                   self.init_max),
            trainable=True)

        # Dropout probability is the sigmoid of the learned logit.
        self.p = K.sigmoid(self.p_logit[0])

        # Initialise regulariser / prior KL term
        input_dim = np.prod(input_shape[1:])  # We drop only last dim
        weight = self.layer.kernel
        # Weight prior term scaled by 1/(1-p): grows as keep-prob shrinks.
        kernel_regularizer = self.weight_regularizer * K.sum(
            K.square(weight)) / (1.0 - self.p)

        # Negative entropy of the Bernoulli dropout distribution.
        dropout_regularizer = self.p * K.log(self.p)
        dropout_regularizer += (1. - self.p) * K.log(1. - self.p)
        dropout_regularizer *= self.dropout_regularizer * input_dim

        regularizer = K.sum(kernel_regularizer + dropout_regularizer)
        self.layer.add_loss(regularizer)
コード例 #5
0
ファイル: core.py プロジェクト: Shinepans/tensorflow-2
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """Configure a densely-connected layer.

        Only hyper-parameters are stored here; weights are created later
        in `build`. A legacy `input_dim` keyword is accepted and mapped
        onto `input_shape` for backward compatibility.
        """
        # Legacy support: `input_dim=n` is equivalent to `input_shape=(n,)`.
        if 'input_dim' in kwargs and 'input_shape' not in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(Dense, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)

        self.units = int(units)
        self.use_bias = use_bias
        # Resolve string/None specs into concrete Keras objects.
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
コード例 #6
0
    def build(self, input_shape):
        """Create per-axis `gamma` / `beta` normalization parameters."""
        ndim = len(input_shape)
        if self.axis == 0:
            # Axis 0 is the batch axis; normalizing parameters over it is invalid.
            raise ValueError('Axis cannot be zero')

        if (self.axis is not None) and (ndim == 2):
            # (batch, features) inputs leave no per-sample axis to single out.
            raise ValueError('Cannot specify axis for rank 1 tensor')

        self.input_spec = InputSpec(ndim=ndim)

        # Without an axis, scale/offset are scalars broadcast over all dims.
        if self.axis is None:
            shape = (1, )
        else:
            shape = (input_shape[self.axis], )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.built = True
コード例 #7
0
    def __init__(self,
                 upsampling=(2, 2),
                 output_size=None,
                 data_format=None,
                 **kwargs):
        """Initialize bilinear upsampling layer.

        Parameters
        ----------
        upsampling: tuple
            2 numbers > 0. The upsampling ratio for h and w.
        output_size: tuple of 2 ints, optional
            Target spatial size; takes precedence over `upsampling`
            when truthy.
        data_format: str, optional
            'channels_first' or 'channels_last'; defaults to the
            backend image data format.
        """
        super(BilinearUpsampling, self).__init__(**kwargs)

        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)
        # Exactly one of `output_size` / `upsampling` is kept; the other is
        # set to None so downstream code can dispatch on which mode is active.
        if output_size:
            self.output_size = conv_utils.normalize_tuple(
                output_size, 2, 'output_size')
            self.upsampling = None
        else:
            self.output_size = None
            self.upsampling = conv_utils.normalize_tuple(
                upsampling, 2, 'upsampling')
コード例 #8
0
ファイル: core.py プロジェクト: Shinepans/tensorflow-2
 def build(self, input_shape):
     """Create `kernel` (and optional `bias`) for a Dense layer.

     Uses the TF1-style `Dimension.value` API, so `input_shape` must be
     convertible to a `TensorShape` with a statically-known last dim.
     """
     input_shape = tensor_shape.TensorShape(input_shape)
     if input_shape[-1].value is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     # Pin the feature axis so later inputs are shape-checked.
     self.input_spec = InputSpec(min_ndim=2,
                                 axes={-1: input_shape[-1].value})
     self.kernel = self.add_weight(
         'kernel',
         shape=[input_shape[-1].value, self.units],
         initializer=self.kernel_initializer,
         regularizer=self.kernel_regularizer,
         constraint=self.kernel_constraint,
         dtype=self.dtype,
         trainable=True)
     if self.use_bias:
         self.bias = self.add_weight('bias',
                                     shape=[
                                         self.units,
                                     ],
                                     initializer=self.bias_initializer,
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint,
                                     dtype=self.dtype,
                                     trainable=True)
     else:
         self.bias = None
     self.built = True
コード例 #9
0
 def __init__(self, padding=(1, 1), **kwargs):
     """Configure symmetric reflection padding for 4D inputs.

     Parameters
     ----------
     padding: int or tuple of 2 ints
         Padding applied symmetrically to height and width. A single int
         pads both dimensions by the same amount.
     """
     super(ReflectionPadding2D, self).__init__(**kwargs)
     if isinstance(padding, int):
         self.padding = ((padding, padding), (padding, padding))
     else:
         # Bug fix: the tuple argument was previously ignored and padding
         # silently hard-coded to ((1, 1), (1, 1)).
         self.padding = ((padding[0], padding[0]), (padding[1], padding[1]))
     self.input_spec = InputSpec(ndim=4)
    def build(self, input_shape):
        """Create the conv kernel, the spectral-norm vector `u`, and bias."""
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        # Non-trainable (1, filters) vector 'sn'. NOTE(review): presumably
        # the power-iteration vector for spectral normalization, updated in
        # `call` — confirm against the rest of the class.
        self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),
                         initializer=initializers.RandomNormal(0, 1),
                         name='sn',
                         trainable=False)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
コード例 #11
0
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   """Configure a 1D locally-connected layer.

   Normalizes and stores all hyper-parameters; weights are created in
   `build`. Only 'valid' padding is supported.
   """
   super(LocallyConnected1D, self).__init__(**kwargs)
   self.filters = filters
   # Canonicalize scalar-or-tuple arguments into 1-tuples.
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   if self.padding != 'valid':
     # Unshared weights make non-'valid' padding ill-defined here.
     raise ValueError('Invalid border mode for LocallyConnected1D '
                      '(only "valid" is supported): ' + padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.use_bias = use_bias
   # Resolve string/None specs into concrete Keras objects.
   self.activation = activations.get(activation)
   self.kernel_initializer = initializers.get(kernel_initializer)
   self.kernel_regularizer = regularizers.get(kernel_regularizer)
   self.kernel_constraint = constraints.get(kernel_constraint)
   self.bias_initializer = initializers.get(bias_initializer)
   self.bias_regularizer = regularizers.get(bias_regularizer)
   self.bias_constraint = constraints.get(bias_constraint)
   self.activity_regularizer = regularizers.get(activity_regularizer)
   self.input_spec = InputSpec(ndim=3)
コード例 #12
0
    def build(self, input_shape):
        """Create the NAC weight matrices `W_hat`, `M_hat` and optional gate `G`."""
        assert len(input_shape) >= 2
        input_dim = int(input_shape[-1])

        self.W_hat = self.add_weight(shape=(input_dim, self.units),
                                     name='W_hat',
                                     initializer=self.kernel_W_initializer,
                                     regularizer=self.kernel_W_regularizer,
                                     constraint=self.kernel_W_constraint)

        self.M_hat = self.add_weight(shape=(input_dim, self.units),
                                     name='M_hat',
                                     initializer=self.kernel_M_initializer,
                                     regularizer=self.kernel_M_regularizer,
                                     constraint=self.kernel_M_constraint)

        # The gate is only created for the full NALU; a pure NAC skips it.
        if self.nac_only:
            self.G = None
        else:
            self.G = self.add_weight(shape=(input_dim, self.units),
                                     name='G',
                                     initializer=self.gate_initializer,
                                     regularizer=self.gate_regularizer,
                                     constraint=self.gate_constraint)

        # Pin the feature axis so later inputs are shape-checked.
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
コード例 #13
0
ファイル: DEC.py プロジェクト: hadifar/DEC-keras
 def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
     """Configure a clustering layer for 2D (batch, features) inputs.

     `weights` optionally provides initial cluster centres, applied in
     `build`; `alpha` is stored for later use by the layer.
     """
     # Legacy support: `input_dim=n` is equivalent to `input_shape=(n,)`.
     if 'input_dim' in kwargs and 'input_shape' not in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(ClusteringLayer, self).__init__(**kwargs)
     self.n_clusters = n_clusters
     self.alpha = alpha
     self.initial_weights = weights
     self.input_spec = InputSpec(ndim=2)
コード例 #14
0
    def build(self, input_shape):
        """Create normalization weights along `self.axis`.

        Builds gamma/beta, moving statistics, and two 3-element mixing
        weight vectors for the mean/variance statistics.
        """
        dim = input_shape[self.axis]

        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')

        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        shape = (dim, )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None

        # Moving statistics are non-trainable; updated outside of gradients.
        self.moving_mean = self.add_weight(
            shape=shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer,
            trainable=False)

        self.moving_variance = self.add_weight(
            shape=shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer,
            trainable=False)

        # NOTE(review): three mixing weights — presumably one per statistic
        # source (e.g. instance/layer/batch); confirm against `call`.
        self.mean_weights = self.add_weight(
            shape=(3, ),
            name='mean_weights',
            initializer=self.mean_weights_initializer,
            regularizer=self.mean_weights_regularizer,
            constraint=self.mean_weights_constraints)

        self.variance_weights = self.add_weight(
            shape=(3, ),
            name='variance_weights',
            initializer=self.variance_weights_initializer,
            regularizer=self.variance_weights_regularizer,
            constraint=self.variance_weights_constraints)

        self.built = True
コード例 #15
0
    def __init__(self,
                 units,
                 relations,
                 kernel_basis_size=None,
                 activation=None,
                 use_bias=False,
                 batch_normalisation=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 feature_dropout=None,
                 support_dropout=None,
                 name='relational_graph_conv',
                 **kwargs):
        """Configure a relational graph convolution layer.

        Builds a basis-decomposition dense sublayer producing
        `units * relations` outputs (one slice per relation type), plus an
        optional batch-normalisation sublayer.
        """
        # Legacy support: `input_dim=n` is equivalent to `input_shape=(n,)`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(RelationalGraphConv, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            name=name,
            **kwargs)

        self.units = int(units)
        self.relations = int(relations)
        # None means no basis decomposition (full per-relation kernels).
        self.kernel_basis_size = (int(kernel_basis_size)
                                  if kernel_basis_size is not None else None)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.batch_normalisation = batch_normalisation
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.feature_dropout = feature_dropout
        self.support_dropout = support_dropout

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)

        # Shared dense sublayer; its name is derived from this layer's name.
        self.dense_layer = rgat_layers.BasisDecompositionDense(
            units=self.units * self.relations,
            basis_size=self.kernel_basis_size,
            coefficients_size=self.relations,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer,
            kernel_constraint=self.kernel_constraint,
            name=name + '_basis_decomposition_dense',
            **kwargs)
        if self.batch_normalisation:
            self.batch_normalisation_layer = tf.layers.BatchNormalization()
コード例 #16
0
ファイル: core.py プロジェクト: Shinepans/tensorflow-2
 def __init__(self, rate, data_format=None, **kwargs):
     """Configure spatial dropout over 5D inputs.

     `data_format` selects the channel axis; it defaults to the backend
     image data format and must be a recognised value.
     """
     super(SpatialDropout3D, self).__init__(rate, **kwargs)
     resolved = K.image_data_format() if data_format is None else data_format
     if resolved not in {'channels_last', 'channels_first'}:
         raise ValueError('data_format must be in '
                          '{"channels_last", "channels_first"}')
     self.data_format = resolved
     self.input_spec = InputSpec(ndim=5)
コード例 #17
0
    def build(self, input_shape):
        """Create the attention projection vector W of shape (features, 1)."""
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3

        self.W = self.add_weight(shape=(input_shape[2], 1),
                                 name='{}_W'.format(self.name),
                                 initializer=self.init)
        # NOTE(review): assigning to `trainable_weights` directly is a
        # Keras-1-era idiom; modern `add_weight` already registers the
        # variable — confirm against the Keras version this targets.
        self.trainable_weights = [self.W]
        super(AttentionWeightedAverage, self).build(input_shape)
コード例 #18
0
ファイル: local.py プロジェクト: Shinepans/tensorflow-2
 def build(self, input_shape):
     """Create the unshared kernel and bias for LocallyConnected2D.

     Both spatial dimensions must be statically known because each output
     position owns its own kernel slice.
     """
     if self.data_format == 'channels_last':
         input_row, input_col = input_shape[1:-1]
         input_filter = input_shape[3]
     else:
         input_row, input_col = input_shape[2:]
         input_filter = input_shape[1]
     if input_row is None or input_col is None:
         raise ValueError('The spatial dimensions of the inputs to '
                          ' a LocallyConnected2D layer '
                          'should be fully-defined, but layer received '
                          'the inputs shape ' + str(input_shape))
     output_row = conv_utils.conv_output_length(input_row,
                                                self.kernel_size[0],
                                                self.padding,
                                                self.strides[0])
     output_col = conv_utils.conv_output_length(input_col,
                                                self.kernel_size[1],
                                                self.padding,
                                                self.strides[1])
     self.output_row = output_row
     self.output_col = output_col
     # One (kh * kw * in_channels, filters) kernel per output position.
     self.kernel_shape = (output_row * output_col, self.kernel_size[0] *
                          self.kernel_size[1] * input_filter, self.filters)
     self.kernel = self.add_weight(shape=self.kernel_shape,
                                   initializer=self.kernel_initializer,
                                   name='kernel',
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
     if self.use_bias:
         # Bias is also unshared: one per output position and filter.
         self.bias = self.add_weight(shape=(output_row, output_col,
                                            self.filters),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
     else:
         self.bias = None
     # Pin the channel axis so later calls are shape-checked.
     if self.data_format == 'channels_first':
         self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
     else:
         self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
     self.built = True
コード例 #19
0
    def __init__(self, output_key, module_uri, max_strlen=10000, **kwargs):
        """Wrap a TF-Hub text module as a Keras layer.

        Inputs are rank-2 string tensors; `output_key` selects which output
        of the hub module is exposed, and strings longer than `max_strlen`
        are handled downstream.
        """
        self._name = "TFHubTextLayer"
        super(TFHubTextLayer, self).__init__(**kwargs)
        self.input_spec = InputSpec(ndim=2, dtype=tensorflow.string)
        self.output_key = output_key
        # TF-Hub cannot handle unicode URIs, so coerce to str explicitly.
        self.module_uri = str(module_uri)
        self.max_strlen = max_strlen
コード例 #20
0
 def build(self, input_shape):
     """Build the wrapped layer on the per-timestep shape.

     The child layer sees `input_shape` with the time axis (axis 1)
     removed; the wrapper itself pins the full shape as its input spec.
     """
     input_shape = tensor_shape.TensorShape(input_shape).as_list()
     assert len(input_shape) >= 3
     self.input_spec = InputSpec(shape=input_shape)
     # Drop the time axis: (batch, time, ...) -> (batch, ...).
     child_input_shape = [input_shape[0]] + input_shape[2:]
     if not self.layer.built:
         self.layer.build(child_input_shape)
         self.layer.built = True
     super(TimeDistributed, self).build()
     self.built = True
コード例 #21
0
    def build(self, input_shape):
        """Create the scalar `gamma` weight; optionally load initial weights."""
        self.input_spec = [InputSpec(shape=input_shape)]

        # Compatibility with TensorFlow >= 1.0.0
        self.gamma = K.variable(self.gamma_init((1, )),
                                name='{}_gamma'.format(self.name))
        # Keras-1-style manual weight registration.
        self.trainable_weights = [self.gamma]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            # Free the initial weights once they have been applied.
            del self.initial_weights
コード例 #22
0
ファイル: DEC.py プロジェクト: hadifar/DEC-keras
 def build(self, input_shape):
     """Create the (n_clusters, input_dim) cluster-centre matrix."""
     assert len(input_shape) == 2
     # TF1-style Dimension -> int conversion.
     input_dim = input_shape[1].value
     self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
     self.clusters = self.add_weight(shape=(self.n_clusters, input_dim),
                                     initializer='glorot_uniform',
                                     name='clusters')
     if self.initial_weights is not None:
         # Apply user-supplied centres, then free them.
         self.set_weights(self.initial_weights)
         del self.initial_weights
     self.built = True
コード例 #23
0
ファイル: convolutional.py プロジェクト: Gaelic98/DeepPoseKit
 def __init__(self,
              index=None,
              coordinate_scale=1.0,
              confidence_scale=255.0,
              data_format=None,
              **kwargs):
     """Configure a layer operating on 4D (image-like) inputs.

     Stores the channel `index` and the coordinate/confidence scale
     factors for use downstream.
     """
     super(Maxima2D, self).__init__(**kwargs)
     self.index = index
     self.coordinate_scale = coordinate_scale
     self.confidence_scale = confidence_scale
     self.data_format = normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=4)
コード例 #24
0
ファイル: l0norm.py プロジェクト: asim800/l0
    def build(self, input_shape):
        """Create the conv kernel, L0 `loc` parameters, and optional bias.

        Mirrors the standard Keras conv build, plus a `loc` variable of the
        same shape as the kernel used by the L0-norm machinery.
        """
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        # Per-weight location parameters, same shape as the kernel.
        self.loc = self.add_variable(
            'loc',
            shape=kernel_shape,
            initializer=tf.keras.initializers.RandomNormal(
                mean=self.loc_mean, stddev=self.loc_stddev, seed=None),
            regularizer=None,
            constraint=None,
            dtype=self.dtype,
            trainable=True)
        # NOTE(review): `.numpy()` requires eager execution — confirm this
        # layer is only ever built eagerly.
        self.loc2 = self.loc.numpy()
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        # Pre-build the convolution op with static shapes for reuse in call.
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.get_shape(),
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       self.rank + 2))
        self.built = True
コード例 #25
0
 def __init__(self,
              return_sequences=False,
              return_state=False,
              go_backwards=False,
              stateful=False,
              **kwargs):
     """Initialize RNN flags and specs without creating a cell.

     NOTE(review): `self.cell` is read below but never assigned here —
     presumably a subclass sets it before invoking this __init__; confirm.
     """
     # We invoke the base layer's initializer directly here because we do not
     # want to create RNN cell instance.
     super(RNN, self).__init__(**kwargs)  # pylint: disable=bad-super-call
     self.return_sequences = return_sequences
     self.return_state = return_state
     self.go_backwards = go_backwards
     self.stateful = stateful
     self.supports_masking = False
     self.input_spec = [InputSpec(ndim=3)]
     # Normalize state_size to a sequence to build one spec per state tensor.
     if hasattr(self.cell.state_size, '__len__'):
         state_size = self.cell.state_size
     else:
         state_size = [self.cell.state_size]
     self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]
     self.constants_spec = None
     self._states = None
     self._num_constants = None
コード例 #26
0
 def __init__(self,
              kernel_size,
              sigma,
              upsample_factor,
              index=None,
              coordinate_scale=1.0,
              confidence_scale=1.0,
              data_format=None,
              **kwargs):
     """Store the configuration for sub-pixel maxima extraction.

     Args:
         kernel_size: Size of the smoothing kernel.
         sigma: Gaussian sigma for the smoothing kernel.
         upsample_factor: Upsampling factor used when refining maxima.
         index: Optional channel/feature index to restrict the search to.
         coordinate_scale: Multiplier applied to output coordinates.
         confidence_scale: Multiplier applied to output confidences.
         data_format: Image data format; normalized via
             `normalize_data_format`.
         **kwargs: Forwarded to the base layer initializer.
     """
     super(SubpixelMaxima2D, self).__init__(**kwargs)
     self.data_format = normalize_data_format(data_format)
     # 4-D image-style input is expected.
     self.input_spec = InputSpec(ndim=4)
     self.kernel_size, self.sigma, self.upsample_factor = (
         kernel_size, sigma, upsample_factor)
     self.index = index
     self.coordinate_scale, self.confidence_scale = (
         coordinate_scale, confidence_scale)
コード例 #27
0
    def __init__(self,
                 upsampling=(2, 2),
                 output_size=None,
                 data_format=None,
                 **kwargs):
        """Configure bilinear upsampling by factor or by target size.

        Args:
            upsampling: Tuple of 2 ints — (rows, cols) upsampling factors.
                Ignored when a truthy `output_size` is given.
            output_size: Optional tuple of 2 ints — explicit output
                (rows, cols); takes precedence over `upsampling`.
            data_format: Image data format; normalized via
                `conv_utils.normalize_data_format`.
            **kwargs: Forwarded to the base layer initializer.
        """
        super(BilinearUpsampling, self).__init__(**kwargs)

        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)
        # Exactly one of `output_size` / `upsampling` is kept; the other is
        # set to None so downstream code can tell which mode is active.
        if not output_size:
            self.output_size = None
            self.upsampling = conv_utils.normalize_tuple(
                upsampling, 2, 'upsampling')
        else:
            self.output_size = conv_utils.normalize_tuple(
                output_size, 2, 'output_size')
            self.upsampling = None
コード例 #28
0
ファイル: l0norm.py プロジェクト: asim800/l0
    def build(self, input_shape):
        """Create the `kernel`, `loc` and (optionally) `bias` variables.

        Mirrors a `Dense` layer's build, plus an extra `loc` variable of the
        same shape as the kernel (an L0-gate location parameter, initialized
        from a random normal around `self.loc_mean`).

        Raises:
            ValueError: If the last input dimension is not statically known.
        """
        input_shape = tensor_shape.TensorShape(input_shape)
        last_dim = input_shape[-1].value  # TF1-style Dimension; may be None
        if last_dim is None:
            raise ValueError('The last dimension of the inputs to `Dense` '
                             'should be defined. Found `None`.')
        self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})

        self.kernel = self.add_variable(
            'kernel',
            shape=[last_dim, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True)

        # Per-connection location parameter; neither regularized nor
        # constrained.
        self.loc = self.add_variable(
            'loc',
            shape=[last_dim, self.units],
            initializer=tf.keras.initializers.RandomNormal(
                mean=self.loc_mean, stddev=self.loc_stddev, seed=None),
            regularizer=None,
            constraint=None,
            dtype=self.dtype,
            trainable=True)

        # NOTE(review): `.numpy()` requires eager execution — confirm this
        # layer is only ever built eagerly.
        self.loc2 = self.loc.numpy()

        self.bias = None
        if self.use_bias:
            self.bias = self.add_variable('bias',
                                          shape=[self.units],
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          constraint=self.bias_constraint,
                                          dtype=self.dtype,
                                          trainable=True)
        self.built = True
コード例 #29
0
    def __init__(self, padding=(1, 1), dim_ordering='default', **kwargs):
        """Configure 2-D reflection padding.

        Args:
            padding: One of
                - tuple of 2 ints: symmetric (vertical, horizontal) padding,
                - tuple of 4 ints: (top, bottom, left, right) padding,
                - dict with keys among {'top_pad', 'bottom_pad', 'left_pad',
                  'right_pad'}; missing keys default to 0.
            dim_ordering: Only 'tf' (channels-last) is supported; the value
                'default' resolves to 'tf'.
            **kwargs: Forwarded to the base layer initializer.

        Raises:
            ValueError: On unexpected dict keys or unsupported dim_ordering.
            TypeError: If `padding` is a tuple whose length is not 2 or 4.
        """
        super(ReflectionPadding2D, self).__init__(**kwargs)

        if dim_ordering == 'default':
            dim_ordering = 'tf'  # K.image_dim_ordering()

        self.padding = padding
        if isinstance(padding, dict):
            if set(padding.keys()) <= {
                    'top_pad', 'bottom_pad', 'left_pad', 'right_pad'
            }:
                self.top_pad = padding.get('top_pad', 0)
                self.bottom_pad = padding.get('bottom_pad', 0)
                self.left_pad = padding.get('left_pad', 0)
                self.right_pad = padding.get('right_pad', 0)
            else:
                # Bug fix: a space was missing between '}.' and 'Found:',
                # which produced a run-together error message.
                raise ValueError(
                    'Unexpected key found in `padding` dictionary. '
                    'Keys have to be in {"top_pad", "bottom_pad", '
                    '"left_pad", "right_pad"}. '
                    'Found: ' + str(padding.keys()))
        else:
            padding = tuple(padding)
            if len(padding) == 2:
                # Symmetric form: (vertical, horizontal).
                self.top_pad = self.bottom_pad = padding[0]
                self.left_pad = self.right_pad = padding[1]
            elif len(padding) == 4:
                (self.top_pad, self.bottom_pad,
                 self.left_pad, self.right_pad) = padding
            else:
                raise TypeError('`padding` should be tuple of int '
                                'of length 2 or 4, or dict. '
                                'Found: ' + str(padding))

        if dim_ordering not in {'tf'}:
            raise ValueError('dim_ordering must be in {tf}.')
        self.dim_ordering = dim_ordering
        self.input_spec = [InputSpec(ndim=4)]
コード例 #30
0
    def build(self, input_shape):
        """Validate the grouped channel axis and create gamma/beta weights.

        The dimension at `self.axis` must be statically known, at least
        `self.groups`, and divisible by `self.groups`.

        Raises:
            ValueError: If the axis dimension is undefined, smaller than the
                number of groups, or not a multiple of it.
        """
        dim = input_shape[self.axis]

        if dim is None:
            raise ValueError('Axis %s of input tensor should have a defined '
                             'dimension but the layer received an input '
                             'with shape %s.' % (self.axis, input_shape))

        if dim < self.groups:
            raise ValueError('Number of groups (%s) cannot be more than the '
                             'number of channels (%s).' % (self.groups, dim))

        if dim % self.groups != 0:
            raise ValueError('Number of groups (%s) must be a multiple of '
                             'the number of channels (%s).'
                             % (self.groups, dim))

        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        # Both parameters are per-channel vectors.
        param_shape = (dim, )

        # Scale (gamma) and offset (beta) are each optional.
        self.gamma = None
        if self.scale:
            self.gamma = self.add_weight(shape=param_shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        self.beta = None
        if self.center:
            self.beta = self.add_weight(shape=param_shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        self.built = True