Ejemplo n.º 1
0
 def build(self, two_input_shapes):
     """Create the target-matrix weight and conv bookkeeping.

     `two_input_shapes` is `[targets_shape, main_shape]`; only the main
     shape drives the convolution setup. The code reads axis 1 as image
     height, axis 2 as width and axis 3 as channels (NHWC-style input).
     """
     targets_shape, main_shape = two_input_shapes
     main_shape = tensor_shape.TensorShape(main_shape)
     channels_in = self._get_input_channel(main_shape)
     channel_axis = self._get_channel_axis()
     # Both inputs are rank-(rank+2) tensors with the same channel count.
     self.input_spec = (InputSpec(ndim=self.rank + 2,
                                  axes={channel_axis: channels_in}),
                        InputSpec(ndim=self.rank + 2,
                                  axes={channel_axis: channels_in}))
     self._build_conv_op_input_shape = main_shape
     self._build_input_channel = channels_in
     self._padding_op = self._get_padding_op()
     self._conv_op_data_format = conv_utils.convert_data_format(
         self.data_format, self.rank + 2)
     filter_shape = tensor_shape.TensorShape(
         self.kernel_size + (channels_in, self.filters))
     self._convolution_op = nn_ops.Convolution(
         main_shape,
         filter_shape=filter_shape,
         dilation_rate=self.dilation_rate,
         strides=self.strides,
         padding=self._padding_op,
         data_format=self._conv_op_data_format)
     # Static spatial geometry of the main input.
     self.image_height = main_shape[1]
     self.image_width = main_shape[2]
     self.input_channels = main_shape[3]
     self.num_patches = self.image_width * self.image_height
     # One target row per patch per realisation-batch element.
     self.target_matrix = self.add_weight(
         'target_matrix',
         shape=[self.realisation_batch_size * self.num_patches, self.filters],
         initializer=self.kernel_initializer,
         regularizer=self.kernel_regularizer,
         constraint=self.kernel_constraint,
         dtype=self.dtype,
         trainable=True)
     self.built = True
Ejemplo n.º 2
0
            def build(self, input_shape):
                """Create kernel/bias weights and cache the convolution op."""
                input_shape = tensor_shape.TensorShape(input_shape)
                channel_axis = 1 if self.data_format == 'channels_first' else -1
                if input_shape.dims[channel_axis].value is None:
                    raise ValueError('The channel dimension of the inputs '
                                     'should be defined. Found `None`.')
                input_dim = int(input_shape[channel_axis])

                self.kernel = self.add_weight(
                    name='kernel',
                    shape=self.kernel_size + (input_dim, self.filters),
                    initializer=self.kernel_initializer,
                    regularizer=self.kernel_regularizer,
                    constraint=self.kernel_constraint,
                    trainable=self.kernel_trainable,
                    dtype=self.dtype)
                self.bias = None
                if self.use_bias:
                    self.bias = self.add_weight(
                        name='bias',
                        shape=(self.filters, ),
                        initializer=self.bias_initializer,
                        regularizer=self.bias_regularizer,
                        constraint=self.bias_constraint,
                        trainable=self.bias_trainable,
                        dtype=self.dtype)
                self.input_spec = InputSpec(ndim=self.rank + 2,
                                            axes={channel_axis: input_dim})
                # Causal padding is realised outside the op, so it runs 'valid'.
                op_padding = 'valid' if self.padding == 'causal' else self.padding
                if not isinstance(op_padding, (list, tuple)):
                    op_padding = op_padding.upper()
                self._convolution_op = nn_ops.Convolution(
                    input_shape,
                    filter_shape=self.kernel.shape,
                    dilation_rate=self.dilation_rate,
                    strides=self.strides,
                    padding=op_padding,
                    data_format=conv_utils.convert_data_format(
                        self.data_format, self.rank + 2))
                self.built = True
Ejemplo n.º 3
0
  def __init__(self,
               height_factor,
               width_factor,
               fill_mode='reflect',
               interpolation='bilinear',
               seed=None,
               name=None,
               **kwargs):
    """Validate zoom factor ranges and set up the RNG and input spec."""

    def _bounds(factor, arg_name):
      # A scalar factor means a symmetric [factor, factor] range.
      if isinstance(factor, (tuple, list)):
        lower, upper = factor[0], factor[1]
      else:
        lower = upper = factor
      if lower < 0. or upper < 0.:
        raise ValueError('`{}` cannot have negative values, '
                         'got {}'.format(arg_name, factor))
      if lower > upper:
        raise ValueError('`{}` cannot have lower bound larger than '
                         'upper bound, got {}.'.format(arg_name, factor))
      return lower, upper

    self.height_factor = height_factor
    self.height_lower, self.height_upper = _bounds(height_factor,
                                                   'height_factor')
    self.width_factor = width_factor
    self.width_lower, self.width_upper = _bounds(width_factor,
                                                 'width_factor')

    if fill_mode not in {'reflect', 'wrap', 'constant'}:
      raise NotImplementedError(
          'Unknown `fill_mode` {}. Only `reflect`, `wrap` and '
          '`constant` are supported.'.format(fill_mode))
    if interpolation not in {'nearest', 'bilinear'}:
      raise NotImplementedError(
          'Unknown `interpolation` {}. Only `nearest` and '
          '`bilinear` are supported.'.format(interpolation))
    self.fill_mode = fill_mode
    self.interpolation = interpolation
    self.seed = seed
    self._rng = make_generator(self.seed)
    self.input_spec = InputSpec(ndim=4)
    super(RandomZoom, self).__init__(name=name, **kwargs)
Ejemplo n.º 4
0
 def __init__(self, probability, factor=None, seed=None, name=None, **kwargs):
     """Random grayscale-blend layer.

     Args:
       probability: chance of applying the op; also the fallback blend
         factor when `factor` is not given.
       factor: scalar upper blend factor, or a `(lower, upper)` pair.
       seed: RNG seed. When omitted, a fresh random seed is drawn per
         instance. (Bug fix: the old default `seed=random.randint(0, 1000)`
         was evaluated once at definition time, so every instance silently
         shared the same seed.)
       name: layer name.
     """
     self.probability = probability
     # Treat only `None` as "unset": a factor of 0.0 is a valid value and
     # must not silently fall back to `probability`.
     self.factor = factor if factor is not None else probability
     # Bug fix: validate the *effective* factor. The original inspected the
     # raw `factor` argument, so `factor=None` left `self.upper = None` and
     # crashed on the `<` comparison below.
     if isinstance(self.factor, (tuple, list)):
         self.lower = self.factor[0]
         self.upper = self.factor[1]
     else:
         self.lower = 0.0
         self.upper = self.factor
     if self.lower < 0. or self.upper < 0. or self.lower > 1.:
         raise ValueError('Factor cannot have negative values or greater than 1.0,'
                          ' got {}'.format(self.factor))
     self.seed = seed if seed is not None else random.randint(0, 1000)
     self.input_spec = InputSpec(ndim=4)
     self._rng = augr.get(self.seed)
     super(RandomBlendGrayscale, self).__init__(name=name, **kwargs)
Ejemplo n.º 5
0
 def build(self, input_shape):
     """Create the scalar `alpha` weight and pin all non-channel axes."""
     self.alpha = self.add_weight(shape=(1, ),
                                  name="alpha",
                                  initializer=self.alpha_initializer,
                                  regularizer=None,
                                  constraint=None,
                                  dtype=self.dtype,
                                  trainable=True)
     ndim = len(input_shape)
     channel_axis = 1 if is_channels_first(self.data_format) else ndim - 1
     # Fix every axis except batch and channels to the size seen at build
     # time.
     axes = {axis: input_shape[axis]
             for axis in range(1, ndim) if axis != channel_axis}
     self.input_spec = InputSpec(ndim=ndim, axes=axes)
     self.built = True
Ejemplo n.º 6
0
 def build(self, input_shape):
     """Create the per-element `alpha` weight, honouring shared axes."""
     param_shape = list(input_shape[1:])
     if self.shared_axes is not None:
         # Shared axes collapse to size 1 so alpha broadcasts across them.
         for axis in self.shared_axes:
             param_shape[axis - 1] = 1
     # Treats sum == len as the "every dimension is 1" case and collapses
     # the parameter to a single scalar.
     if sum(param_shape) == len(param_shape):
         param_shape = [1]
     self.alpha = self.add_weight(shape=param_shape,
                                  name='alpha',
                                  initializer=self.alpha_initializer,
                                  regularizer=self.alpha_regularizer,
                                  constraint=self.alpha_constraint)
     # Rank is fixed, but no axis sizes are constrained.
     self.input_spec = InputSpec(ndim=len(input_shape), axes={})
     self.built = True
Ejemplo n.º 7
0
 def __init__(self,
              rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              **kwargs):
     """N-D auxiliary convolution layer.

     Normalizes the geometry arguments (kernel size, strides, dilation,
     padding, data format) and resolves initializers, regularizers and
     constraints from their string or object forms.

     Raises:
       ValueError: if `padding='causal'`, which this layer does not
         implement.
     """
     super(ConvAux, self).__init__(
         trainable=trainable,
         name=name,
         activity_regularizer=regularizers.get(activity_regularizer),
         **kwargs)
     self.rank = rank
     # Coerce e.g. float filter counts to int.
     if filters is not None and not isinstance(filters, int):
         filters = int(filters)
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     if self.padding == 'causal':
         # Bug fix: the original message read "Causal padding is only
         # supported.", stating the opposite of what this check enforces.
         raise ValueError('Causal padding is not supported.')
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
Ejemplo n.º 8
0
    def build(self, input_shape):
        """Create the RBM weights: kernel plus hidden/visible bias vectors.

        Also clears the running slots (`m*`, `r*`) consumed by the online
        update rule elsewhere in the class.

        Raises:
          TypeError: for non-floating/non-complex dtypes.
          ValueError: if the last input dimension is unknown.
        """
        dtype = dtypes.as_dtype(self.dtype or K.floatx())
        if not (dtype.is_floating or dtype.is_complex):
            raise TypeError('Unable to build `OnlineBolzmannCell` layer with non-floating point '
                            'dtype %s' % (dtype,))
        input_shape = tensor_shape.TensorShape(input_shape)
        if tensor_shape.dimension_value(input_shape[-1]) is None:
            raise ValueError('The last dimension of the inputs to `OnlineBolzmannCell` '
                            'should be defined. Found `None`.')

        last_dim = tensor_shape.dimension_value(input_shape[-1])
        # Bug fix: dropped the original's unused `batch_dim` local.
        self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})

        self.kernel = self.add_weight(
            name='w',
            shape=(last_dim, self.units),
            constraint=self.kernel_constraint,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            dtype=self.dtype,
            trainable=True)
        # Running-state slots for the kernel.
        self.m, self.r = None, None

        if self.use_bias:
            self.bias_hidden = self.add_weight(
                name='b_h',
                shape=(self.units,),
                constraint=self.bias_constraint,
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                dtype=self.dtype,
                trainable=True)
            self.bias_visible = self.add_weight(
                name='b_v',
                shape=(last_dim,),
                constraint=self.bias_constraint,
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                dtype=self.dtype,
                trainable=True)

            # Running-state slots for the two bias vectors.
            self.m_h, self.m_v, self.r_h, self.r_v = None, None, None, None
        else:
            self.bias_hidden = None
            self.bias_visible = None
        self.built = True
Ejemplo n.º 9
0
    def __init__(self,
                 height_factor,
                 width_factor,
                 fill_mode='nearest',
                 fill_value=0.,
                 seed=None,
                 name=None,
                 **kwargs):
        """Validate translation factor ranges and set up RNG and input spec."""

        def _bounds(factor, arg_name):
            # Scalars give a symmetric range; for pairs the lower bound is
            # stored as its absolute value.
            if isinstance(factor, (tuple, list)):
                lower, upper = abs(factor[0]), factor[1]
            else:
                lower = upper = factor
            if upper < 0.:
                raise ValueError(
                    '`{}` cannot have negative values as upper '
                    'bound, got {}'.format(arg_name, factor))
            if abs(lower) > 1. or abs(upper) > 1.:
                raise ValueError(
                    '`{}` must have values between [-1, 1], '
                    'got {}'.format(arg_name, factor))
            return lower, upper

        self.height_factor = height_factor
        self.height_lower, self.height_upper = _bounds(height_factor,
                                                       'height_factor')
        self.width_factor = width_factor
        self.width_lower, self.width_upper = _bounds(width_factor,
                                                     'width_factor')

        if fill_mode not in {'nearest', 'bilinear'}:
            raise NotImplementedError(
                '`fill_mode` {} is not supported yet.'.format(fill_mode))
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.seed = seed
        self._rng = make_generator(self.seed)
        self.input_spec = InputSpec(ndim=4)
        super(RandomTranslation, self).__init__(name=name, **kwargs)
Ejemplo n.º 10
0
  def build(self, input_shape):
    """Create the dense kernel and pin the last input dimension."""
    # Bug fix: the original body sat at the same indentation as the `def`,
    # which is a SyntaxError; it is now properly indented.
    input_shape = tensor_shape.TensorShape(input_shape)
    last_dim = tensor_shape.dimension_value(input_shape[-1])
    self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        dtype=self.dtype,
        trainable=True)

  def call(self, inputs):
    """Apply the dense kernel, adding an activity-sum auxiliary loss.

    Bug fixes relative to the original: the parameter was named `input`
    while the body used the undefined name `inputs`, and the rank>2 branch
    mixed a tab with mis-aligned spaces so the `set_shape` block fell
    outside the branch.
    """
    self.add_loss(self.rate * tf.reduce_sum(inputs))
    rank = inputs.shape.rank
    if rank is not None and rank > 2:
      # Broadcast the matmul over all leading dimensions.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
      if not context.executing_eagerly():
        # Restore the static shape lost by tensordot in graph mode.
        shape = inputs.shape.as_list()
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      inputs = math_ops.cast(inputs, self._compute_dtype)
      if K.is_sparse(inputs):
        outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel)
      else:
        outputs = gen_math_ops.mat_mul(inputs, self.kernel)
    if self.activation is not None:
      return self.activation(outputs)
    return outputs

  def compute_output_shape(self, input_shape):
    """Same shape as the input, with the last axis replaced by `units`."""
    shape = tensor_shape.TensorShape(input_shape).with_rank_at_least(2)
    if tensor_shape.dimension_value(shape[-1]) is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % shape)
    return shape[:-1].concatenate(self.units)

  def get_config(self):
    """Serialize this layer's config on top of the base-layer config."""
    base_config = super(SNNDense, self).get_config()
    config = dict(base_config)
    # Layer-specific keys override any base entries of the same name.
    config.update(
        units=self.units,
        activation=activations.serialize(self.activation),
        kernel_initializer=initializers.serialize(self.kernel_initializer),
    )
    return config
 def build(self, input_shape):
     """Validate dtype/shape and pin the input spec.

     No kernel is created here; only the bias slot is cleared when
     `use_bias` is off.
     """
     dtype = dtypes.as_dtype(self.dtype or K.floatx())
     if not (dtype.is_floating or dtype.is_complex):
         raise TypeError(
             'Unable to build `Dense` layer with non-floating point '
             'dtype %s' % (dtype, ))
     input_shape = tensor_shape.TensorShape(input_shape)
     last_dim = tensor_shape.dimension_value(input_shape[-1])
     if last_dim is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
     if not self.use_bias:
         self.bias = None
     self.built = True
 def build(self, input_shape):
     """Cache conv bookkeeping (input shape, channels, padding, format).

     No weights are created here. Bug fix: the original computed an unused
     `kernel_shape` tuple, which has been dropped.
     """
     input_shape = tensor_shape.TensorShape(input_shape)
     input_channel = self._get_input_channel(input_shape)
     if not self.use_bias:
         self.bias = None
     channel_axis = self._get_channel_axis()
     self.input_spec = InputSpec(ndim=self.rank + 2,
                                 axes={channel_axis: input_channel})
     # Bookkeeping consumed elsewhere (e.g. when the conv op is created).
     self._build_conv_op_input_shape = input_shape
     self._build_input_channel = input_channel
     self._padding_op = self._get_padding_op()
     self._conv_op_data_format = conv_utils.convert_data_format(
         self.data_format, self.rank + 2)
     self.built = True
Ejemplo n.º 13
0
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 scale=True,
                 scf_min=0.2,
                 scf_max=2.0,
                 dropconnect_prob=0.05,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """Dense-style layer with a learnable output scaling factor."""
        # Legacy `input_dim` support, as in keras.layers.Dense.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(ScaledLinear, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)

        # Core hyper-parameters.
        self.units = units
        self.use_bias = use_bias
        self.dropconnect_prob = dropconnect_prob
        self.scale = scale
        self.scf_min = scf_min
        self.scf_max = scf_max
        self.activation = activations.get(activation)

        # Weight configuration, resolved from string or object form.
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Note: `input_dim` was popped above, so it is absent here.
        self.kwargs = kwargs

        # Learnable scaling factor; presumably bounded to
        # [scf_min, scf_max] -- confirm in ScalingFactor.
        self.scaler = ScalingFactor(scf_min=self.scf_min,
                                    scf_max=self.scf_max,
                                    name="scaling_factor")

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
Ejemplo n.º 14
0
 def __init__(self, factor, interpolation='bilinear', seed=None, **kwargs):
     """Randomly vary image height by a relative factor.

     A `(lower, upper)` pair is stored as `(-lower, upper)`; a scalar is
     used for both bounds unchanged.
     """
     self.factor = factor
     if isinstance(factor, (tuple, list)):
         self.height_lower, self.height_upper = -factor[0], factor[1]
     else:
         self.height_lower = self.height_upper = factor
     if self.height_lower > 1.:
         raise ValueError(
             '`factor` cannot have abs lower bound larger than 1.0, '
             'got {}'.format(factor))
     self.interpolation = interpolation
     self._interpolation_method = get_interpolation(interpolation)
     self.input_spec = InputSpec(ndim=4)
     self.seed = seed
     self._rng = make_generator(self.seed)
     super(RandomHeight, self).__init__(**kwargs)
Ejemplo n.º 15
0
 def __init__(self, filters, kernelYX, padding="same", **kwargs):  #REFLECT
     """Wrapper around a valid-padded Conv3D with manual border padding.

     `kernelYX` is the (y, x) spatial kernel size; both entries must be
     odd so the centred padding offsets below are well defined. The third
     (depth) kernel dimension is fixed at 2.
     """
     kernelYX = ensure_multiplicity(2, kernelYX)
     for k in kernelYX:
         assert k % 2 == 1, "kernel size must be uneven on all spatial axis"
     # "same" output size is achieved by explicit constant padding before
     # the valid-padded convolution.
     if padding == "same":
         padding = "CONSTANT"
     name = kwargs.pop('name', None)
     # Fill value used by the explicit padding step.
     self.padding_constant_value = kwargs.pop('constant_values', 0)
     self.convL = Conv3D(filters=filters,
                         kernel_size=(kernelYX[0], kernelYX[1], 2),
                         padding="valid",
                         name=name + "conv" if name is not None else None,
                         **kwargs)
     self.input_spec = InputSpec(ndim=4)
     self.padding = padding
     # Half-kernel offsets for centred padding on each spatial axis.
     self.ker_center = [(k - 1) // 2 for k in kernelYX]
     # NOTE(review): `super().__init__(name)` passes `name` positionally;
     # if the base class is keras.layers.Layer, its first positional
     # parameter is `trainable`, not `name` -- confirm the base signature.
     super().__init__(name)
Ejemplo n.º 16
0
 def build(self, input_shape):
   """Build the wrapped layer against the timestep-stripped shape."""
   input_shape = tensor_shape.TensorShape(input_shape).as_list()
   if len(input_shape) < 3:
     raise ValueError(
         '`TimeDistributed` Layer should be passed an `input_shape ` '
         'with at least 3 dimensions, received: ' + str(input_shape))
   # Batch and time dimensions stay unconstrained.
   self.input_spec = InputSpec(shape=[None, None] + input_shape[2:])
   if not self.layer.built:
     # The child sees (batch, *features): the time axis is folded away.
     # The base layer's shape-conversion helper expects a tuple, hence
     # the cast.
     self.layer.build(tuple([input_shape[0]] + input_shape[2:]))
     self.layer.built = True
   super(TimeDistributed, self).build()
   self.built = True
Ejemplo n.º 17
0
    def build(self, input_shape):
        """Create kernel/bias for a grouped convolution and cache the op.

        Statement order matters: `self.kernel` must exist before the
        `Convolution` op at the end reads `self.kernel.shape`.

        Raises:
          ValueError: if the input channel count is not evenly divisible
            by `self.groups`.
        """
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        if input_channel % self.groups != 0:
            raise ValueError(
                'The number of input channels must be evenly divisible by the number '
                'of groups. Received groups={}, but the input has {} channels '
                '(full input shape is {}).'.format(self.groups, input_channel,
                                                   input_shape))
        # Each group convolves input_channel // groups input channels.
        kernel_shape = self.kernel_size + (input_channel // self.groups,
                                           self.filters)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        # The spec pins the *total* channel count, not the per-group count.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        # Bookkeeping consumed elsewhere in the class.
        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format)
        self.built = True
Ejemplo n.º 18
0
 def build(self, input_shape):
     """Build `TimeDistributed` over possibly-nested input structures."""
     input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
     # Every structure element must carry batch, time and >=1 feature dim.
     ranks = nest.flatten(
         nest.map_structure(lambda shape: shape.ndims, input_shape))
     if any(rank < 3 for rank in ranks):
         raise ValueError(
             '`TimeDistributed` Layer should be passed an `input_shape ` '
             'with at least 3 dimensions, received: ' + str(input_shape))
     # Leave batch and time axes unconstrained in the per-element specs.
     self.input_spec = nest.map_structure(
         lambda shape: InputSpec(shape=[None, None] + shape.as_list()[2:]),
         input_shape)
     child_shape = tf_utils.convert_shapes(
         nest.map_structure(self._remove_timesteps, input_shape))
     super(TimeDistributed, self).build(tuple(child_shape))
     self.built = True
Ejemplo n.º 19
0
    def __init__(
            self,
            input_rows,
            input_dim,
            output_dim,
            support,  # input_rows x input_rows
            activation=None,
            use_bias=False,
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            dropout=0.,
            sparse_inputs=False,
            featureless=False,
            model='gcn',
            perturbation=None,
            **kwargs):
        """Graph convolution layer over a fixed support matrix."""
        super(GraphConvolution, self).__init__(**kwargs)

        # Graph geometry; `support` is assumed input_rows x input_rows
        # (see the argument comment above).
        self.input_rows = input_rows
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.support = support

        # NOTE(review): `activation` is stored raw here, not resolved via
        # activations.get -- confirm callers pass a callable.
        self.activation = activation
        self.use_bias = use_bias

        # Weight configuration, resolved from string or object form.
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        # Behavioural flags.
        self.dropout = dropout
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.model = model
        self.perturbation = perturbation

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
Ejemplo n.º 20
0
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              **kwargs):
     """2-D convolution-with-holes layer; normalizes geometry arguments.

     Raises:
       ValueError: for `padding='causal'`, which only 1-D convolutions
         support.
     """
     super(ConvHole2D, self).__init__(
         trainable=trainable,
         name=name,
         activity_regularizer=regularizers.get(activity_regularizer),
         **kwargs)
     # Geometry, normalized to canonical tuple/string form.
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, 2, 'dilation_rate')
     self.padding = conv_utils.normalize_padding(padding)
     if self.padding == 'causal':
         raise ValueError(
             'Causal padding is only supported for `Conv1D` and ``SeparableConv1D`.'
         )
     self.data_format = conv_utils.normalize_data_format(data_format)
     # Trainable-parameter configuration.
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     # Expect 4-D image batches.
     self.input_spec = InputSpec(ndim=4)
Ejemplo n.º 21
0
    def build(self, input_shape):
        """Create a factored kernel (trainable A x constant B) and the op.

        Statement order matters: `self.kernel` is composed from `kernelA`
        and `kernelB` before the `Convolution` op reads its shape.
        """
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)

        # Trainable factor of the kernel.
        self.kernelA = self.add_weight(name='kernelA',
                                       shape=kernel_shape,
                                       initializer=self.kernel_initializer,
                                       regularizer=self.kernel_regularizer,
                                       constraint=self.kernel_constraint,
                                       trainable=True,
                                       dtype=self.dtype)

        # Fixed factor, frozen from `kernelB_init_weight` (set elsewhere --
        # presumably in __init__; confirm its expected shape there).
        self.kernelB = K.constant(self.kernelB_init_weight)
        # Effective kernel: transpose(transpose(A) . B).
        self.kernel = K.transpose(
            K.dot(K.transpose(self.kernelA), self.kernelB))

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        # Bookkeeping consumed elsewhere in the class.
        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format)
        self.built = True
Ejemplo n.º 22
0
    def __init__(self,
                 height_factor,
                 width_factor,
                 fill_mode='nearest',
                 fill_value=0.,
                 seed=None,
                 name=None,
                 **kwargs):
        """Validate zoom factor ranges and set up the RNG and input spec."""

        def _bounds(factor, arg_name):
            # A scalar factor means a symmetric [factor, factor] range.
            if isinstance(factor, (tuple, list)):
                lower, upper = factor[0], factor[1]
            else:
                lower = upper = factor
            if lower < 0. or upper < 0.:
                raise ValueError('`{}` cannot have negative values, '
                                 'got {}'.format(arg_name, factor))
            if lower > upper:
                raise ValueError(
                    '`{}` cannot have lower bound larger than '
                    'upper bound, got {}.'.format(arg_name, factor))
            return lower, upper

        self.height_factor = height_factor
        self.height_lower, self.height_upper = _bounds(height_factor,
                                                       'height_factor')
        self.width_factor = width_factor
        self.width_lower, self.width_upper = _bounds(width_factor,
                                                     'width_factor')

        if fill_mode not in {'nearest', 'bilinear'}:
            raise NotImplementedError(
                '`fill_mode` {} is not supported yet.'.format(fill_mode))
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.seed = seed
        self._rng = make_generator(self.seed)
        self.input_spec = InputSpec(ndim=4)
        super(RandomZoom, self).__init__(name=name, **kwargs)
Ejemplo n.º 23
0
 def __init__(self,
              activation=None,
              kernel_initializer='glorot_uniform',
              kernel_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              **kwargs):
     """Configure a noise layer whose per-feature scaling is learned.

     String identifiers (e.g. 'glorot_uniform') are resolved into concrete
     Keras objects through the framework `get` helpers; `None` stays `None`.
     """
     # Keras convention: `input_dim` is shorthand for a 1-D `input_shape`.
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(LearnableNoise, self).__init__(**kwargs)
     self.supports_masking = True
     self.input_spec = InputSpec(min_ndim=2)
     # Unit-stddev Gaussian noise source; NOTE(review): stddev is fixed at
     # 1.0 here — presumably the learned kernel rescales it in call().
     self.noise_layer = tf.keras.layers.GaussianNoise(1.0)
     # Resolve user-supplied identifiers to callables/objects.
     self.activation = activations.get(activation)
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
Ejemplo n.º 24
0
 def build(self, input_shape):
     """Create the layer's `kernel` once the input shape is known.

     Validates that the layer dtype is floating/complex and that the last
     input dimension is statically defined, then registers an input spec
     and allocates the `[last_dim, units]` kernel variable.

     Raises:
         TypeError: if the layer dtype is not floating point or complex.
         ValueError: if the last input dimension is `None`.
     """
     weight_dtype = dtypes.as_dtype(self.dtype or K.floatx())
     if not (weight_dtype.is_floating or weight_dtype.is_complex):
         raise TypeError(
             'Unable to build `Dense` layer with non-floating point '
             'dtype %s' % (weight_dtype, ))
     shape = tensor_shape.TensorShape(input_shape)
     last_dim = tensor_shape.dimension_value(shape[-1])
     if last_dim is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     # Pin future inputs to this feature size.
     self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
     self.kernel = self.add_weight('kernel',
                                   shape=[last_dim, self.units],
                                   initializer=self.kernel_initializer,
                                   dtype=self.dtype,
                                   trainable=True)
     self.built = True
Ejemplo n.º 25
0
  def __init__(self,
               units,
               activation=None,
               kernel_initializer='glorot_uniform',
               kernel_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               **kwargs):
    """Layer with `units` output features and a configurable activation.

    Args:
      units: positive int, dimensionality of the output space.
      activation: activation function or string identifier (or None).
      kernel_initializer: initializer (or identifier) for the kernel.
      kernel_regularizer: optional regularizer (or identifier) for the kernel.
      activity_regularizer: optional regularizer for the layer output.
      kernel_constraint: optional constraint (or identifier) for the kernel.
      **kwargs: forwarded to the base layer (supports `input_dim` as
        shorthand for a 1-D `input_shape`).
    """
    # Keras convention: `input_dim` is shorthand for a 1-D `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    # BUG FIX: forward **kwargs to the base class. Previously they were
    # dropped, which made the `input_dim`/`input_shape` handling above
    # dead code and silently discarded kwargs such as `name` or `dtype`.
    super(RWNN, self).__init__(**kwargs)

    self.units = int(units)  # idempotent for ints; normalizes e.g. numpy ints
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    # Store the remaining configuration too (it was previously accepted but
    # silently discarded), matching the sibling layers in this file.
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)
Ejemplo n.º 26
0
 def build(self, input_shape):
     """Create a `kernel` that broadcasts over all axes but the last.

     The kernel has shape `[1, ..., 1, last_dim]`, i.e. one learnable value
     per feature channel, broadcastable against the full input.

     Raises:
         ValueError: if the last input dimension is `None`.
     """
     shape = tensor_shape.TensorShape(input_shape)
     feature_dim = tensor_shape.dimension_value(shape[-1])
     if feature_dim is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     self.input_spec = InputSpec(min_ndim=2, axes={-1: feature_dim})
     # All-ones shape except the trailing feature axis.
     kernel_shape = [1] * len(shape.as_list())
     kernel_shape[-1] = feature_dim
     self.kernel = self.add_weight(shape=kernel_shape,
                                   initializer=self.kernel_initializer,
                                   name='kernel',
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint,
                                   dtype=self.dtype,
                                   trainable=True)
     super(LearnableNoise, self).build(shape)
     self.built = True
Ejemplo n.º 27
0
 def __init__(self, factor, fill_mode='nearest', seed=None, **kwargs):
     """Randomly rotate images during training.

     Args:
         factor: float or (lower, upper) pair of non-negative floats giving
             the rotation range.
         fill_mode: one of 'nearest' or 'bilinear'; anything else is
             rejected.
         seed: optional int seed for the internal random generator.
         **kwargs: forwarded to the base layer constructor.

     Raises:
         ValueError: if a bound is negative or the lower bound exceeds the
             upper bound.
         NotImplementedError: if `fill_mode` is unsupported.
     """
     self.factor = factor
     if isinstance(factor, (tuple, list)):
         self.lower = factor[0]
         self.upper = factor[1]
     else:
         self.lower = self.upper = factor
     if self.lower < 0. or self.upper < 0.:
         raise ValueError('Factor cannot have negative values, '
                          'got {}'.format(factor))
     # ROBUSTNESS FIX: a reversed (lower, upper) pair was previously
     # accepted silently; RandomZoom in this file validates the ordering,
     # so do the same here.
     if self.lower > self.upper:
         raise ValueError('Factor cannot have lower bound larger than '
                          'upper bound, got {}.'.format(factor))
     if fill_mode not in {'nearest', 'bilinear'}:
         raise NotImplementedError(
             '`fill_mode` {} is not supported yet.'.format(fill_mode))
     self.fill_mode = fill_mode
     self.seed = seed
     self._rng = make_generator(self.seed)
     self.input_spec = InputSpec(ndim=4)
     super(RandomRotation, self).__init__(**kwargs)
Ejemplo n.º 28
0
 def __init__(self,
              filters,
              kernelYX,
              padding="same",
              **kwargs):  #padding can also be REFLECT
     """Convolution-style layer with explicit spatial padding.

     Args:
         filters: number of output filters.
         kernelYX: int or pair of ints giving the (Y, X) kernel size;
             every spatial size must be odd so a center pixel exists.
         padding: "same" (mapped to "CONSTANT") or another tf.pad mode
             such as "REFLECT".
         **kwargs: may contain `name` and `constant_values`; the rest is
             stored and forwarded to the convolution call.

     Raises:
         ValueError: if any spatial kernel size is even.
     """
     self.kernelYX = tuple(ensure_multiplicity(2, kernelYX))
     # BUG FIX: validate and derive centers from the normalized tuple, not
     # the raw argument — a scalar `kernelYX` is not iterable, so the old
     # code crashed with TypeError despite ensure_multiplicity expanding
     # scalars. Also raise ValueError instead of `assert`, which is
     # stripped under `python -O`.
     for k in self.kernelYX:
         if k % 2 != 1:
             raise ValueError(
                 'kernel size must be uneven on all spatial axis')
     # Center offset of each (odd-sized) kernel axis.
     self.ker_center = [(k - 1) // 2 for k in self.kernelYX]
     if padding == "same":
         padding = "CONSTANT"
     self._name = kwargs.pop('name', "Conv3DYXC")
     self.padding_constant_value = kwargs.pop('constant_values', 0)
     self.input_spec = InputSpec(ndim=4)
     self.padding = padding
     self.filters = filters
     self.conv_args = kwargs
     super().__init__(self._name)
Ejemplo n.º 29
0
 def __init__(self,
              factor,
              seed=None,
              name=None,
              **kwargs):
     """Randomly adjust image saturation during training.

     Args:
         factor: float or (lower, upper) pair of floats in [0, 1].
         seed: optional int seed; if None a fresh seed is drawn for this
             instance.
         name: optional layer name, forwarded to the base class.
         **kwargs: forwarded to the base layer constructor.

     Raises:
         ValueError: if the factor is negative or its lower bound exceeds
             1.0.
     """
     self.factor = factor
     if isinstance(factor, (tuple, list)):
         self.lower = factor[0]
         self.upper = factor[1]
     else:
         self.lower = self.upper = factor
     # NOTE(review): only the lower bound is checked against 1.0 — an
     # upper bound > 1.0 still passes; confirm whether that is intended.
     if self.lower < 0. or self.upper < 0. or self.lower > 1.:
         raise ValueError(
             'Factor cannot have negative values or greater than 1.0,'
             ' got {}'.format(factor))
     # BUG FIX: the old default `seed=random.randint(0, 1000)` was
     # evaluated once at import time, so every instance built without an
     # explicit seed shared the exact same value. Draw per instance.
     if seed is None:
         seed = random.randint(0, 1000)
     self.seed = seed
     self.input_spec = InputSpec(ndim=4)
     super(RandomSaturation, self).__init__(name=name, **kwargs)
 def build(self, input_shape):
     """Create the learnable `alpha` parameter (one value per non-shared axis).

     Axes listed in `self.shared_axes` (1-based, batch axis excluded) get a
     size-1 parameter dimension so `alpha` broadcasts across them; the
     input spec then only pins the non-shared axes.
     """
     param_shape = list(input_shape[1:])
     if self.shared_axes is not None:
         # `shared_axes` uses 1-based indices (axis 0 is the batch axis).
         for axis in self.shared_axes:
             param_shape[axis - 1] = 1
     self.alpha = self.add_weight(shape=param_shape,
                                  name='alpha',
                                  initializer=self.alpha_initializer,
                                  regularizer=self.alpha_regularizer,
                                  constraint=self.alpha_constraint)
     # Pin every non-shared, non-batch axis to its observed size.
     if self.shared_axes:
         axes = {i: input_shape[i]
                 for i in range(1, len(input_shape))
                 if i not in self.shared_axes}
     else:
         axes = {}
     self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
     self.built = True