Example 1
 def build(self, input_shape):
     input_shape = tensor_shape.TensorShape(input_shape).as_list()
     if len(input_shape) < 3:
         raise ValueError(
             '`TimeDistributed` layer should be passed an `input_shape` '
             'with at least 3 dimensions, received: ' + str(input_shape))
     # Don't enforce the batch or time dimension.
     self.input_spec = InputSpec(shape=[None, None] + input_shape[2:])
     child_input_shape = [input_shape[0]] + input_shape[2:]
     super(TimeDistributed, self).build(tuple(child_input_shape))
     self.built = True
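
The spec above pins every dimension except batch and time, which stay None. A minimal sketch of the resulting contract, using only the public tf.keras API (the wrapped Dense layer and the shapes are illustrative, not taken from the example):

import tensorflow as tf

# TimeDistributed applies the wrapped layer independently per timestep;
# batch and time sizes are free, the feature axis is fixed once built.
layer = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(8))
out = layer(tf.zeros((4, 10, 16)))  # (batch=4, time=10, features=16)
print(out.shape)                    # (4, 10, 8)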
Example 2
    def __init__(self, units, mask, activation=None, use_bias=True, **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        # each instance of the class will have all of these attributes
        self.mask_tensor = mask
        self.activation = activations.get(activation)
        self.units = units
        self.use_bias = use_bias
        self.input_spec = InputSpec(min_ndim=2)
        super(MaskedDense, self).__init__(**kwargs)
Example 3
 def __init__(self,
              height,
              width,
              interpolation='bilinear',
              name=None,
              **kwargs):
   self.target_height = height
   self.target_width = width
   self.interpolation = interpolation
   self._interpolation_method = get_interpolation(interpolation)
   self.input_spec = InputSpec(ndim=4)
   super(Resizing, self).__init__(name=name, **kwargs)
Example 4
 def __init__(self, horizontal=None, vertical=None, seed=None, **kwargs):
   # If both arguments are None, set both to True.
   if horizontal is None and vertical is None:
     self.horizontal = True
     self.vertical = True
   else:
     self.horizontal = horizontal or False
     self.vertical = vertical or False
   self.seed = seed
   self._rng = make_generator(self.seed)
   self.input_spec = InputSpec(ndim=4)
   super(RandomFlip, self).__init__(**kwargs)
Example 5
  def __init__(self,
               cell,          # Normal RNN cell
               output_layer,  # Output layer turning the RNN cell into the desired output dim
               likelihood_fn, # Likelihood function used for computing gradients
               niter,         # Number of iterations to run the RIM for
               **kwargs):

    # super(self.__class__, ...) recurses if this class is ever subclassed;
    # naming the class explicitly is safer.
    super(self.__class__, self).__init__(cell, **kwargs)
    self.output_layer = output_layer
    self.likelihood_fn = likelihood_fn
    self.niter = niter
    self.input_spec = [InputSpec(ndim=5)]
Example 6
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        if input_channel % self.groups != 0:
            raise ValueError(
                'The number of input channels must be evenly divisible by the number '
                'of groups. Received groups={}, but the input has {} channels '
                '(full input shape is {}).'.format(self.groups, input_channel,
                                                   input_shape))
        kernel_shape = self.kernel_size + (
            input_channel // self.groups,
            self.filters,
        )

        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters, ),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format,
        )
        self.built = True
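
A quick worked check of the grouped-convolution arithmetic above, with assumed example numbers: each group convolves only input_channel // groups channels, so the kernel's input-channel axis shrinks accordingly.

# Hypothetical numbers: kernel_size=(3, 3), filters=64, groups=4, 32 input channels.
kernel_size = (3, 3)
filters, groups, input_channel = 64, 4, 32
assert input_channel % groups == 0               # 32 % 4 == 0, so the check passes
kernel_shape = kernel_size + (input_channel // groups, filters)
print(kernel_shape)                              # (3, 3, 8, 64)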
Example 7
 def __init__(self, factor, seed=None, name=None, **kwargs):
   self.factor = factor
   if isinstance(factor, (tuple, list)):
     self.lower = factor[0]
     self.upper = factor[1]
   else:
     self.lower = self.upper = factor
   if self.lower < 0. or self.upper < 0. or self.lower > 1.:
      raise ValueError('Factor must be non-negative, with the lower bound '
                       'at most 1; got {}'.format(factor))
   self.seed = seed
   self.input_spec = InputSpec(ndim=4)
   super(RandomContrast, self).__init__(name=name, **kwargs)
Example 8
 def __init__(self, elementwise=False, name=None, **kwargs):
     super(AutoAugment, self).__init__(name=name, **kwargs)
     self.elementwise = elementwise
     self.transforms = [
         tf.keras.Sequential([
             augmentations.RandomChance(_get_transform(t1, m1), p1),
             augmentations.RandomChance(_get_transform(t2, m2), p2),
         ]) for (t1, p1, m1), (t2, p2, m2) in _AUTO_AUGMENT_POLICY_V0
     ]
     self._transform = augmentations.RandomChoice(self.transforms,
                                                  n_transforms=1,
                                                  elementwise=elementwise)
     self.input_spec = InputSpec(ndim=4, dtype=tf.uint8)
Example 9
 def __init__(self, factor, seed=None, name=None, **kwargs):
     self.factor = factor
     if isinstance(factor, (tuple, list)):
         self.lower = factor[0]
         self.upper = factor[1]
     else:
         self.lower = self.upper = factor
     if self.lower < 0. or self.upper < 0. or self.lower > 1.:
          raise ValueError('Factor cannot have negative values or values '
                           'greater than 1.0, got {}'.format(factor))
      # Draw the fallback seed per instance, not once at definition time.
      self.seed = seed if seed is not None else random.randint(0, 1000)
     self.input_spec = InputSpec(ndim=4)
     super(RandomBrightness, self).__init__(name=name, **kwargs)
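
Python evaluates default arguments once, when the def statement executes, which is why the random seed fallback belongs in the body rather than in the signature. A minimal standalone sketch of the difference:

import random

def bad(seed=random.randint(0, 1000)):  # drawn once, when `def` executes
    return seed

def good(seed=None):                    # drawn on each call instead
    return random.randint(0, 1000) if seed is None else seed

print(bad(), bad())    # same value every time
print(good(), good())  # almost certainly different values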
Example 10
 def __init__(self,
              level,
              interpolation="nearest",
              fill_mode="constant",
              fill_value=0.0,
              name=None,
              **kwargs):
     super(ShearY, self).__init__(name=name, **kwargs)
     self.level = level
     self.interpolation = interpolation
     self.fill_mode = fill_mode
     self.fill_value = fill_value
     self.input_spec = InputSpec(ndim=4)
Example 11
 def __init__(self,
              pixels,
              interpolation="nearest",
              fill_mode="constant",
              fill_value=0.0,
              name=None,
              **kwargs):
     super(TranslateY, self).__init__(name=name, **kwargs)
     self.pixels = pixels
     self.interpolation = interpolation
     self.fill_mode = fill_mode
     self.fill_value = fill_value
     self.input_spec = InputSpec(ndim=4)
Example 12
 def build(self, input_shape):
   input_shape = tensor_shape.TensorShape(input_shape).as_list()
   assert len(input_shape) >= 3
   self.input_spec = InputSpec(shape=input_shape)
   child_input_shape = [input_shape[0]] + input_shape[2:]
   if not self.layer.built:
     # The base layer class calls a conversion function on the input shape to
     # convert it to a TensorShape. The conversion function requires a
     # tuple which is why we cast the shape.
     self.layer.build(tuple(child_input_shape))
     self.layer.built = True
   super(TimeDistributed, self).build()
   self.built = True
Example 13
 def __init__(self, num_templates=10, **kwargs):
     WeightedMixIn.__init__(self)
     new_kwargs = kwargs.copy()
     new_kwargs["center"] = False
     new_kwargs["scale"] = False
     BatchNormalization.__init__(self, **new_kwargs)
     self.num_templates = num_templates
     if "scale" not in kwargs or kwargs["scale"]:
         self.add_template_variable(weight_name="template_gamma")
     if "center" not in kwargs or kwargs["center"]:
         self.add_template_variable(weight_name="template_beta")
     mixture_input_spec = InputSpec(ndim=1)
     self.input_spec = (self.input_spec, mixture_input_spec)
Example 14
  def __init__(self,
               max_tokens=None,
               output_mode=BINARY,
               sparse=False,
               **kwargs):
    # 'output_mode' must be one of (COUNT, BINARY, TFIDF)
    layer_utils.validate_string_arg(
        output_mode,
        allowable_strings=(COUNT, BINARY, TFIDF),
        layer_name="CategoryEncoding",
        arg_name="output_mode")

    # If max_tokens is set, it must be at least 1 - otherwise we are
    # creating a 0-element vocab, which doesn't make sense.
    if max_tokens is not None and max_tokens < 1:
      raise ValueError("max_tokens must be >= 1.")

    # We need to call super() before we call _add_state_variable().
    combiner = _CategoryEncodingCombiner(
        max_tokens=max_tokens,
        compute_idf=output_mode == TFIDF)
    super(CategoryEncoding, self).__init__(combiner=combiner, **kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell(
        "CategoryEncoding").set(True)

    self.max_tokens = max_tokens
    self.output_mode = output_mode
    self.sparse = sparse
    self._called = False

    if self.output_mode == TFIDF:
      # The TF-IDF weight may have a (None,) tensorshape. This creates
      # a 1D variable with arbitrary shape, which we can assign any weight to
      # so long as it has 1 dimension. In order to properly initialize this
      # weight in Keras, we need to provide a custom callable initializer which
      # does not depend on the shape of the weight (as all other initializers
      # do) since the weight is not known. Hence the lambda shape, dtype: [0].
      if max_tokens is None:
        initializer = lambda shape, dtype: [0]
      else:
        initializer = init_ops.zeros_initializer

      # We are adding these here instead of in build() since they do not depend
      # on the input shape at all.
      self.tf_idf_weights = self._add_state_variable(
          name=_IDF_NAME,
          shape=tensor_shape.TensorShape((max_tokens,)),
          dtype=K.floatx(),
          initializer=initializer)

      self.input_spec = InputSpec(ndim=2)
Example 15
    def build(self, input_shape):
        dtype = dtypes.as_dtype(self.dtype or K.floatx())
        if not (dtype.is_floating or dtype.is_complex):
            raise TypeError(
                'Unable to build `Dense_plasticity` layer with non-floating point '
                'dtype %s' % (dtype, ))
        input_shape = tensor_shape.TensorShape(input_shape)
        if tensor_shape.dimension_value(input_shape[-1]) is None:
            raise ValueError(
                'The last dimension of the inputs to `Dense_plasticity` '
                'should be defined. Found `None`.')
        last_dim = tensor_shape.dimension_value(input_shape[-1])
        self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
        self.kernel = self.add_weight('kernel',
                                      shape=[last_dim, self.units],
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      dtype=self.dtype,
                                      trainable=True)

        #plasticity
        self.kernel_p = self.add_weight(
            'kernel_p',
            shape=[last_dim, self.units],
            initializer=keras.initializers.Constant(),
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True)
        self.hebb = self.add_weight('hebb',
                                    shape=[last_dim, self.units],
                                    initializer=self.kernel_initializer,
                                    regularizer=self.kernel_regularizer,
                                    constraint=self.kernel_constraint,
                                    dtype=self.dtype,
                                    trainable=False)

        if self.use_bias:
            self.bias = self.add_weight('bias',
                                        shape=[
                                            self.units,
                                        ],
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        dtype=self.dtype,
                                        trainable=True)
        else:
            self.bias = None
        self.built = True
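
build() creates the fixed kernel, a plastic coefficient kernel_p, and a non-trainable Hebbian trace hebb, but the snippet stops before the forward pass. A hedged sketch of how a differentiable-plasticity forward pass is typically written; the combination rule below is an assumption, not taken from this example:

def call(self, inputs):
    # Assumed rule: effective weight = fixed kernel plus the Hebbian trace,
    # gated per-connection by the learned plasticity coefficient.
    effective_kernel = self.kernel + self.kernel_p * self.hebb
    outputs = tf.matmul(inputs, effective_kernel)  # assumes `import tensorflow as tf`
    if self.use_bias:
        outputs = tf.nn.bias_add(outputs, self.bias)
    return outputs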
Example 16
 def __init__(self,
              rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              is_mc=True,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              **kwargs):
     super(VWNConv, self).__init__(
         trainable=trainable,
         name=name,
         activity_regularizer=regularizers.get(activity_regularizer),
         **kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     if (self.padding == 'causal'
             and not isinstance(self, (Conv1D, SeparableConv1D))):
          raise ValueError('Causal padding is only supported for `Conv1D` '
                           'and `SeparableConv1D`.')
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.is_mc = tf.cast(is_mc, dtype=tf.bool)
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
      # Initializer for the layer's additional `a` parameter.
      self.a_initializer = initializers.Constant(1e-04)
Example 17
  def __init__(self,
               units,
               rate=1e-2,
               activation='selu',
               kernel_initializer='lecun_uniform',
               **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    # Pass kwargs through so the input_shape handling above takes effect.
    super(SNNDense, self).__init__(**kwargs)
    self.units = int(units)
    self.rate = rate
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.input_spec = InputSpec(min_ndim=2)
Example 18
 def build(self, input_shape):
     dtype = dtypes.as_dtype(self.dtype or k.floatx())
     if not (dtype.is_floating or dtype.is_complex):
          raise TypeError('Unable to build `NoisyDense` layer with '
                          'non-floating point dtype %s' % (dtype,))
     input_shape = tensor_shape.TensorShape(input_shape)
     if tensor_shape.dimension_value(input_shape[-1]) is None:
         raise ValueError('The last dimension of the inputs to `NoisyDense` '
                          'should be defined. Found `None`.')
     last_dim = tensor_shape.dimension_value(input_shape[-1])
     self.input_spec = InputSpec(min_ndim=2,
                                 axes={-1: last_dim})
      if self.std_func is None:
          std = math.sqrt(3 / last_dim)
      else:
          std = self.std_func(last_dim)
      if self.sigma_func is not None:
          sigma_init = self.sigma_func(self.sigma_init, last_dim)
      else:
          sigma_init = self.sigma_init
     self.mu_weights = self.add_weight(
         'mu_weights',
         shape=[last_dim, self.units],
         initializer=initializers.RandomUniform(minval=-std, maxval=std),
         regularizer=self.kernel_regularizer,
         constraint=self.kernel_constraint,
         dtype=self.dtype,
         trainable=True)
     self.sigma_weights = self.add_weight(
         'sigma_weights',
         shape=[last_dim, self.units],
         initializer=initializers.Constant(value=sigma_init),
         dtype=self.dtype,
         trainable=True)
     if self.use_bias:
         self.mu_bias = self.add_weight(
             'mu_bias',
             shape=[self.units, ],
             initializer=initializers.RandomUniform(minval=-std, maxval=std),
             regularizer=self.bias_regularizer,
             constraint=self.bias_constraint,
             dtype=self.dtype,
             trainable=True)
         self.sigma_bias = self.add_weight(
             'sigma_bias',
             shape=[self.units, ],
             initializer=initializers.Constant(value=sigma_init),
             dtype=self.dtype,
             trainable=True)
     self.built = True
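
build() sets up mu/sigma pairs but omits call(). In the usual noisy-weight formulation each forward pass samples w = mu + sigma * eps with fresh noise; a hedged sketch under that assumption (independent noise per weight, which is one common variant):

def call(self, inputs):
    # Fresh Gaussian noise per call; assumes `import tensorflow as tf`.
    eps_w = tf.random.normal(tf.shape(self.mu_weights), dtype=self.dtype)
    outputs = tf.matmul(inputs, self.mu_weights + self.sigma_weights * eps_w)
    if self.use_bias:
        eps_b = tf.random.normal(tf.shape(self.mu_bias), dtype=self.dtype)
        outputs = tf.nn.bias_add(
            outputs, self.mu_bias + self.sigma_bias * eps_b)
    return outputs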
Example 19
 def __init__(self, pool_function, pool_size, strides,
              padding='valid', data_format='channels_last',
              name=None, **kwargs):
   super(Pooling1D, self).__init__(name=name, **kwargs)
   if data_format is None:
     data_format = backend.image_data_format()
   if strides is None:
     strides = pool_size
   self.pool_function = pool_function
   self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.input_spec = InputSpec(ndim=3)
Example 20
 def __init__(self,
              degrees,
              interpolation="nearest",
              fill_mode="constant",
              fill_value=0.0,
              name=None,
              **kwargs):
     super(Rotate, self).__init__(name=name, **kwargs)
     self.degrees = degrees
     self.interpolation = interpolation
     self.fill_mode = fill_mode
     self.fill_value = fill_value
     self._radians = degrees * math.pi / 180.0
     self.input_spec = InputSpec(ndim=4)
Example 21
    def __init__(self,
                 units,
                 learning_rate=0.01,
                 online=True,
                 n_passes=1,
                 return_hidden=True,
                 use_bias=True,
                 visible_activation='sigmoid',
                 hidden_activation='sigmoid',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='glorot_uniform',
                 kernel_regularizer='l2',
                 bias_regularizer='l2',
                 activity_regularizer='l2',
                 kernel_constraint=constraints.MinMaxNorm(
                     min_value=-1.0, max_value=1.0, rate=1.0, axis=-1),
                 bias_constraint=constraints.MinMaxNorm(
                     min_value=-1.0, max_value=1.0, rate=1.0, axis=-1),
                 optimizer='Adam',
                 **kwargs):
        
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)

        super(OnlineBolzmannCell, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer), **kwargs)

        self.units = units
        self.learning_rate = learning_rate
        self.online = online
        self.return_hidden = return_hidden
        self.visible_activation = activations.get(visible_activation)
        self.hidden_activation = activations.get(hidden_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.optimizer = optimizers.get(optimizer)
        self.optimizer.learning_rate = self.learning_rate
        self.n_passes = n_passes

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
Example 22
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 activations_datatype_size_byte=1,
                 weights_datatype_size_byte=1,
                 results_datatype_size_byte=4,
                 systolic_array_height=256,
                 systolic_array_width=256,
                 activation_fifo_depth=8,
                 accumulator_array_height=4096,
                 log_file_output_dir='.',
                 model_name='unnamed',
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(mpusim_fc_base, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.units = int(units)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)

        self.activations_datatype_size_byte = activations_datatype_size_byte
        self.weights_datatype_size_byte = weights_datatype_size_byte
        self.results_datatype_size_byte = results_datatype_size_byte
        self.systolic_array_height = systolic_array_height
        self.systolic_array_width = systolic_array_width
        self.activation_fifo_depth = activation_fifo_depth
        self.accumulator_array_height = accumulator_array_height
        self.log_file_output_dir = log_file_output_dir
        self.model_name = model_name
Example 23
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)

        self.kernel_weights = self.add_weight(
            name='kernel_weights',
            shape=kernel_shape,
            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.05),
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype)
        self.kernel_log_scale = self.add_weight(name='kernel_log_scale',
                                                shape=(1, 1, 1, self.filters),
                                                initializer=tf.constant_initializer(value=0.),
                                                regularizer=None,
                                                constraint=None,
                                                trainable=True,
                                                dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None

        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel_weights.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format)
        self.built = True
Example 24
 def build(self, input_shape):
     dtype = dtypes.as_dtype(self.dtype or tfk.backend.floatx())
     if not (dtype.is_floating or dtype.is_complex):
         raise TypeError(
             'Unable to build `Dense` layer with non-floating point '
             'dtype %s' % (dtype, ))
     input_shape = tensor_shape.TensorShape(input_shape)
     if tensor_shape.dimension_value(input_shape[-1]) is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     last_dim = tensor_shape.dimension_value(input_shape[-1])
     self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
     self.kernel = self.build_kernel(shape=[last_dim, self.units])
     self.build_bias(self.units)
     self.built = True
Example 25
    def __init__(self,
                 min_side=None,
                 max_side=None,
                 interpolation="bilinear",
                 name=None,
                 **kwargs):
        super(ResizingMinMax, self).__init__(name=name, **kwargs)

        if min_side is None and max_side is None:
            raise ValueError("Must specify either 'min_side' or 'max_side'.")

        self.min_side = min_side
        self.max_side = max_side
        self.interpolation = interpolation
        self.input_spec = InputSpec(ndim=4)
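
The constructor only records min_side/max_side; the resize itself presumably scales so the shorter side meets min_side without the longer side exceeding max_side. A hedged sketch of that standard computation (the formula is an assumption, not taken from the layer):

# Hypothetical helper: pick an output size for an (h, w) image.
def resize_dims(h, w, min_side=None, max_side=None):
    scale = 1.0
    if min_side is not None:
        scale = min_side / min(h, w)              # shorter side -> min_side
    if max_side is not None:
        scale = min(scale, max_side / max(h, w))  # cap the longer side
    return int(round(h * scale)), int(round(w * scale))

print(resize_dims(480, 640, min_side=600, max_side=1000))  # (600, 800)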
Example 26
 def build(self, two_input_shapes):
     [input_shape_targets, input_shape_main] = two_input_shapes
     input_shape_main = tensor_shape.TensorShape(input_shape_main)
     input_channel = self._get_input_channel(input_shape_main)
     kernel_shape = self.kernel_size + (input_channel, self.filters)
     channel_axis = self._get_channel_axis()
     self.input_spec = (InputSpec(ndim=self.rank + 2,
                                  axes={channel_axis: input_channel}),
                        InputSpec(ndim=self.rank + 2,
                                  axes={channel_axis: input_channel}))
     self._build_conv_op_input_shape = input_shape_main
     self._build_input_channel = input_channel
     self._padding_op = self._get_padding_op()
     self._conv_op_data_format = conv_utils.convert_data_format(
         self.data_format, self.rank + 2)
     self._convolution_op = nn_ops.Convolution(
         input_shape_main,
         filter_shape=tensor_shape.TensorShape(kernel_shape),
         dilation_rate=self.dilation_rate,
         strides=self.strides,
         padding=self._padding_op,
         data_format=self._conv_op_data_format)
     self.image_height = input_shape_main[1]
     self.image_width = input_shape_main[2]
     self.input_channels = input_shape_main[3]
     num_patches = self.image_width * self.image_height
     self.num_patches = num_patches
     self.target_matrix = self.add_weight(
         'target_matrix',
         shape=[self.realisation_batch_size * num_patches, self.filters],
         initializer=self.kernel_initializer,
         regularizer=self.kernel_regularizer,
         constraint=self.kernel_constraint,
         dtype=self.dtype,
         trainable=True)
     self.built = True
Example 27
 def build(self, input_shape):
   input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
   input_dims = nest.flatten(
       nest.map_structure(lambda x: x.ndims, input_shape))
   if any(dim < 3 for dim in input_dims):
     raise ValueError(
          '`TimeDistributed` layer should be passed an `input_shape` '
          'with at least 3 dimensions, received: ' + str(input_shape))
   # Don't enforce the batch or time dimension.
   self.input_spec = nest.map_structure(
       lambda x: InputSpec(shape=[None, None] + x.as_list()[2:]), input_shape)
   child_input_shape = nest.map_structure(self._remove_timesteps, input_shape)
   child_input_shape = tf_utils.convert_shapes(child_input_shape)
   super(TimeDistributed, self).build(tuple(child_input_shape))
   self.built = True
Example 28
  def __init__(self,
               height_factor,
               width_factor,
               fill_mode='reflect',
               interpolation='bilinear',
               seed=None,
               name=None,
               **kwargs):
    self.height_factor = height_factor
    if isinstance(height_factor, (tuple, list)):
      self.height_lower = height_factor[0]
      self.height_upper = height_factor[1]
    else:
      self.height_lower = self.height_upper = height_factor
    if self.height_lower < 0. or self.height_upper < 0.:
      raise ValueError('`height_factor` cannot have negative values, '
                       'got {}'.format(height_factor))
    if self.height_lower > self.height_upper:
      raise ValueError('`height_factor` cannot have lower bound larger than '
                       'upper bound, got {}.'.format(height_factor))

    self.width_factor = width_factor
    if isinstance(width_factor, (tuple, list)):
      self.width_lower = width_factor[0]
      self.width_upper = width_factor[1]
    else:
      self.width_lower = self.width_upper = width_factor
    if self.width_lower < 0. or self.width_upper < 0.:
      raise ValueError('`width_factor` cannot have negative values, '
                       'got {}'.format(width_factor))
    if self.width_lower > self.width_upper:
      raise ValueError('`width_factor` cannot have lower bound larger than '
                       'upper bound, got {}.'.format(width_factor))

    if fill_mode not in {'reflect', 'wrap', 'constant'}:
      raise NotImplementedError(
          'Unknown `fill_mode` {}. Only `reflect`, `wrap` and '
          '`constant` are supported.'.format(fill_mode))
    if interpolation not in {'nearest', 'bilinear'}:
      raise NotImplementedError(
          'Unknown `interpolation` {}. Only `nearest` and '
          '`bilinear` are supported.'.format(interpolation))
    self.fill_mode = fill_mode
    self.interpolation = interpolation
    self.seed = seed
    self._rng = make_generator(self.seed)
    self.input_spec = InputSpec(ndim=4)
    super(RandomZoom, self).__init__(name=name, **kwargs)
Example 29
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=self.kernel_trainable,
            dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters, ),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=self.bias_trainable,
                dtype=self.dtype)
        else:
            self.bias = None
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        if self.padding == 'causal':
            op_padding = 'valid'
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=op_padding,
            data_format=conv_utils.convert_data_format(
                self.data_format, self.rank + 2))
        self.built = True
Example 30
 def build(self, input_shape):
     self.alpha = self.add_weight(shape=(1, ),
                                  name="alpha",
                                  initializer=self.alpha_initializer,
                                  regularizer=None,
                                  constraint=None,
                                  dtype=self.dtype,
                                  trainable=True)
     channel_axis = (1 if is_channels_first(self.data_format) else
                     len(input_shape) - 1)
     axes = {}
     for i in range(1, len(input_shape)):
         if i != channel_axis:
             axes[i] = input_shape[i]
     self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
     self.built = True
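
Common to every example here: once self.input_spec is set, Keras validates each incoming tensor against the spec before call() runs. A minimal self-contained sketch of that contract with a toy layer (the class name and shapes are illustrative):

import tensorflow as tf
from tensorflow.keras.layers import InputSpec, Layer

class FourDOnly(Layer):
    """Toy layer that accepts only rank-4 inputs, e.g. image batches."""
    def __init__(self, **kwargs):
        super(FourDOnly, self).__init__(**kwargs)
        self.input_spec = InputSpec(ndim=4)

    def call(self, inputs):
        return inputs

layer = FourDOnly()
layer(tf.zeros((2, 8, 8, 3)))  # OK: rank 4 matches the spec
# layer(tf.zeros((2, 8, 3)))   # would raise ValueError: expected ndim=4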