Example #1
 def build(self, input_shape):
     # 1x1 ConvBN that produces the route branch of the block.
     self._route_conv = nn_blocks.ConvBN(filters=self._filters // 2,
                                         kernel_size=(1, 1),
                                         strides=(1, 1),
                                         padding="same",
                                         **self._base_config)
     if self._upsample:
         # Optional path: a second 1x1 ConvBN followed by upsampling, used
         # when the branch is merged with a higher-resolution feature map.
         self._process_conv = nn_blocks.ConvBN(filters=self._filters // 4,
                                               kernel_size=(1, 1),
                                               strides=(1, 1),
                                               padding="same",
                                               **self._base_config)
         self._upsampling_block = tf.keras.layers.UpSampling2D(
             size=self._upsample_size)
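Every example on this page revolves around nn_blocks.ConvBN, whose implementation is not shown here. As a rough mental model, here is a minimal sketch assuming the block simply chains Conv2D, BatchNormalization, and an activation; the defaults, the use_bias handling, and the 'leaky' activation case are assumptions, not the library's actual code:

import tensorflow as tf

class ConvBNSketch(tf.keras.layers.Layer):
  """Hypothetical stand-in for nn_blocks.ConvBN: Conv2D + BN + activation."""

  def __init__(self, filters, kernel_size=(1, 1), strides=(1, 1),
               padding='same', use_bn=True, activation='leaky', **kwargs):
    super().__init__(**kwargs)
    # The conv bias is redundant when batch norm immediately follows.
    self._conv = tf.keras.layers.Conv2D(
        filters, kernel_size, strides=strides, padding=padding,
        use_bias=not use_bn)
    self._bn = tf.keras.layers.BatchNormalization() if use_bn else None
    if activation == 'leaky':
      self._act = tf.keras.layers.LeakyReLU(alpha=0.1)
    else:
      self._act = tf.keras.layers.Activation(activation or 'linear')

  def call(self, inputs):
    x = self._conv(inputs)
    if self._bn is not None:
      x = self._bn(x)
    return self._act(x)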
Example #2
  def build(self, inputs):
    # `inputs` is a dict of feature maps keyed by level; keep the keys so
    # the same levels can be looked up again in call().
    self.key_list = list(inputs.keys())

    # Keys are string level numbers; record the level range.
    keys = [int(key) for key in self.key_list]
    self._min_level = min(keys)
    self._max_level = max(keys)

    # One ConvBN head per input level.
    self._head = dict()
    for key in self.key_list:
      self._head[key] = nn_blocks.ConvBN(**self._base_config)
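The matching call() is not shown in this example; a minimal sketch, assuming the layer applies its per-level head to the corresponding input and returns a dict keyed the same way (this companion method is an assumption, not part of the original snippet):

  def call(self, inputs):
    # Hypothetical companion to the build() above: one ConvBN per level.
    return {key: self._head[key](inputs[key]) for key in self.key_list}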
Example #3
 def _tiny_stack(self, inputs, config, name):
     # Downsample with max pooling, then apply a single 3x3 ConvBN block.
     x = tf.keras.layers.MaxPool2D(pool_size=2,
                                   strides=config.strides,
                                   padding='same',
                                   data_format=None,
                                   name=f"{name}_tiny/pool")(inputs)
     # Temporarily override the shared ConvBN kwargs with this stack's
     # activation and name, then restore the defaults afterwards.
     self._default_dict['activation'] = self._get_activation(
         config.activation)
     self._default_dict['name'] = f"{name}_tiny/conv"
     x = nn_blocks.ConvBN(filters=config.filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          padding='same',
                          **self._default_dict)(x)
     self._default_dict['activation'] = self._activation
     self._default_dict['name'] = None
     return x
Example #4
  def test_gradient_pass_through(self, filters):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    with tf.device("/CPU:0"):
      test_layer = nn_blocks.ConvBN(filters, kernel_size=(3, 3), padding="same")

    # Random input and matching-size target tensors.
    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, 224, 224, 3), dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, 224, 224, filters), dtype=tf.float32))

    # One training step: every trainable variable must receive a gradient
    # (no None entries), i.e. the layer must not block backpropagation.
    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
    self.assertNotIn(None, grad)
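The filters argument implies the test is driven by a parameterized decorator. A minimal sketch of how such a case might be declared, assuming absl.testing.parameterized; the decorator, the test-class name, and the concrete filter counts are assumptions, not part of the original snippet:

import tensorflow as tf
from absl.testing import parameterized

class ConvBNTest(tf.test.TestCase, parameterized.TestCase):

  # Filter counts here are illustrative only.
  @parameterized.parameters(4, 8, 64)
  def test_gradient_pass_through(self, filters):
    ...  # body as shown in Example #4 above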
Example #5
 def test_pass_through(self, kernel_size, padding, strides):
   # pad_const = 1 matches TensorFlow's "same" padding only for 3x3
   # kernels; "valid" padding adds nothing.
   if padding == "same":
     pad_const = 1
   else:
     pad_const = 0
   x = ks.Input(shape=(224, 224, 3))
   test_layer = nn_blocks.ConvBN(
       filters=64,
       kernel_size=kernel_size,
       padding=padding,
       strides=strides,
       trainable=False)
   outx = test_layer(x)
   print(outx.shape.as_list())
   # Expected shape from the standard convolution output-size formula:
   # out = (in - kernel + 2 * pad) / stride + 1.
   test = [
       None,
       int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),
       int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1), 64
   ]
   print(test)
   self.assertAllEqual(outx.shape.as_list(), test)
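As a worked instance of that formula: with kernel_size=(3, 3), padding="same" (so pad_const = 1) and strides=(2, 2), each spatial dimension comes out to int((224 - 3 + 2) / 2 + 1) = int(112.5) = 112, so the expected shape is [None, 112, 112, 64]. These concrete parameter values are illustrative; the real ones come from the test's parameterization.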
Example #6
  def build_layer(self, layer_type, filters, filter_scale, count, stack_type,
                  downsample):
    # Build the optional stack of layers for the CSPStack to wrap.
    if stack_type is not None:
      layers = []
      if layer_type == "residual":
        for _ in range(count):
          layers.append(
              nn_blocks.DarkResidual(
                  filters=filters // filter_scale, filter_scale=filter_scale))
      else:
        for _ in range(count):
          layers.append(nn_blocks.ConvBN(filters=filters))

      # Hand CSPStack either a list of layers or a single Sequential model.
      if stack_type == "model":
        layers = tf.keras.Sequential(layers=layers)
    else:
      layers = None

    stack = nn_blocks.CSPStack(
        filters=filters,
        filter_scale=filter_scale,
        downsample=downsample,
        model_to_wrap=layers)
    return stack
Example #7
    def __init__(self,
                 backbone,
                 num_classes,
                 input_specs=layers.InputSpec(shape=[None, None, None, 3]),
                 dropout_rate=0.0,
                 kernel_initializer='random_uniform',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 add_head_batch_norm=False,
                 use_sync_bn: bool = False,
                 norm_momentum: float = 0.99,
                 norm_epsilon: float = 0.001,
                 **kwargs):
        """Classification initialization function.

    Args:
      backbone: a backbone network.
      num_classes: `int` number of classes in classification task.
      input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
      dropout_rate: `float` rate for dropout regularization.
      kernel_initializer: kernel initializer for the dense layer.
      kernel_regularizer: tf.keras.regularizers.Regularizer object. Default to
                          None.
      bias_regularizer: tf.keras.regularizers.Regularizer object. Default to
                          None.
      add_head_batch_norm: `bool` whether to add a batch normalization layer
        before pool.
      use_sync_bn: `bool` if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
      norm_epsilon: `float` small float added to variance to avoid dividing by
        zero.
      **kwargs: keyword arguments to be passed.
    """
        self._self_setattr_tracking = False
        self._config_dict = {
            'backbone': backbone,
            'num_classes': num_classes,
            'input_specs': input_specs,
            'dropout_rate': dropout_rate,
            'kernel_initializer': kernel_initializer,
            'kernel_regularizer': kernel_regularizer,
            'bias_regularizer': bias_regularizer,
            'add_head_batch_norm': add_head_batch_norm,
            'use_sync_bn': use_sync_bn,
            'norm_momentum': norm_momentum,
            'norm_epsilon': norm_epsilon,
        }
        self._input_specs = input_specs
        self._kernel_regularizer = kernel_regularizer
        self._bias_regularizer = bias_regularizer
        self._backbone = backbone
        # Earlier head variants kept for reference; both were replaced by
        # the ConvBN head below.
        # self._head = tf.keras.layers.Dense(
        #     num_classes, kernel_initializer=kernel_initializer,
        #     kernel_regularizer=self._kernel_regularizer,
        #     bias_regularizer=self._bias_regularizer)
        # self._head = tf.keras.layers.Conv2D(
        #     num_classes, kernel_size=1,
        #     kernel_initializer=kernel_initializer,
        #     kernel_regularizer=self._kernel_regularizer,
        #     bias_regularizer=self._bias_regularizer)

        self._head = nn_blocks.ConvBN(
            filters=num_classes,
            kernel_size=1,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            use_bn=False,
            activation=None)

        # Unused alternative Dense head; only referenced by the commented-out
        # call near the end of __init__.
        self._head2 = tf.keras.layers.Dense(
            num_classes,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer)

        if use_sync_bn:
            self._norm = tf.keras.layers.experimental.SyncBatchNormalization
        else:
            self._norm = tf.keras.layers.BatchNormalization
        axis = (-1 if tf.keras.backend.image_data_format() == 'channels_last'
                else 1)

        # Build the functional graph: run the backbone and classify from
        # its deepest endpoint.
        inputs = tf.keras.Input(shape=input_specs.shape[1:])
        endpoints = backbone(inputs)
        x = endpoints[max(endpoints.keys())]

        if add_head_batch_norm:
            x = self._norm(axis=axis,
                           momentum=norm_momentum,
                           epsilon=norm_epsilon)(x)
        # Global average pool, then restore two singleton spatial axes so
        # the 1x1 ConvBN head can act as a fully connected layer.
        x = tf.keras.layers.GlobalAveragePooling2D()(x)
        x = tf.expand_dims(x, axis=1)
        x = tf.expand_dims(x, axis=1)
        # x = tf.keras.layers.Dropout(dropout_rate)(x)
        # x = tf.keras.layers.AveragePooling2D(pool_size=8)(x)
        x = self._head(x)
        x = tf.keras.activations.softmax(x, axis=-1)
        # Drop the singleton axes again, leaving [batch, num_classes].
        x = tf.squeeze(x, axis=1)
        x = tf.squeeze(x, axis=1)
        # x = self._head2(x)

        super(ClassificationModel, self).__init__(inputs=inputs,
                                                  outputs=x,
                                                  **kwargs)
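A hypothetical instantiation of the model above; `my_backbone` is a placeholder for any Keras model that maps an image batch to a dict of endpoint tensors keyed by level:

# `my_backbone` is a placeholder, not from the original source.
model = ClassificationModel(backbone=my_backbone, num_classes=1000)
probs = model(tf.zeros([1, 224, 224, 3]))  # shape [1, 1000], already softmaxed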