Example No. 1
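A build method, apparently from a multi-level detection head, that creates one ConvBN block per feature-map key; the bias initializer is scaled by the level's stride (2**key) and the number of input channels at that level.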
def build(self, input_shape):
    self._head = dict()
    for key in self._key_list:
        # Each key names a feature level; its stride is 2**level.
        scale = 2**int(key)
        self._head[key] = nn_blocks.ConvBN(
            bias_initializer=self.bias_init(scale, input_shape[key][-1]),
            **self._conv_config)
Example No. 2
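A stack builder, apparently for a tiny-YOLO-style backbone stage: a max pool controlled by config.strides followed by a single 3x3 ConvBN block, with the shared layer defaults swapped in for the block and restored afterwards.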
def _tiny_stack(self, inputs, config, name):
    # Downsample with max pooling before the convolution.
    x = tf.keras.layers.MaxPool2D(
        pool_size=2,
        strides=config.strides,
        padding='same',
        data_format=None,
        name=f'{name}_tiny/pool')(inputs)

    # Temporarily override the shared layer defaults for this block.
    self._default_dict['activation'] = self._get_activation(config.activation)
    self._default_dict['name'] = f'{name}_tiny/conv'
    x = nn_blocks.ConvBN(
        filters=config.filters,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding='same',
        **self._default_dict)(x)

    # Restore the defaults so later layers are unaffected.
    self._default_dict['activation'] = self._activation
    self._default_dict['name'] = None
    return x
Example No. 3
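A unit test checking that gradients flow through ConvBN: it computes an MSE loss on a forward pass under tf.GradientTape and asserts that no trainable variable is left without a gradient.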
def test_gradient_pass_though(self, filters):
  loss = tf.keras.losses.MeanSquaredError()
  optimizer = tf.keras.optimizers.SGD()
  with tf.device('/CPU:0'):
    test_layer = nn_blocks.ConvBN(filters, kernel_size=(3, 3), padding='same')

  init = tf.random_normal_initializer()
  x = tf.Variable(initial_value=init(shape=(1, 224, 224, 3), dtype=tf.float32))
  y = tf.Variable(
      initial_value=init(shape=(1, 224, 224, filters), dtype=tf.float32))

  with tf.GradientTape() as tape:
    x_hat = test_layer(x)
    # tf.keras losses take (y_true, y_pred).
    grad_loss = loss(y, x_hat)
  grad = tape.gradient(grad_loss, test_layer.trainable_variables)
  optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
  # Every trainable variable must have received a gradient.
  self.assertNotIn(None, grad)
Example No. 4
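A parameterized shape test: the expected output shape is computed from the standard convolution arithmetic and compared with the shape that ConvBN actually produces.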
def test_pass_through(self, kernel_size, padding, strides):
    # One pixel of padding per side for 'same' (exact for a 3x3 kernel),
    # none for 'valid'.
    if padding == 'same':
        pad_const = 1
    else:
        pad_const = 0
    x = tf.keras.Input(shape=(224, 224, 3))
    test_layer = nn_blocks.ConvBN(
        filters=64,
        kernel_size=kernel_size,
        padding=padding,
        strides=strides,
        trainable=False)
    outx = test_layer(x)
    print(outx.shape.as_list())
    # Convolution arithmetic: out = (in - kernel + 2 * pad) / stride + 1.
    test = [
        None,
        int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),
        int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1),
        64,
    ]
    print(test)
    self.assertAllEqual(outx.shape.as_list(), test)
Example No. 5
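A test helper that builds the inner layers for a CSPStack, either DarkResidual or plain ConvBN blocks, passed in as a list or wrapped in a tf.keras.Sequential model depending on stack_type.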
def build_layer(self, layer_type, filters, filter_scale, count, stack_type,
                downsample):
    # Optionally build the inner layers that the CSPStack will wrap.
    if stack_type is not None:
        layers = []
        if layer_type == "residual":
            for _ in range(count):
                layers.append(
                    nn_blocks.DarkResidual(
                        filters=filters // filter_scale,
                        filter_scale=filter_scale))
        else:
            for _ in range(count):
                layers.append(nn_blocks.ConvBN(filters=filters))

        # Either hand over the raw list or wrap it in a Sequential model.
        if stack_type == "model":
            layers = tf.keras.Sequential(layers=layers)
    else:
        layers = None

    stack = nn_blocks.CSPStack(
        filters=filters,
        filter_scale=filter_scale,
        downsample=downsample,
        model_to_wrap=layers)
    return stack
Example No. 6
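A simpler variant of Example No. 1 in which every feature level gets an identically configured ConvBN block.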
def build(self, input_shape):
    self._head = dict()
    for key in self._key_list:
        # Every feature level shares the same ConvBN configuration.
        self._head[key] = nn_blocks.ConvBN(**self._conv_config)
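
For reference, a minimal standalone sketch of using ConvBN as an ordinary Keras layer. The import path is an assumption: these snippets appear to come from the TensorFlow Model Garden YOLO code, where nn_blocks has lived under official.projects.yolo.modeling.layers (and, in older releases, under official.vision.beta.projects.yolo.modeling.layers); adjust it to your installed version.

import tensorflow as tf
# Assumed import path for the Model Garden YOLO layers; see the note above.
from official.projects.yolo.modeling.layers import nn_blocks

# ConvBN bundles Conv2D, batch normalization, and an activation into one block.
inputs = tf.keras.Input(shape=(224, 224, 3))
x = nn_blocks.ConvBN(filters=32, kernel_size=(3, 3), strides=(2, 2),
                     padding='same')(inputs)
model = tf.keras.Model(inputs, x)
print(model.output_shape)  # (None, 112, 112, 32) with 'same' padding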