Example #1
    def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
        """Build variables and child layers to prepare for calling."""
        if self._use_explicit_padding and self._kernel_size > 1:
            padding_size = nn_layers.get_padding_for_kernel_size(
                self._kernel_size)
            self._pad = tf.keras.layers.ZeroPadding2D(padding_size)
        conv2d_quantized = (helper.Conv2DQuantized if self._use_normalization
                            else helper.Conv2DOutputQuantized)

        self._conv0 = conv2d_quantized(
            filters=self._filters,
            kernel_size=self._kernel_size,
            strides=self._strides,
            padding=self._padding,
            use_bias=self._use_bias,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=helper.NoOpActivation())
        if self._use_normalization:
            self._norm0 = helper.norm_by_activation(
                self._activation, self._norm_with_quantize,
                self._norm)(axis=self._bn_axis,
                            momentum=self._norm_momentum,
                            epsilon=self._norm_epsilon)
        self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())
        super(Conv2DBNBlockQuantized, self).build(input_shape)
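A minimal sketch of the mechanism behind the helper.*Quantized layers and QuantizeWrapperV2 calls above, using the public tensorflow-model-optimization API rather than the model-garden helpers (the layer stack and sizes below are illustrative, not from the original source):

import tensorflow as tf
import tensorflow_model_optimization as tfmot

# A plain conv + BN + ReLU stack; quantize_model rewrites it so that weights
# and activations are fake-quantized during training, which is what the
# helper.*Quantized wrappers above arrange layer by layer.
base = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
    tf.keras.layers.Conv2D(16, 3, padding='same'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.ReLU(),
])
qat_model = tfmot.quantization.keras.quantize_model(base)
_ = qat_model(tf.random.uniform([1, 32, 32, 3]), training=True)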
Example #2
  def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
    """Creates the variables of the head."""
    if self._config_dict['use_separable_conv']:
      conv_op = SeparableConv2DQuantized
    else:
      conv_op = helper.quantize_wrapped_layer(
          tf.keras.layers.Conv2D,
          configs.Default8BitConvQuantizeConfig(
              ['kernel'], ['activation'], False))
    conv_kwargs = {
        'filters': self._config_dict['num_filters'],
        'kernel_size': 3,
        'padding': 'same',
        'bias_initializer': tf.zeros_initializer(),
        'bias_regularizer': self._config_dict['bias_regularizer'],
    }
    if not self._config_dict['use_separable_conv']:
      conv_kwargs.update({
          'kernel_initializer': tf.keras.initializers.RandomNormal(
              stddev=0.01),
          'kernel_regularizer': self._config_dict['kernel_regularizer'],
      })

    base_bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
                  if self._config_dict['use_sync_bn']
                  else tf.keras.layers.BatchNormalization)
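    # norm_by_activation picks the output-quantized BN wrapper unless the
    # activation that follows carries its own output quantizer (as relu/relu6
    # do), in which case the BN needs no quantizer of its own.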
    bn_op = helper.norm_by_activation(
        self._config_dict['activation'],
        helper.quantize_wrapped_layer(
            base_bn_op, configs.Default8BitOutputQuantizeConfig()),
        helper.quantize_wrapped_layer(
            base_bn_op, configs.NoOpQuantizeConfig()))

    bn_kwargs = {
        'axis': self._bn_axis,
        'momentum': self._config_dict['norm_momentum'],
        'epsilon': self._config_dict['norm_epsilon'],
    }

    # Class net.
    self._cls_convs = []
    self._cls_norms = []
    for level in range(
        self._config_dict['min_level'], self._config_dict['max_level'] + 1):
      this_level_cls_norms = []
      for i in range(self._config_dict['num_convs']):
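        # Conv weights are shared across pyramid levels, so the convs are
        # created once (at min_level); the batch norms are per level.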
        if level == self._config_dict['min_level']:
          cls_conv_name = 'classnet-conv_{}'.format(i)
          self._cls_convs.append(conv_op(name=cls_conv_name, **conv_kwargs))
        cls_norm_name = 'classnet-conv-norm_{}_{}'.format(level, i)
        this_level_cls_norms.append(bn_op(name=cls_norm_name, **bn_kwargs))
      self._cls_norms.append(this_level_cls_norms)

    classifier_kwargs = {
        'filters': (
            self._config_dict['num_classes'] *
            self._config_dict['num_anchors_per_location']),
        'kernel_size': 3,
        'padding': 'same',
        'bias_initializer': tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
        'bias_regularizer': self._config_dict['bias_regularizer'],
    }
    if not self._config_dict['use_separable_conv']:
      classifier_kwargs.update({
          'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5),
          'kernel_regularizer': self._config_dict['kernel_regularizer'],
      })
    self._classifier = conv_op(
        name='scores', last_quantize=True, **classifier_kwargs)

    # Box net.
    self._box_convs = []
    self._box_norms = []
    for level in range(
        self._config_dict['min_level'], self._config_dict['max_level'] + 1):
      this_level_box_norms = []
      for i in range(self._config_dict['num_convs']):
        if level == self._config_dict['min_level']:
          box_conv_name = 'boxnet-conv_{}'.format(i)
          self._box_convs.append(conv_op(name=box_conv_name, **conv_kwargs))
        box_norm_name = 'boxnet-conv-norm_{}_{}'.format(level, i)
        this_level_box_norms.append(bn_op(name=box_norm_name, **bn_kwargs))
      self._box_norms.append(this_level_box_norms)

    box_regressor_kwargs = {
        'filters': (self._config_dict['num_params_per_anchor'] *
                    self._config_dict['num_anchors_per_location']),
        'kernel_size': 3,
        'padding': 'same',
        'bias_initializer': tf.zeros_initializer(),
        'bias_regularizer': self._config_dict['bias_regularizer'],
    }
    if not self._config_dict['use_separable_conv']:
      box_regressor_kwargs.update({
          'kernel_initializer': tf.keras.initializers.RandomNormal(
              stddev=1e-5),
          'kernel_regularizer': self._config_dict['kernel_regularizer'],
      })
    self._box_regressor = conv_op(
        name='boxes', last_quantize=True, **box_regressor_kwargs)

    # Attribute learning nets.
    if self._config_dict['attribute_heads']:
      self._att_predictors = {}
      self._att_convs = {}
      self._att_norms = {}

      for att_config in self._config_dict['attribute_heads']:
        att_name = att_config['name']
        att_type = att_config['type']
        att_size = att_config['size']
        att_convs_i = []
        att_norms_i = []

        # Build conv and norm layers.
        for level in range(self._config_dict['min_level'],
                           self._config_dict['max_level'] + 1):
          this_level_att_norms = []
          for i in range(self._config_dict['num_convs']):
            if level == self._config_dict['min_level']:
              att_conv_name = '{}-conv_{}'.format(att_name, i)
              att_convs_i.append(conv_op(name=att_conv_name, **conv_kwargs))
            att_norm_name = '{}-conv-norm_{}_{}'.format(att_name, level, i)
            this_level_att_norms.append(bn_op(name=att_norm_name, **bn_kwargs))
          att_norms_i.append(this_level_att_norms)
        self._att_convs[att_name] = att_convs_i
        self._att_norms[att_name] = att_norms_i

        # Build the final prediction layer.
        att_predictor_kwargs = {
            'filters':
                (att_size * self._config_dict['num_anchors_per_location']),
            'kernel_size': 3,
            'padding': 'same',
            'bias_initializer': tf.zeros_initializer(),
            'bias_regularizer': self._config_dict['bias_regularizer'],
        }
        if att_type == 'regression':
          att_predictor_kwargs.update(
              {'bias_initializer': tf.zeros_initializer()})
        elif att_type == 'classification':
          att_predictor_kwargs.update({
              'bias_initializer':
                  tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
          })
        else:
          raise ValueError(
              'Attribute head type {} not supported.'.format(att_type))

        if not self._config_dict['use_separable_conv']:
          att_predictor_kwargs.update({
              'kernel_initializer':
                  tf.keras.initializers.RandomNormal(stddev=1e-5),
              'kernel_regularizer':
                  self._config_dict['kernel_regularizer'],
          })

        self._att_predictors[att_name] = conv_op(
            name='{}_attributes'.format(att_name), **att_predictor_kwargs)

    super().build(input_shape)
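A minimal sketch of the sharing pattern this build() sets up, with illustrative sizes: the convs are created once and reused at every pyramid level, while each (level, conv) pair gets its own batch norm:

import tensorflow as tf

min_level, max_level, num_convs, filters = 3, 5, 2, 32
# One set of convs, shared across levels; one norm per (level, conv) pair.
convs = [tf.keras.layers.Conv2D(filters, 3, padding='same')
         for _ in range(num_convs)]
norms = [[tf.keras.layers.BatchNormalization() for _ in range(num_convs)]
         for _ in range(min_level, max_level + 1)]

features = {
    level: tf.random.uniform([1, 2**(8 - level), 2**(8 - level), filters])
    for level in range(min_level, max_level + 1)
}
for idx, level in enumerate(range(min_level, max_level + 1)):
    x = features[level]
    for conv, norm in zip(convs, norms[idx]):
        x = tf.nn.relu(norm(conv(x), training=True))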
Example #3
File: nn_layers.py Project: npfp/models
    def build(self, input_shape):
        height = input_shape[1]
        width = input_shape[2]
        channels = input_shape[3]

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if self._use_sync_bn else
                      tf.keras.layers.BatchNormalization)
        norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
        norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
        norm = helper.norm_by_activation(self._activation, norm_with_quantize,
                                         norm_no_quantize)

        self.aspp_layers = []
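        # Each entry of aspp_layers is one parallel branch; every branch sees
        # the same input, and the branch outputs are concatenated and projected.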

        conv1 = helper.Conv2DQuantized(
            filters=self._output_channels,
            kernel_size=(1, 1),
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            use_bias=False,
            activation=helper.NoOpActivation())
        norm1 = norm(axis=self._bn_axis,
                     momentum=self._batchnorm_momentum,
                     epsilon=self._batchnorm_epsilon)

        self.aspp_layers.append([conv1, norm1])

        for dilation_rate in self._dilation_rates:
            leading_layers = []
            kernel_size = (3, 3)
            if self._use_depthwise_convolution:
                leading_layers += [
                    helper.DepthwiseConv2DOutputQuantized(
                        depth_multiplier=1,
                        kernel_size=kernel_size,
                        padding='same',
                        depthwise_regularizer=self._kernel_regularizer,
                        depthwise_initializer=self._kernel_initializer,
                        dilation_rate=dilation_rate,
                        use_bias=False,
                        activation=helper.NoOpActivation())
                ]
                kernel_size = (1, 1)
            conv_dilation = leading_layers + [
                helper.Conv2DQuantized(
                    filters=self._output_channels,
                    kernel_size=kernel_size,
                    padding='same',
                    kernel_regularizer=self._kernel_regularizer,
                    kernel_initializer=self._kernel_initializer,
                    dilation_rate=dilation_rate,
                    use_bias=False,
                    activation=helper.NoOpActivation())
            ]
            norm_dilation = norm(axis=self._bn_axis,
                                 momentum=self._batchnorm_momentum,
                                 epsilon=self._batchnorm_epsilon)

            self.aspp_layers.append(conv_dilation + [norm_dilation])

        if self._pool_kernel_size is None:
            pooling = [
                helper.GlobalAveragePooling2DQuantized(),
                helper.ReshapeQuantized((1, 1, channels))
            ]
        else:
            pooling = [
                helper.AveragePooling2DQuantized(self._pool_kernel_size)
            ]

        conv2 = helper.Conv2DQuantized(
            filters=self._output_channels,
            kernel_size=(1, 1),
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            use_bias=False,
            activation=helper.NoOpActivation())
        norm2 = norm(axis=self._bn_axis,
                     momentum=self._batchnorm_momentum,
                     epsilon=self._batchnorm_epsilon)

        self.aspp_layers.append(pooling + [conv2, norm2])
        self._resizing_layer = helper.ResizingQuantized(
            height, width, interpolation=self._interpolation)

        self._projection = [
            helper.Conv2DQuantized(filters=self._output_channels,
                                   kernel_size=(1, 1),
                                   kernel_initializer=self._kernel_initializer,
                                   kernel_regularizer=self._kernel_regularizer,
                                   use_bias=False,
                                   activation=helper.NoOpActivation()),
            norm_with_quantize(axis=self._bn_axis,
                               momentum=self._batchnorm_momentum,
                               epsilon=self._batchnorm_epsilon)
        ]
        self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
        self._concat_layer = helper.ConcatenateQuantized(axis=-1)
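For orientation, a non-quantized sketch of the ASPP dataflow these layers implement (output channels and dilation rates below are illustrative): parallel 1x1, dilated 3x3, and image-pooling branches over the same input, concatenated and projected:

import tensorflow as tf

x = tf.random.uniform([1, 32, 32, 64])
out_ch, rates = 16, [6, 12, 18]

# Branch 1: pointwise conv; branches 2..n: dilated 3x3 convs.
branches = [tf.keras.layers.Conv2D(out_ch, 1, use_bias=False)(x)]
for rate in rates:
    branches.append(tf.keras.layers.Conv2D(
        out_ch, 3, padding='same', dilation_rate=rate, use_bias=False)(x))

# Image-pooling branch: global context resized back to the input grid.
pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(x)
pooled = tf.keras.layers.Conv2D(out_ch, 1, use_bias=False)(pooled)
branches.append(tf.image.resize(pooled, tf.shape(x)[1:3]))

# Concatenate all branches and project to the output width.
y = tf.keras.layers.Conv2D(out_ch, 1, use_bias=False)(
    tf.concat(branches, axis=-1))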
Example #4
File: nn_layers.py Project: npfp/models
    def build(self, input_shape: Sequence[tf.TensorShape]):
        """Creates the variables of the segmentation head."""
        # When input_shape is a list/tuple, the first entry holds the backbone
        # features used to resize the decoder features (the second entry) when
        # the feature fusion type is `deeplabv3plus`.
        backbone_shape = input_shape[0]
        use_depthwise_convolution = self._config_dict[
            'use_depthwise_convolution']
        random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
        conv_kwargs = {
            'kernel_size': 3 if not use_depthwise_convolution else 1,
            'padding': 'same',
            'use_bias': False,
            'kernel_initializer': random_initializer,
            'kernel_regularizer': self._config_dict['kernel_regularizer'],
        }

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if self._config_dict['use_sync_bn'] else
                      tf.keras.layers.BatchNormalization)
        norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
        norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
        norm = helper.norm_by_activation(self._config_dict['activation'],
                                         norm_with_quantize, norm_no_quantize)

        bn_kwargs = {
            'axis': self._bn_axis,
            'momentum': self._config_dict['norm_momentum'],
            'epsilon': self._config_dict['norm_epsilon'],
        }

        if self._config_dict['feature_fusion'] == 'deeplabv3plus':
            # Deeplabv3+ feature fusion layers.
            self._dlv3p_conv = helper.Conv2DQuantized(
                kernel_size=1,
                padding='same',
                use_bias=False,
                kernel_initializer=tf.keras.initializers.RandomNormal(
                    stddev=0.01),
                kernel_regularizer=self._config_dict['kernel_regularizer'],
                name='segmentation_head_deeplabv3p_fusion_conv',
                filters=self._config_dict['low_level_num_filters'],
                activation=helper.NoOpActivation())

            self._dlv3p_norm = norm(
                name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)

        # Segmentation head layers.
        self._convs = []
        self._norms = []
        for i in range(self._config_dict['num_convs']):
            if use_depthwise_convolution:
                self._convs.append(
                    helper.DepthwiseConv2DQuantized(
                        name='segmentation_head_depthwise_conv_{}'.format(i),
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        depthwise_initializer=random_initializer,
                        depthwise_regularizer=self._config_dict[
                            'kernel_regularizer'],
                        depth_multiplier=1,
                        activation=helper.NoOpActivation()))
                norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
                self._norms.append(norm(name=norm_name, **bn_kwargs))
            conv_name = 'segmentation_head_conv_{}'.format(i)
            self._convs.append(
                helper.Conv2DQuantized(
                    name=conv_name,
                    filters=self._config_dict['num_filters'],
                    activation=helper.NoOpActivation(),
                    **conv_kwargs))
            norm_name = 'segmentation_head_norm_{}'.format(i)
            self._norms.append(norm(name=norm_name, **bn_kwargs))

        self._classifier = helper.Conv2DOutputQuantized(
            name='segmentation_output',
            filters=self._config_dict['num_classes'],
            kernel_size=self._config_dict['prediction_kernel_size'],
            padding='same',
            bias_initializer=tf.zeros_initializer(),
            kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
            kernel_regularizer=self._config_dict['kernel_regularizer'],
            bias_regularizer=self._config_dict['bias_regularizer'],
            activation=helper.NoOpActivation())

        self._upsampling_layer = helper.UpSampling2DQuantized(
            size=(self._config_dict['upsample_factor'],
                  self._config_dict['upsample_factor']),
            interpolation='nearest')
        self._resizing_layer = tf.keras.layers.Resizing(
            backbone_shape[1], backbone_shape[2], interpolation='bilinear')

        self._concat_layer = helper.ConcatenateQuantized(axis=self._bn_axis)

        super().build(input_shape)
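A non-quantized sketch of the dataflow this head prepares when feature_fusion is 'deeplabv3plus', with illustrative shapes and filter counts: project the low-level backbone features, resize the decoder features to match them, concatenate, run the conv stack, then classify and upsample:

import tensorflow as tf

backbone_feats = tf.random.uniform([1, 64, 64, 24])  # low-level features
decoder_feats = tf.random.uniform([1, 16, 16, 128])

low = tf.keras.layers.Conv2D(48, 1, use_bias=False)(backbone_feats)
up = tf.keras.layers.Resizing(64, 64, interpolation='bilinear')(decoder_feats)
x = tf.concat([up, low], axis=-1)
for _ in range(2):  # num_convs
    x = tf.nn.relu(
        tf.keras.layers.Conv2D(64, 3, padding='same', use_bias=False)(x))
logits = tf.keras.layers.Conv2D(21, 1, padding='same')(x)  # num_classes = 21
logits = tf.keras.layers.UpSampling2D(
    size=(4, 4), interpolation='nearest')(logits)  # upsample_factor = 4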
Example #5
    def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
        """Build variables and child layers to prepare for calling."""
        expand_filters = self._in_filters
        if self._expand_ratio > 1:
            # First 1x1 conv for channel expansion.
            expand_filters = nn_layers.make_divisible(
                self._in_filters * self._expand_ratio, self._divisible_by)

            expand_kernel = 1 if self._use_depthwise else self._kernel_size
            expand_stride = 1 if self._use_depthwise else self._strides

            self._conv0 = helper.Conv2DQuantized(
                filters=expand_filters,
                kernel_size=expand_kernel,
                strides=expand_stride,
                padding='same',
                use_bias=False,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=helper.NoOpActivation())
            self._norm0 = helper.norm_by_activation(
                self._activation, self._norm_with_quantize,
                self._norm)(axis=self._bn_axis,
                            momentum=self._norm_momentum,
                            epsilon=self._norm_epsilon)
            self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
                tf_utils.get_activation(self._activation,
                                        use_keras_layer=True),
                configs.Default8BitActivationQuantizeConfig())
        if self._use_depthwise:
            # Depthwise conv.
            self._conv1 = helper.DepthwiseConv2DQuantized(
                kernel_size=(self._kernel_size, self._kernel_size),
                strides=self._strides,
                padding='same',
                depth_multiplier=1,
                dilation_rate=self._dilation_rate,
                use_bias=False,
                depthwise_initializer=self._kernel_initializer,
                depthwise_regularizer=self._depthsize_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=helper.NoOpActivation())
            self._norm1 = helper.norm_by_activation(
                self._depthwise_activation, self._norm_with_quantize,
                self._norm)(axis=self._bn_axis,
                            momentum=self._norm_momentum,
                            epsilon=self._norm_epsilon)
            self._depthwise_activation_layer = (
                tfmot.quantization.keras.QuantizeWrapperV2(
                    tf_utils.get_activation(self._depthwise_activation,
                                            use_keras_layer=True),
                    configs.Default8BitActivationQuantizeConfig()))

        # Squeeze and excitation.
        if self._se_ratio and 0 < self._se_ratio <= 1:
            logging.info('Use Squeeze and excitation.')
            in_filters = self._in_filters
            if self._expand_se_in_filters:
                in_filters = expand_filters
            self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized(
                in_filters=in_filters,
                out_filters=expand_filters,
                se_ratio=self._se_ratio,
                divisible_by=self._divisible_by,
                round_down_protect=self._se_round_down_protect,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=self._se_inner_activation,
                gating_activation=self._se_gating_activation)
        else:
            self._squeeze_excitation = None

        # Last 1x1 conv.
        self._conv2 = helper.Conv2DQuantized(
            filters=self._out_filters,
            kernel_size=1,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=helper.NoOpActivation())
        self._norm2 = self._norm_with_quantize(axis=self._bn_axis,
                                               momentum=self._norm_momentum,
                                               epsilon=self._norm_epsilon)

        if self._stochastic_depth_drop_rate:
            self._stochastic_depth = nn_layers.StochasticDepth(
                self._stochastic_depth_drop_rate)
        else:
            self._stochastic_depth = None
        self._add = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Add(),
            configs.Default8BitQuantizeConfig([], [], True))

        super(InvertedBottleneckBlockQuantized, self).build(input_shape)
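Finally, a plain-Keras sketch of the inverted-bottleneck dataflow that this build() quantizes, with illustrative sizes: 1x1 expansion, depthwise conv, squeeze-and-excite gating, 1x1 projection with no activation, and a residual add when stride is 1 and the filter counts match:

import tensorflow as tf

x = tf.random.uniform([1, 56, 56, 24])
expand = 24 * 6  # expand_ratio = 6

h = tf.nn.relu6(tf.keras.layers.Conv2D(expand, 1, use_bias=False)(x))
h = tf.nn.relu6(
    tf.keras.layers.DepthwiseConv2D(3, padding='same', use_bias=False)(h))

# Squeeze-and-excite: global context gates the expanded channels.
se = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(h)
se = tf.nn.relu(tf.keras.layers.Conv2D(expand // 4, 1)(se))
h = h * tf.nn.sigmoid(tf.keras.layers.Conv2D(expand, 1)(se))

h = tf.keras.layers.Conv2D(24, 1, use_bias=False)(h)  # linear projection
y = tf.keras.layers.Add()([x, h])                      # residual connection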