Example #1
def _clone_function_for_fpn(layer):
    if isinstance(layer,
                  (tf.keras.layers.BatchNormalization,
                   tf.keras.layers.experimental.SyncBatchNormalization)):
        return tfmot.quantization.keras.quantize_annotate_layer(
            qat_nn_layers.BatchNormalizationWrapper(layer),
            qat_configs.Default8BitOutputQuantizeConfig())
    return layer
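A minimal usage sketch (not from the source): a clone function like this is normally passed to tf.keras.models.clone_model so every (Sync)BatchNormalization layer of an existing float FPN model is annotated before quantization is applied. `fpn_model` and the contents of the quantize scope are assumptions here.

import tensorflow as tf
import tensorflow_model_optimization as tfmot

# Annotate BN layers in an already-built float model (assumed: `fpn_model`).
annotated_fpn = tf.keras.models.clone_model(
    fpn_model, clone_function=_clone_function_for_fpn)

# The custom wrapper and config classes must be registered in the quantize
# scope before quantize_apply deserializes the annotated model.
with tfmot.quantization.keras.quantize_scope(
    {'BatchNormalizationWrapper': qat_nn_layers.BatchNormalizationWrapper,
     'Default8BitOutputQuantizeConfig':
         qat_configs.Default8BitOutputQuantizeConfig}):
    quantized_fpn = tfmot.quantization.keras.quantize_apply(annotated_fpn)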
Example #2
def _create_layer_metadata(
    self, layer_class_name: str
) -> Mapping[str, tfmot.quantization.keras.QuantizeConfig]:
    if layer_class_name in _LAYER_NAMES:
        layer_metadata = {'quantize_config': configs.NoOpQuantizeConfig()}
    else:
        layer_metadata = {
            'quantize_config': configs.Default8BitOutputQuantizeConfig()
        }
    return layer_metadata
Example #3
    def replacement(self, match_layer: LayerNode) -> LayerNode:
        """See base class."""
        bottleneck_layer = match_layer.layer
        bottleneck_config = bottleneck_layer['config']
        bottleneck_names_and_weights = list(match_layer.names_and_weights)
        quantized_layer = self._quantized_layer_class(**bottleneck_config)
        dummy_input_shape = [1, 64, 128, 1]
        # SegmentationHead layer requires a tuple of 2 tensors.
        if isinstance(quantized_layer,
                      quantized_nn_layers.SegmentationHeadQuantized):
            dummy_input_shape = ([1, 1, 1, 1], [1, 1, 1, 1])
        quantized_layer.compute_output_shape(dummy_input_shape)
        quantized_names_and_weights = zip(
            [weight.name for weight in quantized_layer.weights],
            quantized_layer.get_weights())
        match_idx = 0
        names_and_weights = []
        for name_and_weight in quantized_names_and_weights:
            if not self._is_quantization_weight_name(name=name_and_weight[0]):
                name_and_weight = bottleneck_names_and_weights[match_idx]
                match_idx = match_idx + 1
            names_and_weights.append(name_and_weight)

        if match_idx != len(bottleneck_names_and_weights):
            raise ValueError(
                'Only {}/{} of the Bottleneck weights were transformed.'.format(
                    match_idx, len(bottleneck_names_and_weights)))
        quantized_layer_config = keras.layers.serialize(quantized_layer)
        quantized_layer_config['name'] = quantized_layer_config['config'][
            'name']
        if bottleneck_layer['class_name'] in [
                'Vision>Conv2DBNBlock',
                'Vision>InvertedBottleneckBlock',
                'Vision>SegmentationHead',
                'Vision>SpatialPyramidPooling',
                'Vision>ASPP',
                # TODO(yeqing): Remove the Beta layers.
                'Beta>Conv2DBNBlock',
                'Beta>InvertedBottleneckBlock',
                'Beta>SegmentationHead',
                'Beta>SpatialPyramidPooling',
                'Beta>ASPP'
        ]:
            layer_metadata = {'quantize_config': configs.NoOpQuantizeConfig()}
        else:
            layer_metadata = {
                'quantize_config': configs.Default8BitOutputQuantizeConfig()
            }

        return LayerNode(quantized_layer_config,
                         metadata=layer_metadata,
                         names_and_weights=names_and_weights)
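A self-contained sketch (plain Python, not from the source) of the weight-matching loop above: weights created by the quantizers keep their freshly initialized values, while every other weight is taken, in order, from the original float layer. The name filter below is illustrative; the project's actual _is_quantization_weight_name may differ.

def _looks_like_quantizer_weight(name: str) -> bool:
    # Hypothetical stand-in for self._is_quantization_weight_name().
    return 'quantize' in name or name.endswith(('min:0', 'max:0'))

float_weights = [('conv/kernel:0', 'W'), ('conv/bias:0', 'b')]
quantized_weights = [('conv/kernel:0', 'W_init'),
                     ('quantize/kernel_min:0', 0.0),
                     ('quantize/kernel_max:0', 6.0),
                     ('conv/bias:0', 'b_init')]

match_idx, merged = 0, []
for name, value in quantized_weights:
    if not _looks_like_quantizer_weight(name):
        name, value = float_weights[match_idx]
        match_idx += 1
    merged.append((name, value))
assert match_idx == len(float_weights)  # every float weight was transferred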
Example #4
DepthwiseConv2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.DepthwiseConv2D,
    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
                                          False))
DepthwiseConv2DOutputQuantized = quantize_wrapped_layer(
    tf.keras.layers.DepthwiseConv2D,
    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
                                          True))
GlobalAveragePooling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.GlobalAveragePooling2D,
    configs.Default8BitQuantizeConfig([], [], True))
AveragePooling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.AveragePooling2D,
    configs.Default8BitQuantizeConfig([], [], True))
ResizingQuantized = quantize_wrapped_layer(
    tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True))
ConcatenateQuantized = quantize_wrapped_layer(
    tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [],
                                                                   True))
UpSampling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.UpSampling2D,
    configs.Default8BitQuantizeConfig([], [], True))
ReshapeQuantized = quantize_wrapped_layer(
    tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True))

# pylint:disable=g-long-lambda
BatchNormalizationQuantized = lambda norm_layer: quantize_wrapped_layer(
    norm_layer, configs.Default8BitOutputQuantizeConfig())
BatchNormalizationNoQuantized = lambda norm_layer: quantize_wrapped_layer(
    norm_layer, configs.NoOpQuantizeConfig())
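A hedged usage sketch: each alias above is assumed to behave like the Keras layer it wraps, so it can be constructed and called as usual; the BatchNormalization variants are factories that take the norm class first.

import tensorflow as tf

pool = GlobalAveragePooling2DQuantized()
pooled = pool(tf.zeros([1, 7, 7, 128]))   # global average pool -> [1, 128]

bn_cls = BatchNormalizationQuantized(tf.keras.layers.BatchNormalization)
bn = bn_cls(axis=-1)
normalized = bn(tf.zeros([1, 7, 7, 128]))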
Example #5
  def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
    """Creates the variables of the head."""
    if self._config_dict['use_separable_conv']:
      conv_op = SeparableConv2DQuantized
    else:
      conv_op = helper.quantize_wrapped_layer(
          tf.keras.layers.Conv2D,
          configs.Default8BitConvQuantizeConfig(
              ['kernel'], ['activation'], False))
    conv_kwargs = {
        'filters': self._config_dict['num_filters'],
        'kernel_size': 3,
        'padding': 'same',
        'bias_initializer': tf.zeros_initializer(),
        'bias_regularizer': self._config_dict['bias_regularizer'],
    }
    if not self._config_dict['use_separable_conv']:
      conv_kwargs.update({
          'kernel_initializer': tf.keras.initializers.RandomNormal(
              stddev=0.01),
          'kernel_regularizer': self._config_dict['kernel_regularizer'],
      })

    base_bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
                  if self._config_dict['use_sync_bn']
                  else tf.keras.layers.BatchNormalization)
    bn_op = helper.norm_by_activation(
        self._config_dict['activation'],
        helper.quantize_wrapped_layer(
            base_bn_op, configs.Default8BitOutputQuantizeConfig()),
        helper.quantize_wrapped_layer(
            base_bn_op, configs.NoOpQuantizeConfig()))

    bn_kwargs = {
        'axis': self._bn_axis,
        'momentum': self._config_dict['norm_momentum'],
        'epsilon': self._config_dict['norm_epsilon'],
    }

    # Class net.
    self._cls_convs = []
    self._cls_norms = []
    for level in range(
        self._config_dict['min_level'], self._config_dict['max_level'] + 1):
      this_level_cls_norms = []
      for i in range(self._config_dict['num_convs']):
        if level == self._config_dict['min_level']:
          cls_conv_name = 'classnet-conv_{}'.format(i)
          self._cls_convs.append(conv_op(name=cls_conv_name, **conv_kwargs))
        cls_norm_name = 'classnet-conv-norm_{}_{}'.format(level, i)
        this_level_cls_norms.append(bn_op(name=cls_norm_name, **bn_kwargs))
      self._cls_norms.append(this_level_cls_norms)

    classifier_kwargs = {
        'filters': (
            self._config_dict['num_classes'] *
            self._config_dict['num_anchors_per_location']),
        'kernel_size': 3,
        'padding': 'same',
        'bias_initializer': tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
        'bias_regularizer': self._config_dict['bias_regularizer'],
    }
    if not self._config_dict['use_separable_conv']:
      classifier_kwargs.update({
          'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5),
          'kernel_regularizer': self._config_dict['kernel_regularizer'],
      })
    self._classifier = conv_op(
        name='scores', last_quantize=True, **classifier_kwargs)

    # Box net.
    self._box_convs = []
    self._box_norms = []
    for level in range(
        self._config_dict['min_level'], self._config_dict['max_level'] + 1):
      this_level_box_norms = []
      for i in range(self._config_dict['num_convs']):
        if level == self._config_dict['min_level']:
          box_conv_name = 'boxnet-conv_{}'.format(i)
          self._box_convs.append(conv_op(name=box_conv_name, **conv_kwargs))
        box_norm_name = 'boxnet-conv-norm_{}_{}'.format(level, i)
        this_level_box_norms.append(bn_op(name=box_norm_name, **bn_kwargs))
      self._box_norms.append(this_level_box_norms)

    box_regressor_kwargs = {
        'filters': (self._config_dict['num_params_per_anchor'] *
                    self._config_dict['num_anchors_per_location']),
        'kernel_size': 3,
        'padding': 'same',
        'bias_initializer': tf.zeros_initializer(),
        'bias_regularizer': self._config_dict['bias_regularizer'],
    }
    if not self._config_dict['use_separable_conv']:
      box_regressor_kwargs.update({
          'kernel_initializer': tf.keras.initializers.RandomNormal(
              stddev=1e-5),
          'kernel_regularizer': self._config_dict['kernel_regularizer'],
      })
    self._box_regressor = conv_op(
        name='boxes', last_quantize=True, **box_regressor_kwargs)

    # Attribute learning nets.
    if self._config_dict['attribute_heads']:
      self._att_predictors = {}
      self._att_convs = {}
      self._att_norms = {}

      for att_config in self._config_dict['attribute_heads']:
        att_name = att_config['name']
        att_type = att_config['type']
        att_size = att_config['size']
        att_convs_i = []
        att_norms_i = []

        # Build conv and norm layers.
        for level in range(self._config_dict['min_level'],
                           self._config_dict['max_level'] + 1):
          this_level_att_norms = []
          for i in range(self._config_dict['num_convs']):
            if level == self._config_dict['min_level']:
              att_conv_name = '{}-conv_{}'.format(att_name, i)
              att_convs_i.append(conv_op(name=att_conv_name, **conv_kwargs))
            att_norm_name = '{}-conv-norm_{}_{}'.format(att_name, level, i)
            this_level_att_norms.append(bn_op(name=att_norm_name, **bn_kwargs))
          att_norms_i.append(this_level_att_norms)
        self._att_convs[att_name] = att_convs_i
        self._att_norms[att_name] = att_norms_i

        # Build the final prediction layer.
        att_predictor_kwargs = {
            'filters':
                (att_size * self._config_dict['num_anchors_per_location']),
            'kernel_size': 3,
            'padding': 'same',
            'bias_initializer': tf.zeros_initializer(),
            'bias_regularizer': self._config_dict['bias_regularizer'],
        }
        if att_type == 'regression':
          att_predictor_kwargs.update(
              {'bias_initializer': tf.zeros_initializer()})
        elif att_type == 'classification':
          att_predictor_kwargs.update({
              'bias_initializer':
                  tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
          })
        else:
          raise ValueError(
              'Attribute head type {} not supported.'.format(att_type))

        if not self._config_dict['use_separable_conv']:
          att_predictor_kwargs.update({
              'kernel_initializer':
                  tf.keras.initializers.RandomNormal(stddev=1e-5),
              'kernel_regularizer':
                  self._config_dict['kernel_regularizer'],
          })

        self._att_predictors[att_name] = conv_op(
            name='{}_attributes'.format(att_name), **att_predictor_kwargs)

    super().build(input_shape)
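helper.norm_by_activation is not shown in these examples; judging by the relu/relu6 selection spelled out in Examples #9 and #10, a plausible sketch is:

def norm_by_activation(activation, norm_quantized, norm_no_quantized):
  # ReLU/ReLU6 outputs get their own FakeQuant, so the preceding BN can skip
  # its output quantizer; any other activation keeps the quantized BN.
  if activation in ('relu', 'relu6'):
    return norm_no_quantized
  return norm_quantized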
Example #6
    def __init__(
            self,
            filters: int,
            strides: int,
            dilation_rate: int = 1,
            use_projection: bool = False,
            se_ratio: Optional[float] = None,
            resnetd_shortcut: bool = False,
            stochastic_depth_drop_rate: Optional[float] = None,
            kernel_initializer: str = 'VarianceScaling',
            kernel_regularizer: tf.keras.regularizers.Regularizer = None,
            bias_regularizer: tf.keras.regularizers.Regularizer = None,
            activation: str = 'relu',
            use_sync_bn: bool = False,
            norm_momentum: float = 0.99,
            norm_epsilon: float = 0.001,
            bn_trainable: bool = True,  # pytype: disable=annotation-type-mismatch  # typed-keras
            **kwargs):
        """Initializes a standard bottleneck block with BN after convolutions.

    Args:
      filters: An `int` number of filters for the first two convolutions. Note
        that the third and final convolution will use 4 times as many filters.
      strides: An `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      dilation_rate: An `int` dilation rate of the convolutions. Defaults to 1.
      use_projection: A `bool` for whether this block should use a projection
        shortcut (versus the default identity shortcut). This is usually `True`
        for the first block of a block group, which may change the number of
        filters and the resolution.
      se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
      resnetd_shortcut: A `bool`. If True, apply the resnetd style modification
        to the shortcut connection.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
        the stochastic depth layer.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
        Defaults to None.
      activation: A `str` name of the activation function.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      bn_trainable: A `bool` that indicates whether batch norm layers should be
        trainable. Defaults to True.
      **kwargs: Additional keyword arguments to be passed.
    """
        super(BottleneckBlockQuantized, self).__init__(**kwargs)

        self._filters = filters
        self._strides = strides
        self._dilation_rate = dilation_rate
        self._use_projection = use_projection
        self._se_ratio = se_ratio
        self._resnetd_shortcut = resnetd_shortcut
        self._use_sync_bn = use_sync_bn
        self._activation = activation
        self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
        self._kernel_initializer = kernel_initializer
        self._norm_momentum = norm_momentum
        self._norm_epsilon = norm_epsilon
        self._kernel_regularizer = kernel_regularizer
        self._bias_regularizer = bias_regularizer
        if use_sync_bn:
            self._norm = _quantize_wrapped_layer(
                tf.keras.layers.experimental.SyncBatchNormalization,
                configs.NoOpQuantizeConfig())
            self._norm_with_quantize = _quantize_wrapped_layer(
                tf.keras.layers.experimental.SyncBatchNormalization,
                configs.Default8BitOutputQuantizeConfig())
        else:
            self._norm = _quantize_wrapped_layer(
                tf.keras.layers.BatchNormalization,
                configs.NoOpQuantizeConfig())
            self._norm_with_quantize = _quantize_wrapped_layer(
                tf.keras.layers.BatchNormalization,
                configs.Default8BitOutputQuantizeConfig())
        if tf.keras.backend.image_data_format() == 'channels_last':
            self._bn_axis = -1
        else:
            self._bn_axis = 1
        self._bn_trainable = bn_trainable
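A hedged usage sketch (the block's build() and call() are not shown above): the quantized bottleneck is assumed to be a drop-in replacement for the float BottleneckBlock, so it is constructed and invoked like any Keras layer.

import tensorflow as tf

block = BottleneckBlockQuantized(filters=64, strides=1, use_projection=True)
outputs = block(tf.zeros([1, 56, 56, 256]))  # builds conv/BN and FakeQuant vars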
Example #7
    def __init__(self,
                 in_filters,
                 out_filters,
                 expand_ratio,
                 strides,
                 kernel_size=3,
                 se_ratio=None,
                 stochastic_depth_drop_rate=None,
                 kernel_initializer='VarianceScaling',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activation='relu',
                 se_inner_activation='relu',
                 se_gating_activation='sigmoid',
                 se_round_down_protect=True,
                 expand_se_in_filters=False,
                 depthwise_activation=None,
                 use_sync_bn=False,
                 dilation_rate=1,
                 divisible_by=1,
                 regularize_depthwise=False,
                 use_depthwise=True,
                 use_residual=True,
                 norm_momentum=0.99,
                 norm_epsilon=0.001,
                 output_intermediate_endpoints=False,
                 **kwargs):
        """Initializes an inverted bottleneck block with BN after convolutions.

    Args:
      in_filters: An `int` number of filters of the input tensor.
      out_filters: An `int` number of filters of the output tensor.
      expand_ratio: An `int` of expand_ratio for an inverted bottleneck block.
      strides: An `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      kernel_size: An `int` kernel_size of the depthwise conv layer.
      se_ratio: A `float` or None. If not None, se ratio for the squeeze and
        excitation layer.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
        the stochastic depth layer.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
        Defaults to None.
      activation: A `str` name of the activation function.
      se_inner_activation: A `str` name of squeeze-excitation inner activation.
      se_gating_activation: A `str` name of squeeze-excitation gating
        activation.
      se_round_down_protect: A `bool` of whether rounding down by more than 10%
        is allowed in the SE layer.
      expand_se_in_filters: A `bool` of whether or not to expand in_filter in
        squeeze and excitation layer.
      depthwise_activation: A `str` name of the activation function for
        depthwise only.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      dilation_rate: An `int` that specifies the dilation rate to use for
        dilated convolution; the same value is used for all spatial dimensions.
      divisible_by: An `int` that ensures all inner dimensions are divisible by
        this number.
      regularize_depthwise: A `bool` of whether or not to apply regularization
        on the depthwise convolution.
      use_depthwise: A `bool`. If False, fused convolutions are used instead of
        depthwise convolutions.
      use_residual: A `bool` of whether to include residual connection between
        input and output.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      output_intermediate_endpoints: A `bool` of whether or not output the
        intermediate endpoints.
      **kwargs: Additional keyword arguments to be passed.
    """
        super(InvertedBottleneckBlockQuantized, self).__init__(**kwargs)

        self._in_filters = in_filters
        self._out_filters = out_filters
        self._expand_ratio = expand_ratio
        self._strides = strides
        self._kernel_size = kernel_size
        self._se_ratio = se_ratio
        self._divisible_by = divisible_by
        self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
        self._dilation_rate = dilation_rate
        self._use_sync_bn = use_sync_bn
        self._regularize_depthwise = regularize_depthwise
        self._use_depthwise = use_depthwise
        self._use_residual = use_residual
        self._activation = activation
        self._se_inner_activation = se_inner_activation
        self._se_gating_activation = se_gating_activation
        self._se_round_down_protect = se_round_down_protect
        self._depthwise_activation = depthwise_activation
        self._kernel_initializer = kernel_initializer
        self._norm_momentum = norm_momentum
        self._norm_epsilon = norm_epsilon
        self._kernel_regularizer = kernel_regularizer
        self._bias_regularizer = bias_regularizer
        self._expand_se_in_filters = expand_se_in_filters
        self._output_intermediate_endpoints = output_intermediate_endpoints

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if use_sync_bn else tf.keras.layers.BatchNormalization)
        self._norm_with_quantize = _quantize_wrapped_layer(
            norm_layer, configs.Default8BitOutputQuantizeConfig())
        self._norm = _quantize_wrapped_layer(norm_layer,
                                             configs.NoOpQuantizeConfig())

        if tf.keras.backend.image_data_format() == 'channels_last':
            self._bn_axis = -1
        else:
            self._bn_axis = 1
        if not depthwise_activation:
            self._depthwise_activation = activation
        if regularize_depthwise:
            self._depthsize_regularizer = kernel_regularizer
        else:
            self._depthsize_regularizer = None
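The divisible_by and se_round_down_protect arguments imply the usual MobileNet-style rounding of inner channel counts. A sketch of that rounding (the project's real helper, e.g. a make_divisible utility, may live elsewhere and differ in detail):

def make_divisible(value, divisor, round_down_protect=True):
    # Round to the nearest multiple of `divisor`; with protection enabled,
    # never round down by more than roughly 10%.
    new_value = max(divisor, int(value + divisor / 2) // divisor * divisor)
    if round_down_protect and new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)

expanded_filters = make_divisible(24 * 6, divisor=8)  # -> 144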
Example #8
    def __init__(self,
                 filters: int,
                 kernel_size: int = 3,
                 strides: int = 1,
                 use_bias: bool = False,
                 use_explicit_padding: bool = False,
                 activation: str = 'relu6',
                 kernel_initializer: str = 'VarianceScaling',
                 kernel_regularizer: Optional[
                     tf.keras.regularizers.Regularizer] = None,
                 bias_regularizer: Optional[
                     tf.keras.regularizers.Regularizer] = None,
                 use_normalization: bool = True,
                 use_sync_bn: bool = False,
                 norm_momentum: float = 0.99,
                 norm_epsilon: float = 0.001,
                 **kwargs):
        """A convolution block with batch normalization.

    Args:
      filters: An `int` number of filters for the convolution.
      kernel_size: An `int` specifying the height and width of the 2D
        convolution window.
      strides: An `int` of block stride. If greater than 1, this block will
        ultimately downsample the input.
      use_bias: If True, use bias in the convolution layer.
      use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
        inputs so that the output dimensions are the same as if 'SAME' padding
        were used.
      activation: A `str` name of the activation function.
      kernel_initializer: A `str` for kernel initializer of convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
        Defaults to None.
      use_normalization: If True, use batch normalization.
      use_sync_bn: If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      **kwargs: Additional keyword arguments to be passed.
    """
        super(Conv2DBNBlockQuantized, self).__init__(**kwargs)
        self._filters = filters
        self._kernel_size = kernel_size
        self._strides = strides
        self._activation = activation
        self._use_bias = use_bias
        self._use_explicit_padding = use_explicit_padding
        self._kernel_initializer = kernel_initializer
        self._kernel_regularizer = kernel_regularizer
        self._bias_regularizer = bias_regularizer
        self._use_normalization = use_normalization
        self._use_sync_bn = use_sync_bn
        self._norm_momentum = norm_momentum
        self._norm_epsilon = norm_epsilon

        if use_explicit_padding and kernel_size > 1:
            self._padding = 'valid'
        else:
            self._padding = 'same'

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if use_sync_bn else tf.keras.layers.BatchNormalization)
        self._norm_with_quantize = _quantize_wrapped_layer(
            norm_layer, configs.Default8BitOutputQuantizeConfig())
        self._norm = _quantize_wrapped_layer(norm_layer,
                                             configs.NoOpQuantizeConfig())

        if tf.keras.backend.image_data_format() == 'channels_last':
            self._bn_axis = -1
        else:
            self._bn_axis = 1
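With use_explicit_padding, the convolution above switches to 'valid' padding, so inputs must be pre-padded to reproduce 'same' output sizes (the block's call(), not shown, is expected to do this). A minimal sketch of that fixed padding; kernel_size here is an assumption:

import tensorflow as tf

kernel_size = 3
pad_total = kernel_size - 1
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2
x = tf.zeros([1, 32, 32, 3])
x_padded = tf.pad(
    x, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
# A 'valid' conv on x_padded now matches the spatial size of 'same' on x
# (for stride 1).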
Example #9
    def build(self, input_shape):
        height = input_shape[1]
        width = input_shape[2]
        channels = input_shape[3]

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if self._use_sync_bn else
                      tf.keras.layers.BatchNormalization)
        norm_with_quantize = _quantize_wrapped_layer(
            norm_layer, configs.Default8BitOutputQuantizeConfig())
        norm = norm_with_quantize if self._activation not in [
            'relu', 'relu6'
        ] else _quantize_wrapped_layer(norm_layer,
                                       configs.NoOpQuantizeConfig())

        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        depthwise_conv2d_quantized_output_quantized = _quantize_wrapped_layer(
            tf.keras.layers.DepthwiseConv2D,
            configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
                                                  ['activation'], True))

        self.aspp_layers = []

        conv1 = conv2d_quantized(filters=self._output_channels,
                                 kernel_size=(1, 1),
                                 kernel_initializer=self._kernel_initializer,
                                 kernel_regularizer=self._kernel_regularizer,
                                 use_bias=False,
                                 activation=NoOpActivation())
        norm1 = norm(axis=self._bn_axis,
                     momentum=self._batchnorm_momentum,
                     epsilon=self._batchnorm_epsilon)

        self.aspp_layers.append([conv1, norm1])

        for dilation_rate in self._dilation_rates:
            leading_layers = []
            kernel_size = (3, 3)
            if self._use_depthwise_convolution:
                leading_layers += [
                    depthwise_conv2d_quantized_output_quantized(
                        depth_multiplier=1,
                        kernel_size=kernel_size,
                        padding='same',
                        depthwise_regularizer=self._kernel_regularizer,
                        depthwise_initializer=self._kernel_initializer,
                        dilation_rate=dilation_rate,
                        use_bias=False,
                        activation=NoOpActivation())
                ]
                kernel_size = (1, 1)
            conv_dilation = leading_layers + [
                conv2d_quantized(filters=self._output_channels,
                                 kernel_size=kernel_size,
                                 padding='same',
                                 kernel_regularizer=self._kernel_regularizer,
                                 kernel_initializer=self._kernel_initializer,
                                 dilation_rate=dilation_rate,
                                 use_bias=False,
                                 activation=NoOpActivation())
            ]
            norm_dilation = norm(axis=self._bn_axis,
                                 momentum=self._batchnorm_momentum,
                                 epsilon=self._batchnorm_epsilon)

            self.aspp_layers.append(conv_dilation + [norm_dilation])

        if self._pool_kernel_size is None:
            pooling = [
                _quantize_wrapped_layer(
                    tf.keras.layers.GlobalAveragePooling2D,
                    configs.Default8BitQuantizeConfig([], [], True))(),
                _quantize_wrapped_layer(
                    tf.keras.layers.Reshape,
                    configs.Default8BitQuantizeConfig([], [],
                                                      True))((1, 1, channels))
            ]
        else:
            pooling = [
                _quantize_wrapped_layer(
                    tf.keras.layers.AveragePooling2D,
                    configs.Default8BitQuantizeConfig([], [], True))(
                        self._pool_kernel_size)
            ]

        conv2 = conv2d_quantized(filters=self._output_channels,
                                 kernel_size=(1, 1),
                                 kernel_initializer=self._kernel_initializer,
                                 kernel_regularizer=self._kernel_regularizer,
                                 use_bias=False,
                                 activation=NoOpActivation())
        norm2 = norm(axis=self._bn_axis,
                     momentum=self._batchnorm_momentum,
                     epsilon=self._batchnorm_epsilon)

        self.aspp_layers.append(pooling + [conv2, norm2])

        resizing = _quantize_wrapped_layer(
            tf.keras.layers.Resizing,
            configs.Default8BitQuantizeConfig([], [], True))
        self._resizing_layer = resizing(height,
                                        width,
                                        interpolation=self._interpolation)

        self._projection = [
            conv2d_quantized(filters=self._output_channels,
                             kernel_size=(1, 1),
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer,
                             use_bias=False,
                             activation=NoOpActivation()),
            norm_with_quantize(axis=self._bn_axis,
                               momentum=self._batchnorm_momentum,
                               epsilon=self._batchnorm_epsilon)
        ]
        self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
        concat = _quantize_wrapped_layer(
            tf.keras.layers.Concatenate,
            configs.Default8BitQuantizeConfig([], [], True))
        self._concat_layer = concat(axis=-1)
Example #10
    def build(self, input_shape: Sequence[tf.TensorShape]):
        """Creates the variables of the segmentation head."""
        # When input_shape is a list/tuple, the first element corresponds to the
        # backbone features, which are used for resizing the decoder features
        # (the second element) when the feature fusion type is `deeplabv3plus`.
        backbone_shape = input_shape[0]
        use_depthwise_convolution = self._config_dict[
            'use_depthwise_convolution']
        random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        conv2d_quantized_output_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  True))
        depthwise_conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.DepthwiseConv2D,
            configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
                                                  ['activation'], False))
        conv_kwargs = {
            'kernel_size': 3 if not use_depthwise_convolution else 1,
            'padding': 'same',
            'use_bias': False,
            'kernel_initializer': random_initializer,
            'kernel_regularizer': self._config_dict['kernel_regularizer'],
        }

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if self._config_dict['use_sync_bn'] else
                      tf.keras.layers.BatchNormalization)
        norm_with_quantize = _quantize_wrapped_layer(
            norm_layer, configs.Default8BitOutputQuantizeConfig())
        norm = norm_with_quantize if self._config_dict['activation'] not in [
            'relu', 'relu6'
        ] else _quantize_wrapped_layer(norm_layer,
                                       configs.NoOpQuantizeConfig())

        bn_kwargs = {
            'axis': self._bn_axis,
            'momentum': self._config_dict['norm_momentum'],
            'epsilon': self._config_dict['norm_epsilon'],
        }

        if self._config_dict['feature_fusion'] == 'deeplabv3plus':
            # Deeplabv3+ feature fusion layers.
            self._dlv3p_conv = conv2d_quantized(
                kernel_size=1,
                padding='same',
                use_bias=False,
                kernel_initializer=tf.keras.initializers.RandomNormal(
                    stddev=0.01),
                kernel_regularizer=self._config_dict['kernel_regularizer'],
                name='segmentation_head_deeplabv3p_fusion_conv',
                filters=self._config_dict['low_level_num_filters'],
                activation=NoOpActivation())

            self._dlv3p_norm = norm(
                name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)

        # Segmentation head layers.
        self._convs = []
        self._norms = []
        for i in range(self._config_dict['num_convs']):
            if use_depthwise_convolution:
                self._convs.append(
                    depthwise_conv2d_quantized(
                        name='segmentation_head_depthwise_conv_{}'.format(i),
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        depthwise_initializer=random_initializer,
                        depthwise_regularizer=self._config_dict[
                            'kernel_regularizer'],
                        depth_multiplier=1,
                        activation=NoOpActivation()))
                norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
                self._norms.append(norm(name=norm_name, **bn_kwargs))
            conv_name = 'segmentation_head_conv_{}'.format(i)
            self._convs.append(
                conv2d_quantized(name=conv_name,
                                 filters=self._config_dict['num_filters'],
                                 activation=NoOpActivation(),
                                 **conv_kwargs))
            norm_name = 'segmentation_head_norm_{}'.format(i)
            self._norms.append(norm(name=norm_name, **bn_kwargs))

        self._classifier = conv2d_quantized_output_quantized(
            name='segmentation_output',
            filters=self._config_dict['num_classes'],
            kernel_size=self._config_dict['prediction_kernel_size'],
            padding='same',
            bias_initializer=tf.zeros_initializer(),
            kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
            kernel_regularizer=self._config_dict['kernel_regularizer'],
            bias_regularizer=self._config_dict['bias_regularizer'],
            activation=NoOpActivation())

        upsampling = _quantize_wrapped_layer(
            tf.keras.layers.UpSampling2D,
            configs.Default8BitQuantizeConfig([], [], True))
        self._upsampling_layer = upsampling(
            size=(self._config_dict['upsample_factor'],
                  self._config_dict['upsample_factor']),
            interpolation='nearest')
        self._resizing_layer = tf.keras.layers.Resizing(
            backbone_shape[1], backbone_shape[2], interpolation='bilinear')

        concat = _quantize_wrapped_layer(
            tf.keras.layers.Concatenate,
            configs.Default8BitQuantizeConfig([], [], True))
        self._concat_layer = concat(axis=self._bn_axis)

        super().build(input_shape)