Example #1
def _create_gating_activation_layer(self):
     if self._gating_activation == 'hard_sigmoid':
         # Convert hard_sigmoid activation to quantizable keras layers so each op
         # can be properly quantized.
         # Formula is hard_sigmoid(x) = relu6(x + 3) * 0.16667.
         self._add_quantizer('add_three')
         self._add_quantizer('divide_six')
         self._relu6 = tfmot.quantization.keras.QuantizeWrapperV2(
             tf_utils.get_activation('relu6', use_keras_layer=True),
             configs.Default8BitActivationQuantizeConfig())
     else:
         self._gating_activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
             tf_utils.get_activation(self._gating_activation,
                                     use_keras_layer=True),
             configs.Default8BitActivationQuantizeConfig())
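
The comment above rewrites hard_sigmoid in terms of ops that have quantized counterparts. As a quick sanity check of that formula (plain Python, no TensorFlow needed; 0.16667 is just 1/6 rounded):

def relu6(x):
    return min(max(x, 0.0), 6.0)

def hard_sigmoid(x):
    # hard_sigmoid(x) = relu6(x + 3) / 6, matching the 'add_three' /
    # 'divide_six' quantizers registered above.
    return relu6(x + 3.0) / 6.0

for x in (-4.0, -1.5, 0.0, 2.0, 5.0):
    print(x, hard_sigmoid(x))  # saturates at 0 below -3 and at 1 above +3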
Example #2
    def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
        """Build variables and child layers to prepare for calling."""
        if self._use_explicit_padding and self._kernel_size > 1:
            padding_size = nn_layers.get_padding_for_kernel_size(
                self._kernel_size)
            self._pad = tf.keras.layers.ZeroPadding2D(padding_size)
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  not self._use_normalization))
        self._conv0 = conv2d_quantized(
            filters=self._filters,
            kernel_size=self._kernel_size,
            strides=self._strides,
            padding=self._padding,
            use_bias=self._use_bias,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        if self._use_normalization:
            self._norm0 = self._norm_by_activation(self._activation)(
                axis=self._bn_axis,
                momentum=self._norm_momentum,
                epsilon=self._norm_epsilon)
        self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())

        super(Conv2DBNBlockQuantized, self).build(input_shape)
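
This build() method (and several of the later examples) calls a module-private helper _quantize_wrapped_layer that is not shown in this excerpt. A minimal sketch of such a factory, assuming it is simply a closure that constructs the layer and wraps it with QuantizeWrapperV2:

import tensorflow_model_optimization as tfmot

def _quantize_wrapped_layer(cls, quantize_config):
    # Sketch (assumed): return a constructor that builds cls(...) and wraps
    # the resulting layer with the given quantize config.
    def constructor(*args, **kwargs):
        return tfmot.quantization.keras.QuantizeWrapperV2(
            cls(*args, **kwargs), quantize_config)
    return constructor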
Example #3
    def __init__(self,
                 output_channels: int,
                 dilation_rates: List[int],
                 pool_kernel_size: Optional[List[int]] = None,
                 use_sync_bn: bool = False,
                 batchnorm_momentum: float = 0.99,
                 batchnorm_epsilon: float = 0.001,
                 activation: str = 'relu',
                 dropout: float = 0.5,
                 kernel_initializer: str = 'GlorotUniform',
                 kernel_regularizer: Optional[
                     tf.keras.regularizers.Regularizer] = None,
                 interpolation: str = 'bilinear',
                 use_depthwise_convolution: bool = False,
                 **kwargs):
        """Initializes `SpatialPyramidPooling`.

    Args:
      output_channels: Number of channels produced by SpatialPyramidPooling.
      dilation_rates: A list of integers for parallel dilated conv.
      pool_kernel_size: A list of integers or None. If None, global average
        pooling is applied, otherwise an average pooling of pool_kernel_size is
        applied.
      use_sync_bn: A bool, whether or not to use sync batch normalization.
      batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
        0.99.
      batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
        0.001.
      activation: A `str` for type of activation to be used. Defaults to 'relu'.
      dropout: A float for the dropout rate before output. Defaults to 0.5.
      kernel_initializer: Kernel initializer for conv layers. Defaults to
        `GlorotUniform`.
      kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
      interpolation: The interpolation method for upsampling. Defaults to
        `bilinear`.
      use_depthwise_convolution: Allows spatial pooling to use separable
        depthwise convolutions. [Encoder-Decoder with Atrous Separable
        Convolution for Semantic Image Segmentation](
         https://arxiv.org/pdf/1802.02611.pdf)
      **kwargs: Other keyword arguments for the layer.
    """
        super().__init__(output_channels=output_channels,
                         dilation_rates=dilation_rates,
                         use_sync_bn=use_sync_bn,
                         batchnorm_momentum=batchnorm_momentum,
                         batchnorm_epsilon=batchnorm_epsilon,
                         activation=activation,
                         dropout=dropout,
                         kernel_initializer=kernel_initializer,
                         kernel_regularizer=kernel_regularizer,
                         interpolation=interpolation,
                         pool_kernel_size=pool_kernel_size,
                         use_depthwise_convolution=use_depthwise_convolution)

        self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())
        self._activation_fn_no_quant = (tf_utils.get_activation(
            activation, use_keras_layer=True))
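
The pool_kernel_size argument switches between global and fixed-size average pooling (see the docstring above). A plain Keras sketch of that branch, independent of the quantized class:

import tensorflow as tf

def make_pooling(pool_kernel_size=None):
    # None -> global average pooling (spatial dims kept so the result can be
    # resized back up); otherwise a fixed-window average pooling.
    if pool_kernel_size is None:
        return tf.keras.layers.GlobalAveragePooling2D(keepdims=True)
    return tf.keras.layers.AveragePooling2D(pool_size=pool_kernel_size)

x = tf.ones([1, 32, 32, 8])
print(make_pooling()(x).shape)          # (1, 1, 1, 8)
print(make_pooling([16, 16])(x).shape)  # (1, 2, 2, 8)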
Example #4
    def build(self, input_shape):
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        conv2d_quantized_output_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  True))
        num_reduced_filters = nn_layers.make_divisible(
            max(1, int(self._in_filters * self._se_ratio)),
            divisor=self._divisible_by,
            round_down_protect=self._round_down_protect)

        self._se_reduce = conv2d_quantized(
            filters=num_reduced_filters,
            kernel_size=1,
            strides=1,
            padding='same',
            use_bias=True,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())

        self._se_expand = conv2d_quantized_output_quantized(
            filters=self._out_filters,
            kernel_size=1,
            strides=1,
            padding='same',
            use_bias=True,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())

        self._multiply = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Multiply(),
            configs.Default8BitQuantizeConfig([], [], True))
        self._reduce_mean_quantizer = (
            tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
                num_bits=8,
                per_axis=False,
                symmetric=False,
                narrow_range=False))
        self._reduce_mean_quantizer_vars = self._reduce_mean_quantizer.build(
            None, 'reduce_mean_quantizer_vars', self)

        self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())
        self._create_gating_activation_layer()

        self._build_quantizer_vars()
        super().build(input_shape)
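
num_reduced_filters above is the squeeze-and-excite bottleneck width: in_filters scaled by se_ratio and then snapped to a multiple of divisible_by via nn_layers.make_divisible. A sketch of the usual MobileNet-style rounding rule (the exact Model Garden helper may differ in detail):

def make_divisible(value, divisor=8, min_value=None, round_down_protect=True):
    # Round to the nearest multiple of divisor; with round_down_protect,
    # never round down by more than 10%.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if round_down_protect and new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)

# e.g. in_filters=144, se_ratio=0.25 -> 36 -> snapped to 40 with divisor=8.
print(make_divisible(max(1, int(144 * 0.25)), divisor=8))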
Example #5
  def __init__(
      self,
      min_level: int,
      max_level: int,
      num_classes: int,
      num_anchors_per_location: int,
      num_convs: int = 4,
      num_filters: int = 256,
      attribute_heads: Optional[List[Dict[str, Any]]] = None,
      use_separable_conv: bool = False,
      activation: str = 'relu',
      use_sync_bn: bool = False,
      norm_momentum: float = 0.99,
      norm_epsilon: float = 0.001,
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      num_params_per_anchor: int = 4,
      **kwargs):
    """Initializes a RetinaNet quantized head.

    Args:
      min_level: An `int` number of minimum feature level.
      max_level: An `int` number of maximum feature level.
      num_classes: An `int` number of classes to predict.
      num_anchors_per_location: An `int` number of anchors per pixel location.
      num_convs: An `int` number that represents the number of the intermediate
        conv layers before the prediction.
      num_filters: An `int` number that represents the number of filters of the
        intermediate conv layers.
      attribute_heads: If not None, a list that contains a dict for each
        additional attribute head. Each dict consists of 3 key-value pairs:
        `name`, `type` ('regression' or 'classification'), and `size` (number
        of predicted values for each instance).
      use_separable_conv: A `bool` that indicates whether separable convolution
        layers are used.
      activation: A `str` that indicates which activation is used, e.g. 'relu',
        'swish', etc.
      use_sync_bn: A `bool` that indicates whether to use synchronized batch
        normalization across different replicas.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default is None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
      num_params_per_anchor: Number of parameters required to specify an anchor
        box. For example, `num_params_per_anchor` would be 4 for axis-aligned
        anchor boxes specified by their y-centers, x-centers, heights, and
        widths.
      **kwargs: Additional keyword arguments to be passed.
    """
    super().__init__(**kwargs)
    self._config_dict = {
        'min_level': min_level,
        'max_level': max_level,
        'num_classes': num_classes,
        'num_anchors_per_location': num_anchors_per_location,
        'num_convs': num_convs,
        'num_filters': num_filters,
        'attribute_heads': attribute_heads,
        'use_separable_conv': use_separable_conv,
        'activation': activation,
        'use_sync_bn': use_sync_bn,
        'norm_momentum': norm_momentum,
        'norm_epsilon': norm_epsilon,
        'kernel_regularizer': kernel_regularizer,
        'bias_regularizer': bias_regularizer,
        'num_params_per_anchor': num_params_per_anchor,
    }

    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    self._activation = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(activation, use_keras_layer=True),
        configs.Default8BitActivationQuantizeConfig())
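
A hypothetical value for the attribute_heads argument described in the docstring, adding one regression head named 'depth' that predicts a single value per anchor:

attribute_heads = [
    {'name': 'depth', 'type': 'regression', 'size': 1},
]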
Example #6
    def __init__(self,
                 num_classes: int,
                 level: Union[int, str],
                 num_convs: int = 2,
                 num_filters: int = 256,
                 use_depthwise_convolution: bool = False,
                 prediction_kernel_size: int = 1,
                 upsample_factor: int = 1,
                 feature_fusion: Optional[str] = None,
                 decoder_min_level: Optional[int] = None,
                 decoder_max_level: Optional[int] = None,
                 low_level: int = 2,
                 low_level_num_filters: int = 48,
                 num_decoder_filters: int = 256,
                 activation: str = 'relu',
                 use_sync_bn: bool = False,
                 norm_momentum: float = 0.99,
                 norm_epsilon: float = 0.001,
                 kernel_regularizer: Optional[
                     tf.keras.regularizers.Regularizer] = None,
                 bias_regularizer: Optional[
                     tf.keras.regularizers.Regularizer] = None,
                 **kwargs):
        """Initializes a segmentation head.

    Args:
      num_classes: An `int` number of mask classification categories. The number
        of classes does not include background class.
      level: An `int` or `str`, level to use to build segmentation head.
      num_convs: An `int` number of stacked convolution before the last
        prediction layer.
      num_filters: An `int` number to specify the number of filters used.
        Default is 256.
      use_depthwise_convolution: A `bool` to specify whether to use depthwise
        separable convolutions.
      prediction_kernel_size: An `int` number to specify the kernel size of the
        prediction layer.
      upsample_factor: An `int` number to specify the upsampling factor to
        generate finer mask. Default 1 means no upsampling is applied.
      feature_fusion: One of `deeplabv3plus`, `pyramid_fusion`,
        `panoptic_fpn_fusion`, or None. If `deeplabv3plus`, features from
        decoder_features[level] will be fused with low level feature maps from
        the backbone. If `pyramid_fusion`, multiscale features will be resized
        and fused at the target level.
      decoder_min_level: An `int` of minimum level from decoder to use in
        feature fusion. It is only used when feature_fusion is set to
        `panoptic_fpn_fusion`.
      decoder_max_level: An `int` of maximum level from decoder to use in
        feature fusion. It is only used when feature_fusion is set to
        `panoptic_fpn_fusion`.
      low_level: An `int` of backbone level to be used for feature fusion. It is
        used when feature_fusion is set to `deeplabv3plus`.
      low_level_num_filters: An `int` of reduced number of filters for the low
        level features before fusing them with higher level features. It is
        only used when feature_fusion is set to `deeplabv3plus`.
      num_decoder_filters: An `int` of number of filters in the decoder outputs.
        It is only used when feature_fusion is set to `panoptic_fpn_fusion`.
      activation: A `str` that indicates which activation is used, e.g. 'relu',
        'swish', etc.
      use_sync_bn: A `bool` that indicates whether to use synchronized batch
        normalization across different replicas.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default is None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
      **kwargs: Additional keyword arguments to be passed.
    """
        super().__init__(**kwargs)

        self._config_dict = {
            'num_classes': num_classes,
            'level': level,
            'num_convs': num_convs,
            'num_filters': num_filters,
            'use_depthwise_convolution': use_depthwise_convolution,
            'prediction_kernel_size': prediction_kernel_size,
            'upsample_factor': upsample_factor,
            'feature_fusion': feature_fusion,
            'decoder_min_level': decoder_min_level,
            'decoder_max_level': decoder_max_level,
            'low_level': low_level,
            'low_level_num_filters': low_level_num_filters,
            'num_decoder_filters': num_decoder_filters,
            'activation': activation,
            'use_sync_bn': use_sync_bn,
            'norm_momentum': norm_momentum,
            'norm_epsilon': norm_epsilon,
            'kernel_regularizer': kernel_regularizer,
            'bias_regularizer': bias_regularizer,
        }
        if tf.keras.backend.image_data_format() == 'channels_last':
            self._bn_axis = -1
        else:
            self._bn_axis = 1
        self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())
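
As a usage sketch, a deeplabv3plus-style configuration ties together feature_fusion, low_level, and low_level_num_filters. The concrete class name is not visible in this excerpt, so SegmentationHeadQuantized below is an assumption:

# Assumed class name; keyword arguments follow the docstring above.
head = SegmentationHeadQuantized(
    num_classes=21,
    level=3,
    feature_fusion='deeplabv3plus',
    low_level=2,                # backbone level fused with the decoder output
    low_level_num_filters=48,   # reduced channels for the low-level features
    activation='relu')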
Example #7
    def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
        """Build variables and child layers to prepare for calling."""
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        depthwise_conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.DepthwiseConv2D,
            configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
                                                  ['activation'], False))
        expand_filters = self._in_filters
        if self._expand_ratio > 1:
            # First 1x1 conv for channel expansion.
            expand_filters = nn_layers.make_divisible(
                self._in_filters * self._expand_ratio, self._divisible_by)

            expand_kernel = 1 if self._use_depthwise else self._kernel_size
            expand_stride = 1 if self._use_depthwise else self._strides

            self._conv0 = conv2d_quantized(
                filters=expand_filters,
                kernel_size=expand_kernel,
                strides=expand_stride,
                padding='same',
                use_bias=False,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=NoOpActivation())
            self._norm0 = self._norm_by_activation(self._activation)(
                axis=self._bn_axis,
                momentum=self._norm_momentum,
                epsilon=self._norm_epsilon)
            self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
                tf_utils.get_activation(self._activation,
                                        use_keras_layer=True),
                configs.Default8BitActivationQuantizeConfig())

        if self._use_depthwise:
            # Depthwise conv.
            self._conv1 = depthwise_conv2d_quantized(
                kernel_size=(self._kernel_size, self._kernel_size),
                strides=self._strides,
                padding='same',
                depth_multiplier=1,
                dilation_rate=self._dilation_rate,
                use_bias=False,
                depthwise_initializer=self._kernel_initializer,
                depthwise_regularizer=self._depthsize_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=NoOpActivation())
            self._norm1 = self._norm_by_activation(self._depthwise_activation)(
                axis=self._bn_axis,
                momentum=self._norm_momentum,
                epsilon=self._norm_epsilon)
            self._depthwise_activation_layer = (
                tfmot.quantization.keras.QuantizeWrapperV2(
                    tf_utils.get_activation(self._depthwise_activation,
                                            use_keras_layer=True),
                    configs.Default8BitActivationQuantizeConfig()))

        # Squeeze and excitation.
        if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
            logging.info('Use Squeeze and excitation.')
            in_filters = self._in_filters
            if self._expand_se_in_filters:
                in_filters = expand_filters
            self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized(
                in_filters=in_filters,
                out_filters=expand_filters,
                se_ratio=self._se_ratio,
                divisible_by=self._divisible_by,
                round_down_protect=self._se_round_down_protect,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=self._se_inner_activation,
                gating_activation=self._se_gating_activation)
        else:
            self._squeeze_excitation = None

        # Last 1x1 conv.
        self._conv2 = conv2d_quantized(
            filters=self._out_filters,
            kernel_size=1,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm2 = self._norm_with_quantize(axis=self._bn_axis,
                                               momentum=self._norm_momentum,
                                               epsilon=self._norm_epsilon)

        if self._stochastic_depth_drop_rate:
            self._stochastic_depth = nn_layers.StochasticDepth(
                self._stochastic_depth_drop_rate)
        else:
            self._stochastic_depth = None
        self._add = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Add(),
            configs.Default8BitQuantizeConfig([], [], True))

        super(InvertedBottleneckBlockQuantized, self).build(input_shape)
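
The call() method is not part of this excerpt; assuming the usual inverted-bottleneck wiring, the residual tail built above (_stochastic_depth and the quantized _add) is applied only when the block's input and output shapes match, roughly as in this sketch:

def residual_tail(x, shortcut, stochastic_depth, add_layer, training=None):
    # Sketch (assumed): optionally drop the residual branch during training,
    # then re-join the shortcut with the quantized element-wise add.
    if stochastic_depth is not None:
        x = stochastic_depth(x, training=training)
    return add_layer([x, shortcut])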
Example #8
    def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
        """Build variables and child layers to prepare for calling."""
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        if self._use_projection:
            if self._resnetd_shortcut:
                self._shortcut0 = tf.keras.layers.AveragePooling2D(
                    pool_size=2, strides=self._strides, padding='same')
                self._shortcut1 = conv2d_quantized(
                    filters=self._filters * 4,
                    kernel_size=1,
                    strides=1,
                    use_bias=False,
                    kernel_initializer=self._kernel_initializer,
                    kernel_regularizer=self._kernel_regularizer,
                    bias_regularizer=self._bias_regularizer,
                    activation=NoOpActivation())
            else:
                self._shortcut = conv2d_quantized(
                    filters=self._filters * 4,
                    kernel_size=1,
                    strides=self._strides,
                    use_bias=False,
                    kernel_initializer=self._kernel_initializer,
                    kernel_regularizer=self._kernel_regularizer,
                    bias_regularizer=self._bias_regularizer,
                    activation=NoOpActivation())

            self._norm0 = self._norm_with_quantize(
                axis=self._bn_axis,
                momentum=self._norm_momentum,
                epsilon=self._norm_epsilon,
                trainable=self._bn_trainable)

        self._conv1 = conv2d_quantized(
            filters=self._filters,
            kernel_size=1,
            strides=1,
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm1 = self._norm(axis=self._bn_axis,
                                 momentum=self._norm_momentum,
                                 epsilon=self._norm_epsilon,
                                 trainable=self._bn_trainable)
        self._activation1 = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())

        self._conv2 = conv2d_quantized(
            filters=self._filters,
            kernel_size=3,
            strides=self._strides,
            dilation_rate=self._dilation_rate,
            padding='same',
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm2 = self._norm(axis=self._bn_axis,
                                 momentum=self._norm_momentum,
                                 epsilon=self._norm_epsilon,
                                 trainable=self._bn_trainable)
        self._activation2 = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())

        self._conv3 = conv2d_quantized(
            filters=self._filters * 4,
            kernel_size=1,
            strides=1,
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm3 = self._norm_with_quantize(axis=self._bn_axis,
                                               momentum=self._norm_momentum,
                                               epsilon=self._norm_epsilon,
                                               trainable=self._bn_trainable)
        self._activation3 = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())

        if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
            self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized(
                in_filters=self._filters * 4,
                out_filters=self._filters * 4,
                se_ratio=self._se_ratio,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer)
        else:
            self._squeeze_excitation = None

        if self._stochastic_depth_drop_rate:
            self._stochastic_depth = nn_layers.StochasticDepth(
                self._stochastic_depth_drop_rate)
        else:
            self._stochastic_depth = None
        self._add = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Add(),
            configs.Default8BitQuantizeConfig([], [], True))

        super(BottleneckBlockQuantized, self).build(input_shape)
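
When _resnetd_shortcut is set, the projection branch above follows the ResNet-D variant: the downsampling moves into an average pool and the 1x1 projection keeps stride 1, instead of a single strided 1x1 conv. A minimal unquantized Keras sketch of the two alternatives:

import tensorflow as tf

def projection_shortcut(filters, strides, resnetd=False):
    # Sketch of the two shortcut variants built above (quantization omitted).
    if resnetd:
        # ResNet-D: average pool downsamples, then a stride-1 1x1 conv projects.
        return tf.keras.Sequential([
            tf.keras.layers.AveragePooling2D(
                pool_size=2, strides=strides, padding='same'),
            tf.keras.layers.Conv2D(filters * 4, 1, strides=1, use_bias=False),
        ])
    # Standard: the 1x1 projection conv is itself strided.
    return tf.keras.Sequential(
        [tf.keras.layers.Conv2D(filters * 4, 1, strides=strides, use_bias=False)])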