def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
  """Build variables and child layers to prepare for calling."""
  conv2d_quantized = _quantize_wrapped_layer(
      tf.keras.layers.Conv2D,
      configs.DefaultNBitConvQuantizeConfig(
          ['kernel'], ['activation'], False,
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))
  self._conv0 = conv2d_quantized(
      filters=self._filters,
      kernel_size=self._kernel_size,
      strides=self._strides,
      padding='same',
      use_bias=self._use_bias,
      kernel_initializer=self._kernel_initializer,
      kernel_regularizer=self._kernel_regularizer,
      bias_regularizer=self._bias_regularizer,
      activation=NoOpActivation())
  if self._use_normalization:
    self._norm0 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)
  self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
      tf_utils.get_activation(self._activation, use_keras_layer=True),
      configs.DefaultNBitActivationQuantizeConfig(
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))

  super().build(input_shape)
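# For reference, a minimal sketch of the conv -> (optional) norm ->
# quantized-activation data flow the child layers above suggest. This is an
# illustration only, not this class's actual `call()`, which is defined
# elsewhere in the file and may handle training flags differently:
#
#   x = self._conv0(inputs)
#   if self._use_normalization:
#     x = self._norm0(x)
#   return self._activation_layer(x)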
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
  """Build variables and child layers to prepare for calling."""
  conv2d_quantized = _quantize_wrapped_layer(
      tf.keras.layers.Conv2D,
      configs.DefaultNBitConvQuantizeConfig(
          ['kernel'], ['activation'], False,
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))
  depthwise_conv2d_quantized = _quantize_wrapped_layer(
      tf.keras.layers.DepthwiseConv2D,
      configs.DefaultNBitConvQuantizeConfig(
          ['depthwise_kernel'], ['activation'], False,
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))
  expand_filters = self._in_filters
  if self._expand_ratio > 1:
    # First 1x1 conv for channel expansion.
    expand_filters = nn_layers.make_divisible(
        self._in_filters * self._expand_ratio, self._divisible_by)

    expand_kernel = 1 if self._use_depthwise else self._kernel_size
    expand_stride = 1 if self._use_depthwise else self._strides

    self._conv0 = conv2d_quantized(
        filters=expand_filters,
        kernel_size=expand_kernel,
        strides=expand_stride,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())
    self._norm0 = self._norm_with_quantize(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)
    self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(self._activation, use_keras_layer=True),
        configs.DefaultNBitActivationQuantizeConfig(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))

  if self._use_depthwise:
    # Depthwise conv.
    self._conv1 = depthwise_conv2d_quantized(
        kernel_size=(self._kernel_size, self._kernel_size),
        strides=self._strides,
        padding='same',
        depth_multiplier=1,
        dilation_rate=self._dilation_rate,
        use_bias=False,
        depthwise_initializer=self._kernel_initializer,
        depthwise_regularizer=self._depthsize_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())
    self._norm1 = self._norm_with_quantize(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)
    self._depthwise_activation_layer = (
        tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._depthwise_activation,
                                    use_keras_layer=True),
            configs.DefaultNBitActivationQuantizeConfig(
                num_bits_weight=self._num_bits_weight,
                num_bits_activation=self._num_bits_activation)))

  # Squeeze and excitation.
  if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
    logging.info('Use Squeeze and excitation.')
    in_filters = self._in_filters
    if self._expand_se_in_filters:
      in_filters = expand_filters
    self._squeeze_excitation = qat_nn_layers.SqueezeExcitationNBitQuantized(
        in_filters=in_filters,
        out_filters=expand_filters,
        se_ratio=self._se_ratio,
        divisible_by=self._divisible_by,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=self._se_inner_activation,
        gating_activation=self._se_gating_activation,
        num_bits_weight=self._num_bits_weight,
        num_bits_activation=self._num_bits_activation)
  else:
    self._squeeze_excitation = None

  # Last 1x1 conv.
  self._conv2 = conv2d_quantized(
      filters=self._out_filters,
      kernel_size=1,
      strides=1,
      padding='same',
      use_bias=False,
      kernel_initializer=self._kernel_initializer,
      kernel_regularizer=self._kernel_regularizer,
      bias_regularizer=self._bias_regularizer,
      activation=NoOpActivation())
  self._norm2 = self._norm_with_quantize(
      axis=self._bn_axis,
      momentum=self._norm_momentum,
      epsilon=self._norm_epsilon)

  if self._stochastic_depth_drop_rate:
    self._stochastic_depth = nn_layers.StochasticDepth(
        self._stochastic_depth_drop_rate)
  else:
    self._stochastic_depth = None
  self._add = tf.keras.layers.Add()

  super().build(input_shape)
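# For reference, a minimal sketch of the inverted-bottleneck data flow the
# layers above support, assuming the usual expand -> depthwise ->
# squeeze-excite -> project -> residual ordering. Illustrative only; the
# actual `call()` defined elsewhere in this class is authoritative, and the
# residual condition below is an assumption based on standard MobileNet-style
# blocks (residual add only when shapes match):
#
#   shortcut = inputs
#   x = inputs
#   if self._expand_ratio > 1:
#     x = self._activation_layer(self._norm0(self._conv0(x)))
#   if self._use_depthwise:
#     x = self._depthwise_activation_layer(self._norm1(self._conv1(x)))
#   if self._squeeze_excitation is not None:
#     x = self._squeeze_excitation(x)
#   x = self._norm2(self._conv2(x))
#   if self._strides == 1 and self._in_filters == self._out_filters:
#     if self._stochastic_depth is not None:
#       x = self._stochastic_depth(x, training=training)
#     x = self._add([x, shortcut])
#   return x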
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
  """Build variables and child layers to prepare for calling."""
  conv2d_quantized = _quantize_wrapped_layer(
      tf.keras.layers.Conv2D,
      configs.DefaultNBitConvQuantizeConfig(
          ['kernel'], ['activation'], False,
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))
  if self._use_projection:
    # Projection shortcut, optionally with a ResNet-D style average pooling.
    if self._resnetd_shortcut:
      self._shortcut0 = tf.keras.layers.AveragePooling2D(
          pool_size=2, strides=self._strides, padding='same')
      self._shortcut1 = conv2d_quantized(
          filters=self._filters * 4,
          kernel_size=1,
          strides=1,
          use_bias=False,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=NoOpActivation())
    else:
      self._shortcut = conv2d_quantized(
          filters=self._filters * 4,
          kernel_size=1,
          strides=self._strides,
          use_bias=False,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=NoOpActivation())

    self._norm0 = self._norm_with_quantize(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon,
        trainable=self._bn_trainable)

  self._conv1 = conv2d_quantized(
      filters=self._filters,
      kernel_size=1,
      strides=1,
      use_bias=False,
      kernel_initializer=self._kernel_initializer,
      kernel_regularizer=self._kernel_regularizer,
      bias_regularizer=self._bias_regularizer,
      activation=NoOpActivation())
  self._norm1 = self._norm(
      axis=self._bn_axis,
      momentum=self._norm_momentum,
      epsilon=self._norm_epsilon,
      trainable=self._bn_trainable)
  self._activation1 = tfmot.quantization.keras.QuantizeWrapperV2(
      tf_utils.get_activation(self._activation, use_keras_layer=True),
      configs.DefaultNBitActivationQuantizeConfig(
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))

  self._conv2 = conv2d_quantized(
      filters=self._filters,
      kernel_size=3,
      strides=self._strides,
      dilation_rate=self._dilation_rate,
      padding='same',
      use_bias=False,
      kernel_initializer=self._kernel_initializer,
      kernel_regularizer=self._kernel_regularizer,
      bias_regularizer=self._bias_regularizer,
      activation=NoOpActivation())
  self._norm2 = self._norm(
      axis=self._bn_axis,
      momentum=self._norm_momentum,
      epsilon=self._norm_epsilon,
      trainable=self._bn_trainable)
  self._activation2 = tfmot.quantization.keras.QuantizeWrapperV2(
      tf_utils.get_activation(self._activation, use_keras_layer=True),
      configs.DefaultNBitActivationQuantizeConfig(
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))

  self._conv3 = conv2d_quantized(
      filters=self._filters * 4,
      kernel_size=1,
      strides=1,
      use_bias=False,
      kernel_initializer=self._kernel_initializer,
      kernel_regularizer=self._kernel_regularizer,
      bias_regularizer=self._bias_regularizer,
      activation=NoOpActivation())
  self._norm3 = self._norm_with_quantize(
      axis=self._bn_axis,
      momentum=self._norm_momentum,
      epsilon=self._norm_epsilon,
      trainable=self._bn_trainable)
  self._activation3 = tfmot.quantization.keras.QuantizeWrapperV2(
      tf_utils.get_activation(self._activation, use_keras_layer=True),
      configs.DefaultNBitActivationQuantizeConfig(
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))

  # Squeeze and excitation.
  if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
    self._squeeze_excitation = qat_nn_layers.SqueezeExcitationNBitQuantized(
        in_filters=self._filters * 4,
        out_filters=self._filters * 4,
        se_ratio=self._se_ratio,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        num_bits_weight=self._num_bits_weight,
        num_bits_activation=self._num_bits_activation)
  else:
    self._squeeze_excitation = None

  if self._stochastic_depth_drop_rate:
    self._stochastic_depth = nn_layers.StochasticDepth(
        self._stochastic_depth_drop_rate)
  else:
    self._stochastic_depth = None
  self._add = tfmot.quantization.keras.QuantizeWrapperV2(
      tf.keras.layers.Add(),
      configs.DefaultNBitQuantizeConfig(
          [], [], True,
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))

  super().build(input_shape)
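# For reference, a minimal sketch of the ResNet bottleneck data flow the
# layers above support (illustrative only; the actual `call()` is defined
# elsewhere in this class, and the `training` argument is assumed to be
# threaded through from it):
#
#   shortcut = inputs
#   if self._use_projection:
#     if self._resnetd_shortcut:
#       shortcut = self._shortcut1(self._shortcut0(inputs))
#     else:
#       shortcut = self._shortcut(inputs)
#     shortcut = self._norm0(shortcut)
#   x = self._activation1(self._norm1(self._conv1(inputs)))
#   x = self._activation2(self._norm2(self._conv2(x)))
#   x = self._norm3(self._conv3(x))
#   if self._squeeze_excitation is not None:
#     x = self._squeeze_excitation(x)
#   if self._stochastic_depth is not None:
#     x = self._stochastic_depth(x, training=training)
#   return self._activation3(self._add([x, shortcut]))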
def __init__(self,
             in_filters,
             out_filters,
             se_ratio,
             divisible_by=1,
             use_3d_input=False,
             kernel_initializer='VarianceScaling',
             kernel_regularizer=None,
             bias_regularizer=None,
             activation='relu',
             gating_activation='sigmoid',
             num_bits_weight=8,
             num_bits_activation=8,
             **kwargs):
  """Initializes a squeeze and excitation layer.

  Args:
    in_filters: An `int` number of filters of the input tensor.
    out_filters: An `int` number of filters of the output tensor.
    se_ratio: A `float` or None. If not None, the se ratio for the squeeze
      and excitation layer.
    divisible_by: An `int` that ensures all inner dimensions are divisible
      by this number.
    use_3d_input: A `bool` indicating whether the input is a 2D or 3D image.
    kernel_initializer: A `str` name of the kernel initializer for
      convolutional layers.
    kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
      Conv2D. Default to None.
    bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
      Conv2D. Default to None.
    activation: A `str` name of the activation function.
    gating_activation: A `str` name of the activation function for the
      final gating function.
    num_bits_weight: An `int` number of bits for the weight. Default to 8.
    num_bits_activation: An `int` number of bits for the activation.
      Default to 8.
    **kwargs: Additional keyword arguments to be passed.
  """
  super().__init__(**kwargs)

  self._in_filters = in_filters
  self._out_filters = out_filters
  self._se_ratio = se_ratio
  self._divisible_by = divisible_by
  self._use_3d_input = use_3d_input
  self._activation = activation
  self._gating_activation = gating_activation
  self._kernel_initializer = kernel_initializer
  self._kernel_regularizer = kernel_regularizer
  self._bias_regularizer = bias_regularizer
  self._num_bits_weight = num_bits_weight
  self._num_bits_activation = num_bits_activation
  if tf.keras.backend.image_data_format() == 'channels_last':
    if not use_3d_input:
      self._spatial_axis = [1, 2]
    else:
      self._spatial_axis = [1, 2, 3]
  else:
    if not use_3d_input:
      self._spatial_axis = [2, 3]
    else:
      self._spatial_axis = [2, 3, 4]
  self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
      tf_utils.get_activation(activation, use_keras_layer=True),
      configs.DefaultNBitActivationQuantizeConfig(
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))
  self._gating_activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
      tf_utils.get_activation(gating_activation, use_keras_layer=True),
      configs.DefaultNBitActivationQuantizeConfig(
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation))
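# Example usage, assuming this is the SqueezeExcitationNBitQuantized layer
# referenced by the blocks above. Illustrative only: the filter counts,
# se_ratio, and input shape below are made up for the sketch, and calling the
# layer relies on its build()/call() defined elsewhere in this class:
#
#   se_layer = SqueezeExcitationNBitQuantized(
#       in_filters=64,
#       out_filters=64,
#       se_ratio=0.25,
#       num_bits_weight=8,
#       num_bits_activation=8)
#   outputs = se_layer(tf.ones([2, 32, 32, 64]))  # Same shape as the input.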