Example 1
    def testGetsResultQuantizers_EmptyWhenFalse(self):
        layer = self._simple_dense_layer()
        quantize_config = configs.Default8BitQuantizeConfig([], [], False)

        output_quantizers = quantize_config.get_output_quantizers(layer)

        self.assertEqual([], output_quantizers)
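
The tests in these examples call a `_simple_dense_layer` helper that is not reproduced here. A minimal sketch, assuming it only needs to return a small built Dense layer so that `layer.kernel` and `layer.activation` exist, might look like:

    def _simple_dense_layer(self):
        # Assumed test helper: a small Dense layer, built so that `kernel`
        # exists and `activation` is a standard Keras activation.
        layer = tf.keras.layers.Dense(2, activation='relu')
        layer.build(input_shape=(None, 3))
        return layer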
Example 2
    def testGetsResultQuantizers_ReturnsQuantizer(self):
        layer = self._simple_dense_layer()
        quantize_config = configs.Default8BitQuantizeConfig([], [], True)

        output_quantizers = quantize_config.get_output_quantizers(layer)

        self.assertLen(output_quantizers, 1)
        self._assert_activation_quantizers(output_quantizers)
Example 3
    def testSetsQuantizeActivations(self):
        layer = self._simple_dense_layer()
        quantize_activation = tf.keras.activations.relu

        quantize_config = configs.Default8BitQuantizeConfig(['kernel'],
                                                            ['activation'],
                                                            False)
        quantize_config.set_quantize_activations(layer, [quantize_activation])

        self.assertEqual(layer.activation, quantize_activation)
Example 4
    def testSetsQuantizeWeights_ErrorOnWrongShapeOfWeight(self):
        layer = self._simple_dense_layer()
        quantize_kernel = tf.keras.backend.variable(np.ones([1, 2]))

        quantize_config = configs.Default8BitQuantizeConfig(['kernel'],
                                                            ['activation'],
                                                            False)

        with self.assertRaises(ValueError):
            quantize_config.set_quantize_weights(layer, [quantize_kernel])
Example 5
    def build(self, input_shape):
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        conv2d_quantized_output_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  True))
        num_reduced_filters = nn_layers.make_divisible(
            max(1, int(self._in_filters * self._se_ratio)),
            divisor=self._divisible_by,
            round_down_protect=self._round_down_protect)

        self._se_reduce = conv2d_quantized(
            filters=num_reduced_filters,
            kernel_size=1,
            strides=1,
            padding='same',
            use_bias=True,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())

        self._se_expand = conv2d_quantized_output_quantized(
            filters=self._out_filters,
            kernel_size=1,
            strides=1,
            padding='same',
            use_bias=True,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())

        self._multiply = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Multiply(),
            configs.Default8BitQuantizeConfig([], [], True))
        self._reduce_mean_quantizer = (
            tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
                num_bits=8,
                per_axis=False,
                symmetric=False,
                narrow_range=False))
        self._reduce_mean_quantizer_vars = self._reduce_mean_quantizer.build(
            None, 'reduce_mean_quantizer_vars', self)

        self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())
        self._create_gating_activation_layer()

        self._build_quantizer_vars()
        super().build(input_shape)
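
The `NoOpActivation()` passed to the wrapped convolutions above is another helper not reproduced in these excerpts. A plausible minimal sketch, assuming its only job is to pass tensors through unchanged so the real activation can be applied by a separately quantize-wrapped activation layer, is:

class NoOpActivation:
    """Sketch (assumed helper): identity activation.

    Lets the conv layer apply no activation itself; the actual activation is
    applied afterwards by a quantize-wrapped activation layer.
    """

    def __call__(self, x):
        return x

    def get_config(self):
        # Empty config so layers using this activation remain serializable.
        return {}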
Example 6
    def testGetsQuantizeActivationsAndQuantizers(self):
        layer = self._simple_dense_layer()

        quantize_config = configs.Default8BitQuantizeConfig(['kernel'],
                                                            ['activation'],
                                                            False)
        (activations, activation_quantizers) = self._convert_list(
            quantize_config.get_activations_and_quantizers(layer))

        self._assert_activation_quantizers(activation_quantizers)
        self.assertEqual([layer.activation], activations)
Example 7
    def testGetsQuantizeWeightsAndQuantizers(self):
        layer = self._simple_dense_layer()

        quantize_config = configs.Default8BitQuantizeConfig(['kernel'],
                                                            ['activation'],
                                                            False)
        (weights, weight_quantizers) = self._convert_list(
            quantize_config.get_weights_and_quantizers(layer))

        self._assert_weight_quantizers(weight_quantizers)
        self.assertEqual([layer.kernel], weights)
Example 8
    def testSetsQuantizeWeights(self):
        layer = self._simple_dense_layer()
        quantize_kernel = tf.keras.backend.variable(
            np.ones(layer.kernel.shape.as_list()))

        quantize_config = configs.Default8BitQuantizeConfig(['kernel'],
                                                            ['activation'],
                                                            False)
        quantize_config.set_quantize_weights(layer, [quantize_kernel])

        self._assert_kernel_equality(layer.kernel, quantize_kernel)
Example 9
    def testSetsQuantizeActivations_ErrorOnWrongNumberOfActivations(self):
        layer = self._simple_dense_layer()
        quantize_activation = tf.keras.activations.relu

        quantize_config = configs.Default8BitQuantizeConfig(['kernel'],
                                                            ['activation'],
                                                            False)

        with self.assertRaises(ValueError):
            quantize_config.set_quantize_activations(layer, [])

        with self.assertRaises(ValueError):
            quantize_config.set_quantize_activations(
                layer, [quantize_activation, quantize_activation])
Example 10
    def _create_gating_activation_layer(self):
        if self._gating_activation == 'hard_sigmoid':
            # Convert hard_sigmoid activation to quantizable keras layers so
            # each op can be properly quantized.
            # Formula is hard_sigmoid(x) = relu6(x + 3) * 0.16667.
            self._add = tfmot.quantization.keras.QuantizeWrapperV2(
                tf.keras.layers.Add(),
                configs.Default8BitQuantizeConfig([], [], True))
            self._relu6 = tfmot.quantization.keras.QuantizeWrapperV2(
                tf_utils.get_activation('relu6', use_keras_layer=True),
                configs.Default8BitActivationQuantizeConfig())
        else:
            self._gating_activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
                tf_utils.get_activation(self._gating_activation,
                                        use_keras_layer=True),
                configs.Default8BitActivationQuantizeConfig())
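
The `call()` path that consumes these layers is not included in the excerpt. A hedged sketch of how the decomposed hard_sigmoid could be evaluated with the `_add` and `_relu6` layers built above (method name and wiring are assumptions, not taken from the source) is:

    def _apply_gating_activation(self, x):
        # Hypothetical helper illustrating the decomposition above:
        # hard_sigmoid(x) = relu6(x + 3) * 0.16667, built from quantizable ops.
        if self._gating_activation == 'hard_sigmoid':
            x = self._relu6(self._add([x, tf.ones_like(x) * 3.0]))
            return x * 0.16667
        return self._gating_activation_layer(x)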
Example 11
    def testSerialization(self):
        quantize_config = configs.Default8BitQuantizeConfig(['kernel'],
                                                            ['activation'],
                                                            False)

        expected_config = {
            'class_name': 'Default8BitQuantizeConfig',
            'config': {
                'weight_attrs': ['kernel'],
                'activation_attrs': ['activation'],
                'quantize_output': False
            }
        }
        serialized_quantize_config = tf.keras.utils.serialize_keras_object(
            quantize_config)

        self.assertEqual(expected_config, serialized_quantize_config)

        quantize_config_from_config = tf.keras.utils.deserialize_keras_object(
            serialized_quantize_config,
            module_objects=globals(),
            custom_objects=configs._types_dict())

        self.assertEqual(quantize_config, quantize_config_from_config)
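
Outside of a test, the same round trip is usually done under a custom-object scope rather than by passing `custom_objects` explicitly. A hedged usage sketch, assuming `tfmot.quantization.keras.quantize_scope` accepts the dictionary returned by `configs._types_dict()`, is:

# Sketch: deserialize the config with the quantization custom objects in scope.
with tfmot.quantization.keras.quantize_scope(configs._types_dict()):
    restored_config = tf.keras.utils.deserialize_keras_object(
        serialized_quantize_config)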
Example 12
    def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
        """Build variables and child layers to prepare for calling."""
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        depthwise_conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.DepthwiseConv2D,
            configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
                                                  ['activation'], False))
        expand_filters = self._in_filters
        if self._expand_ratio > 1:
            # First 1x1 conv for channel expansion.
            expand_filters = nn_layers.make_divisible(
                self._in_filters * self._expand_ratio, self._divisible_by)

            expand_kernel = 1 if self._use_depthwise else self._kernel_size
            expand_stride = 1 if self._use_depthwise else self._strides

            self._conv0 = conv2d_quantized(
                filters=expand_filters,
                kernel_size=expand_kernel,
                strides=expand_stride,
                padding='same',
                use_bias=False,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=NoOpActivation())
            self._norm0 = self._norm_by_activation(self._activation)(
                axis=self._bn_axis,
                momentum=self._norm_momentum,
                epsilon=self._norm_epsilon)
            self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
                tf_utils.get_activation(self._activation,
                                        use_keras_layer=True),
                configs.Default8BitActivationQuantizeConfig())

        if self._use_depthwise:
            # Depthwise conv.
            self._conv1 = depthwise_conv2d_quantized(
                kernel_size=(self._kernel_size, self._kernel_size),
                strides=self._strides,
                padding='same',
                depth_multiplier=1,
                dilation_rate=self._dilation_rate,
                use_bias=False,
                depthwise_initializer=self._kernel_initializer,
                depthwise_regularizer=self._depthsize_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=NoOpActivation())
            self._norm1 = self._norm_by_activation(self._depthwise_activation)(
                axis=self._bn_axis,
                momentum=self._norm_momentum,
                epsilon=self._norm_epsilon)
            self._depthwise_activation_layer = (
                tfmot.quantization.keras.QuantizeWrapperV2(
                    tf_utils.get_activation(self._depthwise_activation,
                                            use_keras_layer=True),
                    configs.Default8BitActivationQuantizeConfig()))

        # Squeeze and excitation.
        if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
            logging.info('Use Squeeze and excitation.')
            in_filters = self._in_filters
            if self._expand_se_in_filters:
                in_filters = expand_filters
            self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized(
                in_filters=in_filters,
                out_filters=expand_filters,
                se_ratio=self._se_ratio,
                divisible_by=self._divisible_by,
                round_down_protect=self._se_round_down_protect,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=self._se_inner_activation,
                gating_activation=self._se_gating_activation)
        else:
            self._squeeze_excitation = None

        # Last 1x1 conv.
        self._conv2 = conv2d_quantized(
            filters=self._out_filters,
            kernel_size=1,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm2 = self._norm_with_quantize(axis=self._bn_axis,
                                               momentum=self._norm_momentum,
                                               epsilon=self._norm_epsilon)

        if self._stochastic_depth_drop_rate:
            self._stochastic_depth = nn_layers.StochasticDepth(
                self._stochastic_depth_drop_rate)
        else:
            self._stochastic_depth = None
        self._add = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Add(),
            configs.Default8BitQuantizeConfig([], [], True))

        super(InvertedBottleneckBlockQuantized, self).build(input_shape)
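
`nn_layers.make_divisible`, used here and in Example 5, rounds a channel count to a multiple of `divisor`. A minimal sketch of the usual MobileNet-style rounding rule, written here only for illustration and not copied from the source, is:

def make_divisible(value, divisor=8, min_value=None, round_down_protect=True):
    # Round `value` to the nearest multiple of `divisor`, never below `min_value`.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Optionally ensure rounding down does not drop more than ~10% of `value`.
    if round_down_protect and new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)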
Example 13
    def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
        """Build variables and child layers to prepare for calling."""
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        if self._use_projection:
            if self._resnetd_shortcut:
                self._shortcut0 = tf.keras.layers.AveragePooling2D(
                    pool_size=2, strides=self._strides, padding='same')
                self._shortcut1 = conv2d_quantized(
                    filters=self._filters * 4,
                    kernel_size=1,
                    strides=1,
                    use_bias=False,
                    kernel_initializer=self._kernel_initializer,
                    kernel_regularizer=self._kernel_regularizer,
                    bias_regularizer=self._bias_regularizer,
                    activation=NoOpActivation())
            else:
                self._shortcut = conv2d_quantized(
                    filters=self._filters * 4,
                    kernel_size=1,
                    strides=self._strides,
                    use_bias=False,
                    kernel_initializer=self._kernel_initializer,
                    kernel_regularizer=self._kernel_regularizer,
                    bias_regularizer=self._bias_regularizer,
                    activation=NoOpActivation())

            self._norm0 = self._norm_with_quantize(
                axis=self._bn_axis,
                momentum=self._norm_momentum,
                epsilon=self._norm_epsilon,
                trainable=self._bn_trainable)

        self._conv1 = conv2d_quantized(
            filters=self._filters,
            kernel_size=1,
            strides=1,
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm1 = self._norm(axis=self._bn_axis,
                                 momentum=self._norm_momentum,
                                 epsilon=self._norm_epsilon,
                                 trainable=self._bn_trainable)
        self._activation1 = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())

        self._conv2 = conv2d_quantized(
            filters=self._filters,
            kernel_size=3,
            strides=self._strides,
            dilation_rate=self._dilation_rate,
            padding='same',
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm2 = self._norm(axis=self._bn_axis,
                                 momentum=self._norm_momentum,
                                 epsilon=self._norm_epsilon,
                                 trainable=self._bn_trainable)
        self._activation2 = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())

        self._conv3 = conv2d_quantized(
            filters=self._filters * 4,
            kernel_size=1,
            strides=1,
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
        self._norm3 = self._norm_with_quantize(axis=self._bn_axis,
                                               momentum=self._norm_momentum,
                                               epsilon=self._norm_epsilon,
                                               trainable=self._bn_trainable)
        self._activation3 = tfmot.quantization.keras.QuantizeWrapperV2(
            tf_utils.get_activation(self._activation, use_keras_layer=True),
            configs.Default8BitActivationQuantizeConfig())

        if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
            self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized(
                in_filters=self._filters * 4,
                out_filters=self._filters * 4,
                se_ratio=self._se_ratio,
                kernel_initializer=self._kernel_initializer,
                kernel_regularizer=self._kernel_regularizer,
                bias_regularizer=self._bias_regularizer)
        else:
            self._squeeze_excitation = None

        if self._stochastic_depth_drop_rate:
            self._stochastic_depth = nn_layers.StochasticDepth(
                self._stochastic_depth_drop_rate)
        else:
            self._stochastic_depth = None
        self._add = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Add(),
            configs.Default8BitQuantizeConfig([], [], True))

        super(BottleneckBlockQuantized, self).build(input_shape)
Example 14
    def build(self, input_shape):
        height = input_shape[1]
        width = input_shape[2]
        channels = input_shape[3]

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if self._use_sync_bn else
                      tf.keras.layers.BatchNormalization)
        norm_with_quantize = _quantize_wrapped_layer(
            norm_layer, configs.Default8BitOutputQuantizeConfig())
        norm = norm_with_quantize if self._activation not in [
            'relu', 'relu6'
        ] else _quantize_wrapped_layer(norm_layer,
                                       configs.NoOpQuantizeConfig())

        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        depthwise_conv2d_quantized_output_quantized = _quantize_wrapped_layer(
            tf.keras.layers.DepthwiseConv2D,
            configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
                                                  ['activation'], True))

        self.aspp_layers = []

        conv1 = conv2d_quantized(filters=self._output_channels,
                                 kernel_size=(1, 1),
                                 kernel_initializer=self._kernel_initializer,
                                 kernel_regularizer=self._kernel_regularizer,
                                 use_bias=False,
                                 activation=NoOpActivation())
        norm1 = norm(axis=self._bn_axis,
                     momentum=self._batchnorm_momentum,
                     epsilon=self._batchnorm_epsilon)

        self.aspp_layers.append([conv1, norm1])

        for dilation_rate in self._dilation_rates:
            leading_layers = []
            kernel_size = (3, 3)
            if self._use_depthwise_convolution:
                leading_layers += [
                    depthwise_conv2d_quantized_output_quantized(
                        depth_multiplier=1,
                        kernel_size=kernel_size,
                        padding='same',
                        depthwise_regularizer=self._kernel_regularizer,
                        depthwise_initializer=self._kernel_initializer,
                        dilation_rate=dilation_rate,
                        use_bias=False,
                        activation=NoOpActivation())
                ]
                kernel_size = (1, 1)
            conv_dilation = leading_layers + [
                conv2d_quantized(filters=self._output_channels,
                                 kernel_size=kernel_size,
                                 padding='same',
                                 kernel_regularizer=self._kernel_regularizer,
                                 kernel_initializer=self._kernel_initializer,
                                 dilation_rate=dilation_rate,
                                 use_bias=False,
                                 activation=NoOpActivation())
            ]
            norm_dilation = norm(axis=self._bn_axis,
                                 momentum=self._batchnorm_momentum,
                                 epsilon=self._batchnorm_epsilon)

            self.aspp_layers.append(conv_dilation + [norm_dilation])

        if self._pool_kernel_size is None:
            pooling = [
                _quantize_wrapped_layer(
                    tf.keras.layers.GlobalAveragePooling2D,
                    configs.Default8BitQuantizeConfig([], [], True))(),
                _quantize_wrapped_layer(
                    tf.keras.layers.Reshape,
                    configs.Default8BitQuantizeConfig([], [],
                                                      True))((1, 1, channels))
            ]
        else:
            pooling = [
                _quantize_wrapped_layer(
                    tf.keras.layers.AveragePooling2D,
                    configs.Default8BitQuantizeConfig([], [], True))(
                        self._pool_kernel_size)
            ]

        conv2 = conv2d_quantized(filters=self._output_channels,
                                 kernel_size=(1, 1),
                                 kernel_initializer=self._kernel_initializer,
                                 kernel_regularizer=self._kernel_regularizer,
                                 use_bias=False,
                                 activation=NoOpActivation())
        norm2 = norm(axis=self._bn_axis,
                     momentum=self._batchnorm_momentum,
                     epsilon=self._batchnorm_epsilon)

        self.aspp_layers.append(pooling + [conv2, norm2])

        resizing = _quantize_wrapped_layer(
            tf.keras.layers.Resizing,
            configs.Default8BitQuantizeConfig([], [], True))
        self._resizing_layer = resizing(height,
                                        width,
                                        interpolation=self._interpolation)

        self._projection = [
            conv2d_quantized(filters=self._output_channels,
                             kernel_size=(1, 1),
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer,
                             use_bias=False,
                             activation=NoOpActivation()),
            norm_with_quantize(axis=self._bn_axis,
                               momentum=self._batchnorm_momentum,
                               epsilon=self._batchnorm_epsilon)
        ]
        self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
        concat = _quantize_wrapped_layer(
            tf.keras.layers.Concatenate,
            configs.Default8BitQuantizeConfig([], [], True))
        self._concat_layer = concat(axis=-1)
Example 15
    def build(self, input_shape: Sequence[tf.TensorShape]):
        """Creates the variables of the segmentation head."""
        # When input_shape is a list/tuple, the first corresponds to backbone
        # features used for resizing the decoder features (the second) if feature
        # fusion type is `deeplabv3plus`.
        backbone_shape = input_shape[0]
        use_depthwise_convolution = self._config_dict[
            'use_depthwise_convolution']
        random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
        conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  False))
        conv2d_quantized_output_quantized = _quantize_wrapped_layer(
            tf.keras.layers.Conv2D,
            configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
                                                  True))
        depthwise_conv2d_quantized = _quantize_wrapped_layer(
            tf.keras.layers.DepthwiseConv2D,
            configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
                                                  ['activation'], False))
        conv_kwargs = {
            'kernel_size': 3 if not use_depthwise_convolution else 1,
            'padding': 'same',
            'use_bias': False,
            'kernel_initializer': random_initializer,
            'kernel_regularizer': self._config_dict['kernel_regularizer'],
        }

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if self._config_dict['use_sync_bn'] else
                      tf.keras.layers.BatchNormalization)
        norm_with_quantize = _quantize_wrapped_layer(
            norm_layer, configs.Default8BitOutputQuantizeConfig())
        norm = norm_with_quantize if self._config_dict['activation'] not in [
            'relu', 'relu6'
        ] else _quantize_wrapped_layer(norm_layer,
                                       configs.NoOpQuantizeConfig())

        bn_kwargs = {
            'axis': self._bn_axis,
            'momentum': self._config_dict['norm_momentum'],
            'epsilon': self._config_dict['norm_epsilon'],
        }

        if self._config_dict['feature_fusion'] == 'deeplabv3plus':
            # Deeplabv3+ feature fusion layers.
            self._dlv3p_conv = conv2d_quantized(
                kernel_size=1,
                padding='same',
                use_bias=False,
                kernel_initializer=tf.keras.initializers.RandomNormal(
                    stddev=0.01),
                kernel_regularizer=self._config_dict['kernel_regularizer'],
                name='segmentation_head_deeplabv3p_fusion_conv',
                filters=self._config_dict['low_level_num_filters'],
                activation=NoOpActivation())

            self._dlv3p_norm = norm(
                name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)

        # Segmentation head layers.
        self._convs = []
        self._norms = []
        for i in range(self._config_dict['num_convs']):
            if use_depthwise_convolution:
                self._convs.append(
                    depthwise_conv2d_quantized(
                        name='segmentation_head_depthwise_conv_{}'.format(i),
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        depthwise_initializer=random_initializer,
                        depthwise_regularizer=self.
                        _config_dict['kernel_regularizer'],
                        depth_multiplier=1,
                        activation=NoOpActivation()))
                norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
                self._norms.append(norm(name=norm_name, **bn_kwargs))
            conv_name = 'segmentation_head_conv_{}'.format(i)
            self._convs.append(
                conv2d_quantized(name=conv_name,
                                 filters=self._config_dict['num_filters'],
                                 activation=NoOpActivation(),
                                 **conv_kwargs))
            norm_name = 'segmentation_head_norm_{}'.format(i)
            self._norms.append(norm(name=norm_name, **bn_kwargs))

        self._classifier = conv2d_quantized_output_quantized(
            name='segmentation_output',
            filters=self._config_dict['num_classes'],
            kernel_size=self._config_dict['prediction_kernel_size'],
            padding='same',
            bias_initializer=tf.zeros_initializer(),
            kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
            kernel_regularizer=self._config_dict['kernel_regularizer'],
            bias_regularizer=self._config_dict['bias_regularizer'],
            activation=NoOpActivation())

        upsampling = _quantize_wrapped_layer(
            tf.keras.layers.UpSampling2D,
            configs.Default8BitQuantizeConfig([], [], True))
        self._upsampling_layer = upsampling(
            size=(self._config_dict['upsample_factor'],
                  self._config_dict['upsample_factor']),
            interpolation='nearest')
        self._resizing_layer = tf.keras.layers.Resizing(
            backbone_shape[1], backbone_shape[2], interpolation='bilinear')

        concat = _quantize_wrapped_layer(
            tf.keras.layers.Concatenate,
            configs.Default8BitQuantizeConfig([], [], True))
        self._concat_layer = concat(axis=self._bn_axis)

        super().build(input_shape)
Example 16
Conv2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.Conv2D,
    configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
Conv2DOutputQuantized = quantize_wrapped_layer(
    tf.keras.layers.Conv2D,
    configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
DepthwiseConv2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.DepthwiseConv2D,
    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
                                          False))
DepthwiseConv2DOutputQuantized = quantize_wrapped_layer(
    tf.keras.layers.DepthwiseConv2D,
    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
                                          True))
GlobalAveragePooling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.GlobalAveragePooling2D,
    configs.Default8BitQuantizeConfig([], [], True))
AveragePooling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.AveragePooling2D,
    configs.Default8BitQuantizeConfig([], [], True))
ResizingQuantized = quantize_wrapped_layer(
    tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True))
ConcatenateQuantized = quantize_wrapped_layer(
    tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [],
                                                                   True))
UpSampling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.UpSampling2D,
    configs.Default8BitQuantizeConfig([], [], True))
ReshapeQuantized = quantize_wrapped_layer(
    tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True))

# pylint:disable=g-long-lambda
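
Both the module-level aliases above and the `_quantize_wrapped_layer` calls in the earlier `build()` methods rely on a small factory that wraps a Keras layer class in `QuantizeWrapperV2` with a given quantize config. The factory itself is not shown in these excerpts; a minimal sketch is:

def quantize_wrapped_layer(cls, quantize_config):
    """Sketch: returns a constructor building `cls(...)` pre-wrapped in
    QuantizeWrapperV2 with the given quantize_config."""

    def constructor(*args, **kwargs):
        return tfmot.quantization.keras.QuantizeWrapperV2(
            cls(*args, **kwargs), quantize_config)

    return constructor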
Example 17
    def build(self, input_shape: Sequence[tf.TensorShape]):
        """Creates the variables of the segmentation head."""
        # When input_shape is a list/tuple, the first corresponds to backbone
        # features used for resizing the decoder features (the second) if feature
        # fusion type is `deeplabv3plus`.
        backbone_shape = input_shape[0]
        use_depthwise_convolution = self._config_dict[
            'use_depthwise_convolution']
        random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
        conv_kwargs = {
            'kernel_size': 3 if not use_depthwise_convolution else 1,
            'padding': 'same',
            'use_bias': False,
            'kernel_initializer': random_initializer,
            'kernel_regularizer': self._config_dict['kernel_regularizer'],
        }

        norm_layer = (tf.keras.layers.experimental.SyncBatchNormalization
                      if self._config_dict['use_sync_bn'] else
                      tf.keras.layers.BatchNormalization)
        norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
        norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
        norm = helper.norm_by_activation(self._config_dict['activation'],
                                         norm_with_quantize, norm_no_quantize)

        bn_kwargs = {
            'axis': self._bn_axis,
            'momentum': self._config_dict['norm_momentum'],
            'epsilon': self._config_dict['norm_epsilon'],
        }

        if self._config_dict['feature_fusion'] in [
                FeatureFusion.DEEPLABV3PLUS,
                FeatureFusion.DEEPLABV3PLUS_SUM_TO_MERGE
        ]:
            # Deeplabv3+ feature fusion layers.
            self._dlv3p_conv = helper.Conv2DQuantized(
                kernel_size=1,
                padding='same',
                use_bias=False,
                kernel_initializer=tf_utils.clone_initializer(
                    random_initializer),
                kernel_regularizer=self._config_dict['kernel_regularizer'],
                name='segmentation_head_deeplabv3p_fusion_conv',
                filters=self._config_dict['low_level_num_filters'],
                activation=helper.NoOpActivation())

            self._dlv3p_norm = norm(
                name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)

        # Segmentation head layers.
        self._convs = []
        self._norms = []
        for i in range(self._config_dict['num_convs']):
            if use_depthwise_convolution:
                self._convs.append(
                    helper.DepthwiseConv2DQuantized(
                        name='segmentation_head_depthwise_conv_{}'.format(i),
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        depthwise_initializer=tf_utils.clone_initializer(
                            random_initializer),
                        depthwise_regularizer=self.
                        _config_dict['kernel_regularizer'],
                        depth_multiplier=1,
                        activation=helper.NoOpActivation()))
                norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
                self._norms.append(norm(name=norm_name, **bn_kwargs))
            conv_name = 'segmentation_head_conv_{}'.format(i)
            self._convs.append(
                helper.Conv2DQuantized(
                    name=conv_name,
                    filters=self._config_dict['num_filters'],
                    activation=helper.NoOpActivation(),
                    **conv_kwargs))
            norm_name = 'segmentation_head_norm_{}'.format(i)
            self._norms.append(norm(name=norm_name, **bn_kwargs))

        self._classifier = helper.Conv2DOutputQuantized(
            name='segmentation_output',
            filters=self._config_dict['num_classes'],
            kernel_size=self._config_dict['prediction_kernel_size'],
            padding='same',
            bias_initializer=tf.zeros_initializer(),
            kernel_initializer=tf_utils.clone_initializer(random_initializer),
            kernel_regularizer=self._config_dict['kernel_regularizer'],
            bias_regularizer=self._config_dict['bias_regularizer'],
            activation=helper.NoOpActivation())

        self._upsampling_layer = helper.UpSampling2DQuantized(
            size=(self._config_dict['upsample_factor'],
                  self._config_dict['upsample_factor']),
            interpolation='nearest')
        self._resizing_layer = helper.ResizingQuantized(
            backbone_shape[1], backbone_shape[2], interpolation='bilinear')

        self._concat_layer = helper.ConcatenateQuantized(axis=self._bn_axis)
        self._add_layer = tfmot.quantization.keras.QuantizeWrapperV2(
            tf.keras.layers.Add(),
            configs.Default8BitQuantizeConfig([], [], True))

        super().build(input_shape)
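
The `helper.norm_by_activation` selection used here mirrors the inline conditional in Examples 14 and 15: when the following activation is `relu`/`relu6`, the normalization layer can skip its own output quantizer because the quantize-wrapped activation layer quantizes the output anyway. A hedged sketch of that selector, written for illustration, is:

def norm_by_activation(activation, norm_quantized, norm_no_quantized):
    # relu/relu6 are followed by a quantized activation layer, so the preceding
    # norm does not need its own output quantization; other activations keep it.
    if activation in ('relu', 'relu6'):
        return norm_no_quantized
    return norm_quantized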