Example #1
    def get_output_quantizers(self, layer):
        self._assert_activation_layer(layer)

        if not hasattr(layer.activation, '__name__'):
            raise ValueError('Activation {} not supported by '
                             'DefaultNBitActivationQuantizeConfig.'.format(
                                 layer.activation))

        if layer.activation.__name__ in ['relu', 'swish']:
            # 'relu' should generally get fused into the previous layer.
            return [
                quantizers.MovingAverageQuantizer(
                    num_bits=self._num_bits_activation,
                    per_axis=False,
                    symmetric=False,
                    narrow_range=False)
            ]  # activation/output
        elif layer.activation.__name__ in [
                'linear', 'softmax', 'sigmoid', 'tanh'
        ]:
            return []

        raise ValueError('Activation {} not supported by '
                         'DefaultNBitActivationQuantizeConfig.'.format(
                             layer.activation))
Example #2
  def get_output_quantizers(self, layer):
    return [
        quantizers.MovingAverageQuantizer(
            num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
    ]
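The two snippets above only show the get_output_quantizers hook. As a point of reference, here is a minimal sketch of how such a config is typically wired into a model with the public tensorflow_model_optimization API; the class name OutputOnlyQuantizeConfig and the toy Sequential model are illustrative assumptions, not part of the original snippets.

import tensorflow as tf
import tensorflow_model_optimization as tfmot

quantizers = tfmot.quantization.keras.quantizers


class OutputOnlyQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
  """Illustrative config: quantize only the layer output, as in the examples."""

  def get_weights_and_quantizers(self, layer):
    return []  # no weight quantization

  def get_activations_and_quantizers(self, layer):
    return []  # no in-layer activation quantization

  def set_quantize_weights(self, layer, quantize_weights):
    pass

  def set_quantize_activations(self, layer, quantize_activations):
    pass

  def get_output_quantizers(self, layer):
    # Same quantizer parameters as Example #2.
    return [
        quantizers.MovingAverageQuantizer(
            num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
    ]

  def get_config(self):
    return {}


# Annotate a layer with the config, then apply quantization inside a scope
# so the custom config can be deserialized.
annotated = tfmot.quantization.keras.quantize_annotate_model(
    tf.keras.Sequential([
        tfmot.quantization.keras.quantize_annotate_layer(
            tf.keras.layers.Dense(10, input_shape=(4,)),
            quantize_config=OutputOnlyQuantizeConfig()),
        tf.keras.layers.Dense(2),
    ]))

with tfmot.quantization.keras.quantize_scope(
    {'OutputOnlyQuantizeConfig': OutputOnlyQuantizeConfig}):
  quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated)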
Example #3
    def __init__(self, weight_attrs, activation_attrs, quantize_output):
        self.weight_attrs = weight_attrs
        self.activation_attrs = activation_attrs
        self.quantize_output = quantize_output

        # TODO(pulkitb): For some layers such as Conv2D, per_axis should be True.
        # Add mapping for which layers support per_axis.
        self.weight_quantizer = quantizers.LastValueQuantizer(
            num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
        self.activation_quantizer = quantizers.MovingAverageQuantizer(
            num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
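
The constructor above only stores the configured attribute names and builds shared quantizers. A minimal sketch of the accessor methods that would typically accompany it in a QuantizeConfig follows; the method bodies are assumptions inferred from the weight_attrs, activation_attrs and quantize_output fields, not code taken from the original snippet.

    def get_weights_and_quantizers(self, layer):
        # Pair each configured weight attribute with the shared weight quantizer.
        return [(getattr(layer, attr), self.weight_quantizer)
                for attr in self.weight_attrs]

    def get_activations_and_quantizers(self, layer):
        # Pair each configured activation attribute with the activation quantizer.
        return [(getattr(layer, attr), self.activation_quantizer)
                for attr in self.activation_attrs]

    def get_output_quantizers(self, layer):
        # Quantize the layer output only when requested at construction time.
        if self.quantize_output:
            return [self.activation_quantizer]
        return []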
Example #4
  def replacement(self, match_layer):
    # TODO(pulkitb): Replace quantizer with InputLayer specific quantizer.
    quant_layer = quantize_layer.QuantizeLayer(
        quantizers.MovingAverageQuantizer(
            num_bits=8, per_axis=False, symmetric=False, narrow_range=False))
    layer_config = keras.layers.serialize(quant_layer)
    layer_config['name'] = quant_layer.name

    quant_layer_node = LayerNode(
        layer_config,
        input_layers=[match_layer])

    return quant_layer_node
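The replacement() above is one half of a graph transform. Below is a minimal sketch of the surrounding Transform subclass that would trigger it on InputLayers; the module paths and the class name InputLayerQuantize follow the transforms interface but are assumptions, since they are not shown in the original snippet.

from tensorflow_model_optimization.python.core.quantization.keras import quantize_layer
from tensorflow_model_optimization.python.core.quantization.keras import quantizers
from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms

LayerNode = transforms.LayerNode
LayerPattern = transforms.LayerPattern


class InputLayerQuantize(transforms.Transform):
  """Matches bare InputLayers so replacement() can append a QuantizeLayer."""

  def pattern(self):
    return LayerPattern('InputLayer')

  def custom_objects(self):
    # Needed so the serialized QuantizeLayer built in replacement() can be
    # rebuilt when the transformed model is deserialized.
    return {
        'QuantizeLayer': quantize_layer.QuantizeLayer,
        'MovingAverageQuantizer': quantizers.MovingAverageQuantizer,
    }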
Example #5
    def __init__(
            self,
            # Conv2D params
            filters,
            kernel_size,
            strides=(1, 1),
            padding='valid',
            data_format=None,
            dilation_rate=(1, 1),
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            name=None,
            # BatchNormalization params
            axis=-1,
            momentum=0.99,
            epsilon=1e-3,
            center=True,
            scale=True,
            beta_initializer='zeros',
            gamma_initializer='ones',
            moving_mean_initializer='zeros',
            moving_variance_initializer='ones',
            beta_regularizer=None,
            gamma_regularizer=None,
            beta_constraint=None,
            gamma_constraint=None,
            renorm=False,
            renorm_clipping=None,
            renorm_momentum=0.99,
            fused=None,
            trainable=True,
            virtual_batch_size=None,
            adjustment=None,
            # Post-batchnorm activation.
            post_activation=None,
            # quantization params
            is_quantized=True,
            **kwargs):
        super(_ConvBatchNorm2D,
              self).__init__(filters,
                             kernel_size,
                             strides=strides,
                             padding=padding,
                             data_format=data_format,
                             dilation_rate=dilation_rate,
                             use_bias=False,
                             kernel_initializer=kernel_initializer,
                             bias_initializer=bias_initializer,
                             kernel_regularizer=kernel_regularizer,
                             bias_regularizer=bias_regularizer,
                             activity_regularizer=activity_regularizer,
                             kernel_constraint=kernel_constraint,
                             bias_constraint=bias_constraint,
                             name=name,
                             **kwargs)

        self.batchnorm = BatchNormalization(
            axis=axis,
            momentum=momentum,
            epsilon=epsilon,
            center=center,
            scale=scale,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer,
            moving_mean_initializer=moving_mean_initializer,
            moving_variance_initializer=moving_variance_initializer,
            beta_regularizer=beta_regularizer,
            gamma_regularizer=gamma_regularizer,
            beta_constraint=beta_constraint,
            gamma_constraint=gamma_constraint,
            renorm=renorm,
            renorm_clipping=renorm_clipping,
            renorm_momentum=renorm_momentum,
            fused=fused,
            trainable=trainable,
            virtual_batch_size=virtual_batch_size,
            adjustment=adjustment,
        )

        # Named as post_activation to not conflict with Layer self.activation.
        self.post_activation = activations.get(post_activation)

        self.is_quantized = is_quantized
        if self.is_quantized:
            # TODO(b/142132535): update when we move to new quantization scheme.
            self.weight_quantizer = quantizers.LastValueQuantizer(
                num_bits=8, per_axis=False, symmetric=False)

            self.activation_quantizer = quantizers.MovingAverageQuantizer(
                num_bits=8, per_axis=False, symmetric=False)
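
For reference, the unfused Keras graph that _ConvBatchNorm2D folds into a single quantization-friendly layer looks roughly like the sketch below; the input shape, filter count and ReLU activation are arbitrary illustrative choices, not values from the original module.

import tensorflow as tf

inputs = tf.keras.Input(shape=(32, 32, 3))
# Conv2D without bias, exactly as the fused layer configures its parent class.
x = tf.keras.layers.Conv2D(8, (3, 3), use_bias=False)(inputs)
x = tf.keras.layers.BatchNormalization()(x)
# Optional post-batchnorm activation, mirroring post_activation above.
outputs = tf.keras.layers.ReLU()(x)
model = tf.keras.Model(inputs, outputs)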
Example #6
  def __init__(
      self,
      # DepthwiseConv2D params
      kernel_size,
      strides=(1, 1),
      padding='valid',
      depth_multiplier=1,
      data_format=None,
      depthwise_initializer='glorot_uniform',
      depthwise_regularizer=None,
      bias_regularizer=None,
      activity_regularizer=None,
      depthwise_constraint=None,
      bias_constraint=None,
      name=None,
      # BatchNormalization params
      axis=-1,
      momentum=0.99,
      epsilon=1e-3,
      center=True,
      scale=True,
      beta_initializer='zeros',
      gamma_initializer='ones',
      moving_mean_initializer='zeros',
      moving_variance_initializer='ones',
      beta_regularizer=None,
      gamma_regularizer=None,
      beta_constraint=None,
      gamma_constraint=None,
      renorm=False,
      renorm_clipping=None,
      renorm_momentum=0.99,
      fused=None,
      trainable=True,
      virtual_batch_size=None,
      adjustment=None,
      # Post-batchnorm activation instance.
      post_activation=None,
      # quantization params
      is_quantized=True,
      **kwargs):
    super(_DepthwiseConvBatchNorm2D, self).__init__(
        kernel_size,
        strides=strides,
        padding=padding,
        depth_multiplier=depth_multiplier,
        data_format=data_format,
        use_bias=False,
        depthwise_initializer=depthwise_initializer,
        depthwise_regularizer=depthwise_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        depthwise_constraint=depthwise_constraint,
        bias_constraint=bias_constraint,
        name=name,
        **kwargs)

    # TODO(b/187881826): conv_batchnorm should use v2 BatchNormalization layer
    self.batchnorm = tf.compat.v1.layers.BatchNormalization(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        beta_constraint=beta_constraint,
        gamma_constraint=gamma_constraint,
        renorm=renorm,
        renorm_clipping=renorm_clipping,
        renorm_momentum=renorm_momentum,
        fused=fused,
        trainable=trainable,
        virtual_batch_size=virtual_batch_size,
        adjustment=adjustment,
    )
    self.post_activation = tf.keras.activations.get(post_activation)

    self.is_quantized = is_quantized
    if self.is_quantized:
      self.weight_quantizer = (
          default_8bit_quantizers.Default8BitConvWeightsQuantizer())

      self.activation_quantizer = quantizers.MovingAverageQuantizer(
          num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
Example #7
    def testMovingAverageQuantizer(self):
        quantizer = quantizers.MovingAverageQuantizer(**self.quant_params)

        self._test_quantizer(quantizer)
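
Since quant_params and _test_quantizer are not shown in the test above, here is a hedged, standalone sketch of exercising a MovingAverageQuantizer directly through the Quantizer interface; the Dense host layer and tensor shapes are illustrative assumptions.

import tensorflow as tf
import tensorflow_model_optimization as tfmot

quantizers = tfmot.quantization.keras.quantizers

# Host layer whose add_weight() the quantizer uses to create its min/max state.
layer = tf.keras.layers.Dense(10)
layer.build((None, 10))

quantizer = quantizers.MovingAverageQuantizer(
    num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
quant_vars = quantizer.build(tf.TensorShape([None, 10]), 'output', layer)

x = tf.random.uniform((2, 10), minval=-1.0, maxval=1.0)
# training=True updates the moving min/max before fake-quantizing the input.
fake_quant_x = quantizer(x, training=True, weights=quant_vars)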