Example #1
  def __init__(
      self,
      # DepthwiseConv2D params
      kernel_size,
      strides=(1, 1),
      padding='valid',
      depth_multiplier=1,
      data_format=None,
      depthwise_initializer='glorot_uniform',
      depthwise_regularizer=None,
      bias_regularizer=None,
      activity_regularizer=None,
      depthwise_constraint=None,
      bias_constraint=None,
      name=None,
      # BatchNormalization params
      axis=-1,
      momentum=0.99,
      epsilon=1e-3,
      center=True,
      scale=True,
      beta_initializer='zeros',
      gamma_initializer='ones',
      moving_mean_initializer='zeros',
      moving_variance_initializer='ones',
      beta_regularizer=None,
      gamma_regularizer=None,
      beta_constraint=None,
      gamma_constraint=None,
      renorm=False,
      renorm_clipping=None,
      renorm_momentum=0.99,
      fused=None,
      trainable=True,
      virtual_batch_size=None,
      adjustment=None,
      # Post-batchnorm activation instance.
      post_activation=None,
      # quantization params
      is_quantized=True,
      **kwargs):
    super(_DepthwiseConvBatchNorm2D, self).__init__(
        kernel_size,
        strides=strides,
        padding=padding,
        depth_multiplier=depth_multiplier,
        data_format=data_format,
        # Bias is disabled: batchnorm's beta offset makes a conv bias redundant.
        use_bias=False,
        depthwise_initializer=depthwise_initializer,
        depthwise_regularizer=depthwise_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        depthwise_constraint=depthwise_constraint,
        bias_constraint=bias_constraint,
        name=name,
        **kwargs)

    # TODO(b/187881826): conv_batchnorm should use v2 BatchNormalization layer
    self.batchnorm = tf.compat.v1.layers.BatchNormalization(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        beta_constraint=beta_constraint,
        gamma_constraint=gamma_constraint,
        renorm=renorm,
        renorm_clipping=renorm_clipping,
        renorm_momentum=renorm_momentum,
        fused=fused,
        trainable=trainable,
        virtual_batch_size=virtual_batch_size,
        adjustment=adjustment,
    )
    self.post_activation = tf.keras.activations.get(post_activation)

    self.is_quantized = is_quantized
    if self.is_quantized:
      self.weight_quantizer = (
          default_8bit_quantizers.Default8BitConvWeightsQuantizer())

      self.activation_quantizer = quantizers.MovingAverageQuantizer(
          num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
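
This constructor chains a DepthwiseConv2D parent to an internal BatchNormalization layer and an optional post-batchnorm activation, attaching weight and activation quantizers when is_quantized is set. A minimal usage sketch follows; it assumes the class above is the private _DepthwiseConvBatchNorm2D from TensorFlow Model Optimization (so the enclosing module's imports of tf, quantizers, and default_8bit_quantizers are in scope) and that its call method applies conv, batchnorm, and post_activation in that order. Internal classes like this are not public API, so treat this as illustration only.

  import numpy as np

  # Hypothetical construction; argument values are arbitrary examples.
  layer = _DepthwiseConvBatchNorm2D(
      kernel_size=(3, 3),
      strides=(2, 2),
      padding='same',
      post_activation='relu',  # resolved via tf.keras.activations.get
      is_quantized=True)

  # Depthwise conv -> batchnorm -> relu, with fake-quant ops inserted.
  inputs = np.random.rand(1, 32, 32, 3).astype(np.float32)
  outputs = layer(inputs, training=True)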
Example #2
    def __init__(self, weight_attrs, activation_attrs, quantize_output):
        super(Default8BitConvQuantizeConfig, self).__init__(
            weight_attrs, activation_attrs, quantize_output)

        self.weight_quantizer = (
            default_8bit_quantizers.Default8BitConvWeightsQuantizer())
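
This subclass reuses the generic Default8BitQuantizeConfig wiring and only swaps in Default8BitConvWeightsQuantizer, which quantizes conv kernels per output channel rather than per tensor. A hedged construction sketch; the attribute names below are assumptions about what the default 8-bit registry passes for Conv2D layers, not values taken from this snippet:

    config = Default8BitConvQuantizeConfig(
        weight_attrs=['kernel'],          # layer attribute holding the kernel
        activation_attrs=['activation'],  # layer attribute holding the activation
        quantize_output=False)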
Example #3
  def __init__(
      self,
      # Conv2D params
      filters,
      kernel_size,
      strides=(1, 1),
      padding='valid',
      data_format=None,
      dilation_rate=(1, 1),
      kernel_initializer='glorot_uniform',
      kernel_regularizer=None,
      bias_regularizer=None,
      activity_regularizer=None,
      kernel_constraint=None,
      bias_constraint=None,
      name=None,
      # BatchNormalization params
      axis=-1,
      momentum=0.99,
      epsilon=1e-3,
      center=True,
      scale=True,
      beta_initializer='zeros',
      gamma_initializer='ones',
      moving_mean_initializer='zeros',
      moving_variance_initializer='ones',
      beta_regularizer=None,
      gamma_regularizer=None,
      beta_constraint=None,
      gamma_constraint=None,
      renorm=False,
      renorm_clipping=None,
      renorm_momentum=0.99,
      fused=None,
      trainable=True,
      virtual_batch_size=None,
      adjustment=None,
      # Post-batchnorm activation.
      post_activation=None,
      # quantization params
      is_quantized=True,
      **kwargs):
    super(_ConvBatchNorm2D, self).__init__(
        filters,
        kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        # Bias is disabled: batchnorm's beta offset makes a conv bias redundant.
        use_bias=False,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        name=name,
        **kwargs)

    self.batchnorm = normalization.BatchNormalization(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        beta_constraint=beta_constraint,
        gamma_constraint=gamma_constraint,
        renorm=renorm,
        renorm_clipping=renorm_clipping,
        renorm_momentum=renorm_momentum,
        fused=fused,
        trainable=trainable,
        virtual_batch_size=virtual_batch_size,
        adjustment=adjustment,
    )

    # Named as post_activation to not conflict with Layer self.activation.
    self.post_activation = activations.get(post_activation)

    self.is_quantized = is_quantized
    if self.is_quantized:
      self.weight_quantizer = (
          default_8bit_quantizers.Default8BitConvWeightsQuantizer())

      self.activation_quantizer = quantizers.MovingAverageQuantizer(
          num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
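
For context on why both wrappers disable the conv bias: batch normalization subtracts a per-channel mean and adds a beta offset, so a separate bias is redundant, and quantization-aware training typically folds the batchnorm statistics into the kernel so that inference sees a single convolution. Below is a sketch of the standard folding arithmetic, written against NumPy as general background rather than code from this module:

  import numpy as np

  def fold_batchnorm(kernel, gamma, beta, mean, variance, epsilon=1e-3):
    """Folds batchnorm parameters into a conv kernel and bias.

    y = gamma * (conv(x, kernel) - mean) / sqrt(variance + eps) + beta
      = conv(x, kernel * scale) + (beta - mean * scale)
    """
    scale = gamma / np.sqrt(variance + epsilon)  # one scale per out-channel
    folded_kernel = kernel * scale  # broadcasts over the last (output) axis
    folded_bias = beta - mean * scale
    return folded_kernel, folded_bias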