Example #1
    def replacement(self, match_layer: _LayerNode) -> _LayerNode:
        """See base class."""
        bottleneck_layer = match_layer.layer
        bottleneck_config = bottleneck_layer['config']
        bottleneck_config['num_bits_weight'] = self._num_bits_weight
        bottleneck_config['num_bits_activation'] = self._num_bits_activation
        bottleneck_names_and_weights = list(match_layer.names_and_weights)
        quantized_layer = self._quantized_layer_class(**bottleneck_config)
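        # compute_output_shape is called with a dummy shape to build the layer
        # so that its weights (including quantizer variables) exist below.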
        dummy_input_shape = [1, 1, 1, 1]
        quantized_layer.compute_output_shape(dummy_input_shape)
        quantized_names_and_weights = zip(
            [weight.name for weight in quantized_layer.weights],
            quantized_layer.get_weights())
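        # Keep the quantizer-specific weights created by the new layer and
        # carry over the trained bottleneck weights for every other slot.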
        match_idx = 0
        names_and_weights = []
        for name_and_weight in quantized_names_and_weights:
            if not self._is_quantization_weight_name(name=name_and_weight[0]):
                name_and_weight = bottleneck_names_and_weights[match_idx]
                match_idx += 1
            names_and_weights.append(name_and_weight)

        if match_idx != len(bottleneck_names_and_weights):
            raise ValueError(
                'Only {}/{} of the Bottleneck weights were transformed.'.format(
                    match_idx, len(bottleneck_names_and_weights)))
        quantized_layer_config = keras.layers.serialize(quantized_layer)
        quantized_layer_config['name'] = quantized_layer_config['config'][
            'name']
        layer_metadata = {
            'quantize_config':
            configs.DefaultNBitOutputQuantizeConfig(
                num_bits_weight=self._num_bits_weight,
                num_bits_activation=self._num_bits_activation)
        }

        return _LayerNode(quantized_layer_config,
                          metadata=layer_metadata,
                          names_and_weights=names_and_weights)
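The loop above keeps the quantizer-specific weights created by the new layer and reuses the trained bottleneck weights everywhere else. A minimal, self-contained sketch of that carry-over pattern (all names below are illustrative, not taken from the library):

def merge_weights(quantized, original, is_quantizer_name):
    """Reuses `original` weights for every slot that is not quantizer-specific."""
    merged, match_idx = [], 0
    for name, weight in quantized:
        if not is_quantizer_name(name):
            # Non-quantizer slot: reuse the original layer's weight.
            name, weight = original[match_idx]
            match_idx += 1
        merged.append((name, weight))
    if match_idx != len(original):
        raise ValueError('Not all original weights were consumed.')
    return merged

# One conv kernel plus one quantizer variable; only the kernel is replaced.
quantized = [('conv/kernel:0', 'fresh'), ('conv/kernel_quantizer_min:0', 0.0)]
original = [('bottleneck/conv/kernel:0', 'trained')]
print(merge_weights(quantized, original, lambda name: 'quantizer' in name))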
Example #2
  def __init__(self,
               filters: int,
               strides: int,
               dilation_rate: int = 1,
               use_projection: bool = False,
               se_ratio: Optional[float] = None,
               resnetd_shortcut: bool = False,
               stochastic_depth_drop_rate: Optional[float] = None,
               kernel_initializer: str = 'VarianceScaling',
               kernel_regularizer: tf.keras.regularizers.Regularizer = None,
               bias_regularizer: tf.keras.regularizers.Regularizer = None,
               activation: str = 'relu',
               use_sync_bn: bool = False,
               norm_momentum: float = 0.99,
               norm_epsilon: float = 0.001,
               bn_trainable: bool = True,
               num_bits_weight: int = 8,
               num_bits_activation: int = 8,  # pytype: disable=annotation-type-mismatch  # typed-keras
               **kwargs):
    """Initializes a standard bottleneck block with BN after convolutions.

    Args:
      filters: An `int` number of filters for the first two convolutions. Note
        that the third and final convolution will use 4 times as many filters.
      strides: An `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      dilation_rate: An `int` dilation rate of the convolutions. Defaults to 1.
      use_projection: A `bool` for whether this block should use a projection
        shortcut (versus the default identity shortcut). This is usually `True`
        for the first block of a block group, which may change the number of
        filters and the resolution.
      se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
      resnetd_shortcut: A `bool`. If True, apply the resnetd style modification
        to the shortcut connection.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
        the stochastic depth layer.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      activation: A `str` name of the activation function.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      bn_trainable: A `bool` that indicates whether batch norm layers should be
        trainable. Defaults to True.
      num_bits_weight: An `int` number of bits for the weights. Defaults to 8.
      num_bits_activation: An `int` number of bits for the activations.
        Defaults to 8.
      **kwargs: Additional keyword arguments to be passed.
    """
    super().__init__(**kwargs)

    self._filters = filters
    self._strides = strides
    self._dilation_rate = dilation_rate
    self._use_projection = use_projection
    self._se_ratio = se_ratio
    self._resnetd_shortcut = resnetd_shortcut
    self._use_sync_bn = use_sync_bn
    self._activation = activation
    self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
    self._kernel_initializer = kernel_initializer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation
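    # Two normalization factories: `_norm` leaves its output unquantized
    # (NoOpQuantizeConfig), while `_norm_with_quantize` also quantizes the
    # normalization output to the configured number of bits.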
    if use_sync_bn:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    else:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    self._bn_trainable = bn_trainable
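A minimal usage sketch for the initializer above. The snippet does not show the enclosing class, so `BottleneckBlockNBitQuantized` is an assumed name, and the shapes are illustrative:

import tensorflow as tf

block = BottleneckBlockNBitQuantized(  # hypothetical class name
    filters=64,
    strides=1,
    use_projection=True,    # typical for the first block of a block group
    num_bits_weight=4,      # override the 8-bit defaults
    num_bits_activation=4)
outputs = block(tf.zeros([2, 56, 56, 256]))  # final conv emits 4 * filters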
Example #3
  def __init__(self,
               in_filters,
               out_filters,
               expand_ratio,
               strides,
               kernel_size=3,
               se_ratio=None,
               stochastic_depth_drop_rate=None,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation='relu',
               se_inner_activation='relu',
               se_gating_activation='sigmoid',
               expand_se_in_filters=False,
               depthwise_activation=None,
               use_sync_bn=False,
               dilation_rate=1,
               divisible_by=1,
               regularize_depthwise=False,
               use_depthwise=True,
               use_residual=True,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               num_bits_weight: int = 8,
               num_bits_activation: int = 8,
               **kwargs):
    """Initializes an inverted bottleneck block with BN after convolutions.

    Args:
      in_filters: An `int` number of filters of the input tensor.
      out_filters: An `int` number of filters of the output tensor.
      expand_ratio: An `int` expand ratio for the inverted bottleneck block.
      strides: An `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      kernel_size: An `int` kernel_size of the depthwise conv layer.
      se_ratio: A `float` or None. If not None, se ratio for the squeeze and
        excitation layer.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
        the stochastic depth layer.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      activation: A `str` name of the activation function.
      se_inner_activation: A `str` name of squeeze-excitation inner activation.
      se_gating_activation: A `str` name of squeeze-excitation gating
        activation.
      expand_se_in_filters: A `bool` of whether or not to expand `in_filters`
        in the squeeze-and-excitation layer.
      depthwise_activation: A `str` name of the activation function for
        depthwise only.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      dilation_rate: An `int` dilation rate to use for dilated convolution,
        applied to all spatial dimensions.
      divisible_by: An `int` that ensures all inner dimensions are divisible by
        this number.
      regularize_depthwise: A `bool` of whether or not to apply regularization
        on the depthwise convolution.
      use_depthwise: A `bool` of whether to use a depthwise convolution; if
        False, fused convolutions are used instead.
      use_residual: A `bool` of whether to include residual connection between
        input and output.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      num_bits_weight: An `int` number of bits for the weights. Defaults to 8.
      num_bits_activation: An `int` number of bits for the activations.
        Defaults to 8.
      **kwargs: Additional keyword arguments to be passed.
    """
    super().__init__(**kwargs)

    self._in_filters = in_filters
    self._out_filters = out_filters
    self._expand_ratio = expand_ratio
    self._strides = strides
    self._kernel_size = kernel_size
    self._se_ratio = se_ratio
    self._divisible_by = divisible_by
    self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
    self._dilation_rate = dilation_rate
    self._use_sync_bn = use_sync_bn
    self._regularize_depthwise = regularize_depthwise
    self._use_depthwise = use_depthwise
    self._use_residual = use_residual
    self._activation = activation
    self._se_inner_activation = se_inner_activation
    self._se_gating_activation = se_gating_activation
    self._depthwise_activation = depthwise_activation
    self._kernel_initializer = kernel_initializer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._expand_se_in_filters = expand_se_in_filters
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation

    if use_sync_bn:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    else:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    if not depthwise_activation:
      self._depthwise_activation = activation
    if regularize_depthwise:
      self._depthsize_regularizer = kernel_regularizer
    else:
      self._depthsize_regularizer = None
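As above, a minimal usage sketch; the enclosing class is not shown, so `InvertedBottleneckBlockNBitQuantized` is an assumed name:

import tensorflow as tf

block = InvertedBottleneckBlockNBitQuantized(  # hypothetical class name
    in_filters=32,
    out_filters=32,
    expand_ratio=6,   # inner width is expand_ratio * in_filters
    strides=1,        # stride 1 with equal in/out filters keeps the residual
    se_ratio=0.25,
    num_bits_weight=4,
    num_bits_activation=4)
outputs = block(tf.zeros([2, 28, 28, 32]))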