Code example #1
    def build(self, input_shape):
        super(QuantizeWrapper, self).build(input_shape)

        self.optimizer_step = self.add_weight(
            'optimizer_step',
            initializer=tf.keras.initializers.Constant(-1),
            dtype=tf.dtypes.int32,
            trainable=False)

        self._weight_vars = []
        for weight, quantizer in \
            self.quantize_config.get_weights_and_quantizers(self.layer):
            quantizer_vars = quantizer.build(weight.shape,
                                             self._weight_name(weight.name),
                                             self)

            self._weight_vars.append((weight, quantizer, quantizer_vars))
            # Needed to ensure unquantized weights get trained as part of the wrapper.
            self._trainable_weights.append(weight)

        self._quantize_activations = []
        for activation, quantizer in \
            self.quantize_config.get_activations_and_quantizers(self.layer):
            quantize_activation = quantize_aware_activation.QuantizeAwareActivation(
                activation, quantizer, self.optimizer_step, self)

            self._quantize_activations.append(quantize_activation)

        self._output_quantizers = self.quantize_config.get_output_quantizers(
            self.layer)
        if self._output_quantizers:
            self._output_quantizer_vars = self._output_quantizers[0].build(
                self.layer.compute_output_shape(input_shape), 'output', self)
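
The build() above is normally reached through the public tfmot API rather than by instantiating QuantizeWrapper directly. A minimal sketch, assuming tensorflow_model_optimization is installed (the toy Dense model is made up for illustration): quantize_model wraps each supported layer for quantization-aware training, and the wrapper's build() runs once the quantized model is built.

import tensorflow as tf
import tensorflow_model_optimization as tfmot

# Toy model for illustration only.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(16,)),
])

# quantize_model wraps supported layers; summary() shows the wrapped layers
# once the quantized model has been built.
quantized = tfmot.quantization.keras.quantize_model(model)
quantized.summary()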
Code example #2
    def build(self, input_shape):
        # responsible for trainable self.kernel weights
        super(_ConvBatchNorm2D, self).build(input_shape)

        # responsible for trainable gamma and beta weights
        self.batchnorm.build(self.compute_output_shape(input_shape))

        if self.is_quantized:
            self._weight_min_var = self.add_variable(
                'weight_min',
                initializer=initializers.Constant(-6.0),
                trainable=False)
            self._weight_max_var = self.add_variable(
                'weight_max',
                initializer=initializers.Constant(6.0),
                trainable=False)

            self.optimizer_step = self.add_weight(
                'optimizer_step',
                initializer=initializers.Constant(-1),
                dtype=dtypes.int32,
                trainable=False)

            self.post_activation = quantize_aware_activation.QuantizeAwareActivation(
                self.post_activation, self.activation_quantizer,
                self.optimizer_step, self)
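
Both snippets rely on the standard Keras build() contract: build() runs once, lazily, with the input shape the first time the layer is called, and any variables created there (like the non-trainable weight_min/weight_max state above) become part of the layer. A minimal standalone sketch of that contract; the MinMaxClip layer is invented purely for illustration:

import tensorflow as tf

class MinMaxClip(tf.keras.layers.Layer):
    """Illustrative layer mirroring the non-trainable min/max state above."""

    def build(self, input_shape):
        self.weight_min = self.add_weight(
            name='weight_min', shape=(),
            initializer=tf.keras.initializers.Constant(-6.0),
            trainable=False)
        self.weight_max = self.add_weight(
            name='weight_max', shape=(),
            initializer=tf.keras.initializers.Constant(6.0),
            trainable=False)
        super().build(input_shape)

    def call(self, inputs):
        # Clip values into the tracked [min, max] range.
        return tf.clip_by_value(inputs, self.weight_min, self.weight_max)

layer = MinMaxClip()
out = layer(tf.ones([1, 4]) * 10.0)  # first call triggers build(); values clip to 6.0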
Code example #3
    def build(self, input_shape):
        super(QuantizeWrapper, self).build(input_shape)

        self.optimizer_step = self.add_weight(
            'optimizer_step',
            initializer=initializers.Constant(-1),
            dtype=dtypes.int32,
            trainable=False)

        self._weight_vars = []
        for weight, quantizer in \
            self.quantize_provider.get_weights_and_quantizers(self.layer):
            min_var, max_var = self._add_range_weights(
                self._weight_name(weight.name))

            self._weight_vars.append((weight, quantizer, min_var, max_var))
            # Needed to ensure unquantized weights get trained as part of the wrapper.
            self._trainable_weights.append(weight)

        self._quantize_activations = []
        for activation, quantizer in \
            self.quantize_provider.get_activations_and_quantizers(self.layer):
            quantize_activation = quantize_aware_activation.QuantizeAwareActivation(
                activation, quantizer, self.optimizer_step, self)

            self._quantize_activations.append(quantize_activation)

        self._output_quantizers = self.quantize_provider.get_output_quantizers(
            self.layer)
        if self._output_quantizers:
            self._output_min_max = self._add_range_weights('output')
Code example #4
def _quantize_activation(activation, parent_class, quantize_params):
    try:
        return quantize_aware_activation.QuantizeAwareActivation(
            activation.__name__, parent_class, **quantize_params)
    except TypeError:
        # Non-standard activation. Could be a custom callable, or an advanced
        # activation. Simply return the original activation for now.
        # TODO(pulkitb): Determine how to handle custom activations and advanced
        # activations.
        return activation
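
The except clause above guards against activations that QuantizeAwareActivation cannot wrap this way, e.g. advanced-activation layers or custom callables rather than plain activation functions. An illustrative check of the difference (not part of the original code):

import tensorflow as tf

# Built-in activation functions are plain callables that expose __name__ ...
print(tf.keras.activations.relu.__name__)                 # relu
# ... while advanced activations are Layer instances, which do not.
print(hasattr(tf.keras.layers.LeakyReLU(), '__name__'))   # False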