Code Example #1
  def testCustomWeightQuantizers_Run(self, quantizer_type):
    init_params = self._get_quant_params(quantizer_type)

    # Additional test that the same quantizer object can be shared
    # between Configs, though we don't explicitly promote this
    # anywhere in the documentation.
    quantizer = quantizer_type(**init_params)

    class DenseQuantizeConfig(QuantizeConfig):
      """Custom QuantizeConfig for Dense layer."""

      def get_weights_and_quantizers(self, layer):
        return [(layer.kernel, quantizer)]

      def get_activations_and_quantizers(self, layer):
        # Defaults.
        return [(layer.activation,
                 MovingAverageQuantizer(
                     num_bits=8,
                     per_axis=False,
                     symmetric=False,
                     narrow_range=False))]

      def set_quantize_weights(self, layer, quantize_weights):
        layer.kernel = quantize_weights[0]

      def set_quantize_activations(self, layer, quantize_activations):
        return

      def get_output_quantizers(self, layer):
        return []

      def get_config(self):
        return {}

    annotated_model = tf.keras.Sequential([
        quantize.quantize_annotate_layer(
            l.Dense(8, input_shape=(10,)), DenseQuantizeConfig()),
        quantize.quantize_annotate_layer(
            l.Dense(5), DenseQuantizeConfig())
    ])

    with quantize.quantize_scope(
        {'DenseQuantizeConfig': DenseQuantizeConfig}):
      quant_model = quantize.quantize_apply(annotated_model)

    # Check no error happens.
    self._train_model(quant_model)
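
These snippets are excerpts and omit their imports; names such as quantize, QuantizeConfig, MovingAverageQuantizer, and l come from tensorflow_model_optimization (TFMOT) and Keras. A minimal sketch of the imports the example above assumes, using the public TFMOT API (helpers such as log and the custom *QuantizeConfig classes in later examples are defined in their respective projects):

import tensorflow as tf
import tensorflow_model_optimization as tfmot

# Aliases matching the names used in the snippets.
quantize = tfmot.quantization.keras  # quantize_annotate_layer, quantize_apply, quantize_scope, ...
QuantizeConfig = tfmot.quantization.keras.QuantizeConfig
MovingAverageQuantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer
l = tf.keras.layers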
Code Example #2
    def add_quantize_annotation(layer):
        # Create a new layer to break the link with the old model.
        layer = layer.__class__.from_config(layer.get_config())

        quantization_map = [
            # tf.keras.layers.Dense,
            tf.keras.layers.Conv2D,
            # tf.keras.layers.Input (would pair with BFPInputQuantizerConfig)
        ]

        for layer_type in quantization_map:

            if isinstance(layer, layer_type):
                quantize_config = SLGQuantizeConfig()

                log.info(
                    "**SLG annotation added to layer {} of type {} with {}".
                    format(layer.name, layer_type, quantize_config))

                quantized_layer = quantize_annotate_layer(
                    to_annotate=layer, quantize_config=quantize_config)
                return quantized_layer
        log.info("**SLG annotation not added to layer {} of type {}".format(
            layer.name, type(layer)))

        return layer
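
A function like add_quantize_annotation above is meant to be passed as the clone_function argument of tf.keras.models.clone_model, which invokes it on every layer of a model. A minimal usage sketch under that assumption, where base_model is a hypothetical Keras model; because SLGQuantizeConfig is a custom config, it must be registered in a quantize_scope before quantize_apply, as Example #1 shows:

    annotated_model = tf.keras.models.clone_model(
        base_model, clone_function=add_quantize_annotation)

    # Custom QuantizeConfig classes must be in scope during quantize_apply.
    with quantize.quantize_scope({'SLGQuantizeConfig': SLGQuantizeConfig}):
        quant_model = quantize.quantize_apply(annotated_model)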
Code Example #3
    def add_quantize_annotation(layer):
        # Create a new layer to break the link with the old model.
        try:
            layer = layer.__class__.from_config(layer.get_config())
        except Exception:
            # Some layers cannot be rebuilt from their config; keep the original.
            pass

        for layer_type in quantization_map:

            if isinstance(layer, layer_type):

                if isinstance(pruning_policy, float) or pruning_policy is None:
                    layer_pruning = pruning_policy
                elif isinstance(pruning_policy, dict):
                    layer_pruning = pruning_policy[layer.name]
                else:
                    raise ValueError("Illegal layer pruning policy {}".format(
                        pruning_policy))

                quantize_config = BFPQuantizeConfig(
                    pruning_policy=layer_pruning)

                log.info(
                    "**Quantization annotation added to layer {} of type {} with {}"
                    .format(layer.name, layer_type, quantize_config))

                quantized_layer = quantize_annotate_layer(
                    to_annotate=layer, quantize_config=quantize_config)
                return quantized_layer
        log.info("**Quantization annotation not added to layer {} of type {}".
                 format(layer.name, type(layer)))

        return layer
Code Example #4
    def add_sparsity_annotation(layer):
        quantize_config = SparsityMeter()
        log.info(
            "**Sparsity Measure annotation added to layer {} with {}".format(
                layer.name, quantize_config))
        quantized_layer = quantize_annotate_layer(
            to_annotate=layer, quantize_config=quantize_config)
        return quantized_layer
Code Example #5
        def add_quantize_annotation(layer):
            # Create a new layer to break the link with the old model.
            try:
                layer = layer.__class__.from_config(layer.get_config())
            except Exception:
                # Some layers cannot be rebuilt from their config; keep the original.
                pass

            for layer_type in quantization_map:

                # A disabled extra condition here also excluded softmax layers:
                # `and not tf.keras.activations.get("softmax") == layer.activation`
                if isinstance(layer, layer_type):

                    if isinstance(pruning_policy,
                                  float) or pruning_policy is None:
                        layer_pruning = pruning_policy
                    elif isinstance(pruning_policy, dict):
                        layer_pruning = pruning_policy[layer.name]
                    else:
                        raise ValueError(
                            "Illegal layer pruning policy {}".format(
                                pruning_policy))

                    quantize_config = BFPQuantizeConfig(
                        pruning_policy=layer_pruning)

                    log.info(
                        "**Quantization annotation added to layer {} of type {} with {}"
                        .format(layer.name, layer_type, quantize_config))

                    quantized_layer = quantize_annotate_layer(
                        to_annotate=layer, quantize_config=quantize_config)
                    return quantized_layer

            # (Disabled follow-up: log that quantization is not added for a
            # classification layer with a softmax activation.)

            log.info(
                "**Quantization annotation not added to layer {} of type {}".
                format(layer.name, type(layer)))

            return layer
Code Example #6
    def add_quantize_annotation(layer):
        kernelization_map = [
            # tf.keras.layers.Dense,
            tf.keras.layers.Conv2D
        ]

        for layer_type in kernelization_map:
            if isinstance(layer, layer_type):
                quantize_config = SLCQuantizeConfig()

                log.info(
                    "**Kernelization annotation added to layer {} of type {} with {}"
                    .format(layer.name, layer_type, quantize_config))

                quantized_layer = quantize_annotate_layer(
                    to_annotate=layer, quantize_config=quantize_config)
                return quantized_layer
        log.info("**Kernelization annotation not added to layer {} of type {}"
                 .format(layer.name, type(layer)))

        return layer
Code Example #7
File: Quantizer.py  Project: M2theJJ/NN
    def add_quantize_annotation(layer):
        # Create a new layer to break the link with the old model.
        layer = layer.__class__.from_config(layer.get_config())

        quantization_map = {
            tf.keras.layers.Dense: BFPQuantizeConfig(),
            tf.keras.layers.Conv2D: BFPQuantizeConfig()
        }

        for layer_type, quantize_config in quantization_map.items():
            if isinstance(layer, layer_type):
                print(
                    "**Quantization annotation added to layer {} of type {} with {}"
                    .format(layer.name, layer_type, quantize_config))

                quantized_layer = quantize_annotate_layer(
                    to_annotate=layer, quantize_config=quantize_config)
                return quantized_layer
        print("**Quantization annotation not added to layer {} of type {}".
              format(layer.name, type(layer)))

        return layer
Code Example #8
File: mnist_cnn.py  Project: Crissal1995/IPCV
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

l = tf.keras.layers

model = tf.keras.Sequential([
    quantize.quantize_annotate_layer(l.Conv2D(32,
                                              5,
                                              padding='same',
                                              activation='relu'),
                                     input_shape=input_shape),
    l.MaxPooling2D((2, 2), (2, 2), padding='same'),
    quantize.quantize_annotate_layer(
        l.Conv2D(64, 5, padding='same', activation='relu')),
    l.MaxPooling2D((2, 2), (2, 2), padding='same'),
    l.Flatten(),
    quantize.quantize_annotate_layer(l.Dense(1024, activation='relu')),
    l.Dropout(0.4),
    quantize.quantize_annotate_layer(l.Dense(num_classes)),
    # TODO(alanchiao): fuse softmax once we've handled it.
    l.Softmax(),
])

model = quantize.quantize_apply(model)
Code Example #9
    def testQuantizeAnnotateLayer_FailsWithModel(self):
        model = keras_test_utils.build_simple_dense_model()

        with self.assertRaises(ValueError):
            quantize.quantize_annotate_layer(model)
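
This test pins down that quantize_annotate_layer accepts only layers and raises ValueError for a whole model. To annotate every supported layer of a model at once, TFMOT provides quantize_annotate_model instead; a minimal sketch using the same helper model:

    annotated_model = quantize.quantize_annotate_model(
        keras_test_utils.build_simple_dense_model())
    quant_model = quantize.quantize_apply(annotated_model)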
Code Example #10
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

l = tf.keras.layers

keras_file = '/tmp/quantized_mnist.h5'
if not os.path.exists(keras_file):
    model = tf.keras.Sequential([
        # Only the first layer is quantization-aware trained;
        # the rest of the layers are not.
        quantize.quantize_annotate_layer(
            l.Conv2D(32,
                     5,
                     padding='same',
                     activation='relu',
                     input_shape=input_shape)),
        l.MaxPooling2D((2, 2), (2, 2), padding='same'),
        l.Conv2D(64, 5, padding='same', activation='relu'),
        l.BatchNormalization(),
        l.MaxPooling2D((2, 2), (2, 2), padding='same'),
        l.Flatten(),
        l.Dense(1024, activation='relu'),
        l.Dropout(0.4),
        l.Dense(num_classes),
        l.Softmax(),
    ])
    model = quantize.quantize_apply(model)
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adadelta(),
                  metrics=['accuracy'])  # assumed: the call is truncated here in the source
Code Example #11
    def apply_quantization_to_dense(layer):
        if isinstance(layer, tf.keras.layers.Dense):
            if layer.name == 'qat':
                return quantize.quantize_annotate_layer(layer)
        return layer
Code Example #12
File: dropin_base.py  Project: altostratous/dnnfault
    def apply_quantization_to_dense(layer):
        if isinstance(layer, (tf.keras.layers.Dense, tf.keras.layers.Conv2D)):
            return quantize_annotate_layer(layer)
        return layer
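
Because this clone_function uses the default QuantizeConfig, no quantize_scope is needed when applying it through tf.keras.models.clone_model (as sketched after Example #2). When every supported layer should be quantized anyway, TFMOT's one-shot helper is a close equivalent; a sketch with base_model again hypothetical:

    quant_model = tfmot.quantization.keras.quantize_model(base_model)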