Example #1
    def testRaisesErrorModelNotBuilt(self):
        model = keras.Sequential(
            [quantize_annotate(keras.layers.Dense(10), **self.quant_params1)])

        self.assertFalse(model.built)
        with self.assertRaises(ValueError):
            quantize_emulate.quantize_apply(model)
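Note: `self.quant_params1` is defined outside this excerpt, presumably as a fixture in the test's setUp. A minimal sketch of that fixture, assuming the same key names as the `quant_params` literal in Example #5:

    def setUp(self):
        super().setUp()
        # Hypothetical fixture; only its unpacking into quantize_annotate(...)
        # is visible in these examples.
        self.quant_params1 = {'num_bits': 8, 'symmetric': True}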
Example #2
    def testRaisesErrorNoAnnotatedLayers_Sequential(self):
        model = keras.Sequential(
            [keras.layers.Dense(10),
             keras.layers.Dropout(0.4)])

        with self.assertRaises(ValueError):
            quantize_emulate.quantize_apply(model)
Example #3
    def testRaisesErrorIfKerasSubclassedModel(self):
        class MyModel(keras.Model):
            def call(self, inputs, training=None, mask=None):  # pylint: disable=g-wrong-blank-lines
                return inputs

        with self.assertRaises(ValueError):
            quantize_emulate.quantize_apply(MyModel())
Example #4
    def testRaisesErrorNoAnnotatedLayers_Functional(self):
        inputs = keras.Input(shape=(10,))
        x = keras.layers.Dense(32, activation='relu')(inputs)
        results = keras.layers.Dense(5, activation='softmax')(x)
        model = keras.Model(inputs=inputs, outputs=results)

        with self.assertRaises(ValueError):
            quantize_emulate.quantize_apply(model)
Example #5
    def testQuantizesActivationsWithinLayer_Sequential(self):
        quant_params = {'num_bits': 8, 'symmetric': True}
        model = keras.Sequential([
            quantize_annotate(keras.layers.Conv2D(32, 5, activation='relu'),
                              input_shape=(28, 28, 1),
                              **quant_params)
        ])

        quantized_model = quantize_emulate.quantize_apply(model)

        # We expect activation to be modified.
        self._assert_model_emulated(model, quantized_model, ['activation'])

        conv_layer = quantized_model.layers[0].layer
        self.assertIsInstance(conv_layer.activation, QuantizeAwareActivation)
        self.assertEqual(keras.activations.get('relu'),
                         conv_layer.activation.activation)
        self.assertEqual(keras.layers.Conv2D,
                         conv_layer.activation.parent_layer)
        self.assertEqual(quant_params,
                         conv_layer.activation.get_quantize_params())
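The helper `_assert_model_emulated` is not part of this excerpt. Based on how it is called (original model, quantized model, optional list of config keys to skip), a plausible sketch is that it compares each original layer's config against the corresponding layer of the quantize_apply() output; the exact behavior of the real helper is an assumption here:

    def _assert_model_emulated(self, model, quantized_model, exclude_keys=None):
        # Hypothetical reconstruction: unwrap annotate/emulate wrappers on both
        # sides and compare layer configs, ignoring any keys in exclude_keys
        # (e.g. ['activation'] when activations are rewritten).
        exclude_keys = exclude_keys or []
        for layer, quantized_layer in zip(model.layers, quantized_model.layers):
            original = getattr(layer, 'layer', layer)
            emulated = getattr(quantized_layer, 'layer', quantized_layer)
            original_config = original.get_config()
            emulated_config = emulated.get_config()
            for key in exclude_keys:
                original_config.pop(key, None)
                emulated_config.pop(key, None)
            self.assertEqual(original_config, emulated_config)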
Example #6
    def testQuantizesActivationsWithinLayer_Functional(self):
        quant_params = {'num_bits': 8, 'symmetric': True}

        inputs = keras.Input(shape=(28, 28, 1))
        results = quantize_annotate(
            keras.layers.Conv2D(32, 5, activation='relu'),
            **quant_params)(inputs)
        model = keras.Model(inputs=inputs, outputs=results)

        quantized_model = quantize_emulate.quantize_apply(model)

        # We expect activation to be modified.
        self._assert_model_emulated(model, quantized_model, ['activation'])

        conv_layer = quantized_model.layers[1].layer
        self.assertIsInstance(conv_layer.activation, QuantizeAwareActivation)
        self.assertEqual(keras.activations.get('relu'),
                         conv_layer.activation.activation)
        self.assertEqual(keras.layers.Conv2D,
                         conv_layer.activation.parent_layer)
        self.assertEqual(quant_params,
                         conv_layer.activation.get_quantize_params())
Example #7
    def testAppliesQuantizationToAnnotatedModel_Functional(self):
        model = self._get_annotated_functional_model()

        quantized_model = quantize_emulate.quantize_apply(model)

        self._assert_model_emulated(model, quantized_model)
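`_get_annotated_functional_model` is also defined outside this excerpt. Judging from Examples #4 and #6, it presumably builds a functional Keras model with one or more layers wrapped by `quantize_annotate`; a minimal sketch, with the layer types, sizes, and quantization parameters assumed rather than taken from the original source:

    def _get_annotated_functional_model(self):
        # Hypothetical helper mirroring the annotated functional graph in
        # Example #6.
        inputs = keras.Input(shape=(28, 28, 1))
        x = quantize_annotate(
            keras.layers.Conv2D(32, 5, activation='relu'),
            **self.quant_params1)(inputs)
        x = keras.layers.Flatten()(x)
        results = quantize_annotate(
            keras.layers.Dense(10, activation='softmax'),
            **self.quant_params1)(x)
        return keras.Model(inputs=inputs, outputs=results)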
Example #8
    def testRaisesErrorIfNotKerasModel(self):
        with self.assertRaises(ValueError):
            quantize_emulate.quantize_apply(keras.layers.Dense(32))