def testQuantizeSave(self):
    """A quantize-emulated model can be compiled and saved to HDF5."""
    quantized = QuantizeEmulate(test_utils.build_simple_dense_model(),
                                **self.params)
    quantized.compile(
        optimizer='sgd',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    # Save succeeds without raising; that is the whole assertion here.
    _, keras_file = tempfile.mkstemp('.h5')
    keras.models.save_model(quantized, keras_file)
Example No. 2
0
    def testMnistAccuracyinTFLite(self):
        """End-to-end check that quantize-emulated training tracks TFLite.

        Trains a quantize-emulated MNIST model for one epoch, verifies that
        training actually happened, converts the model to TFLite, and asserts
        the Keras and TFLite accuracies agree within 1% absolute.
        """
        import os  # local import: used only to close mkstemp's fds below

        num_classes = 10
        train_data, test_data, input_shape = test_utils.get_preprocessed_mnist_data(
            num_classes=num_classes)
        x_train, y_train = train_data
        x_test, y_test = test_data

        def linear(x):
            # Identity activation: keeps logits unchanged but forces a quant
            # operator after the final Dense layer (see TODO below).
            return x

        l = keras.layers
        model = keras.Sequential([
            QuantizeEmulate(l.Conv2D(32, 5, padding='same', activation='relu'),
                            input_shape=input_shape,
                            **self.params),
            l.MaxPooling2D((2, 2), (2, 2), padding='same'),
            QuantizeEmulate(l.Conv2D(64, 5, padding='same', activation='relu'),
                            **self.params),
            l.MaxPooling2D((2, 2), (2, 2), padding='same'),
            l.Flatten(),
            QuantizeEmulate(l.Dense(1024, activation='relu'), **self.params),
            l.Dropout(0.4),
            # TODO(alanchiao): fuse softmax once we've handled it.
            # Once we use QuantizeAwareActivation, pre/post activation should be
            # handled. Adding dummy activation to force adding of quant operator.
            QuantizeEmulate(l.Dense(num_classes, activation=linear),
                            **self.params),
            l.Softmax(),
        ])

        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])

        model.fit(x_train,
                  y_train,
                  batch_size=128,
                  epochs=1,
                  validation_data=(x_test, y_test))

        tf_accuracy = model.evaluate(x_test, y_test, verbose=0)[1]

        # High enough to validate that training is happening, with significantly
        # better than 0.1 random accuracy.
        self.assertGreater(tf_accuracy, 0.4)

        # Fix: the TFLite temp file previously used a '.h5' suffix; give it a
        # '.tflite' suffix so the artifact is identifiable by extension. Also
        # close the fds returned by mkstemp instead of leaking them.
        keras_fd, keras_file = tempfile.mkstemp('.h5')
        tflite_fd, tflite_file = tempfile.mkstemp('.tflite')
        os.close(keras_fd)
        os.close(tflite_fd)

        keras.models.save_model(model, keras_file)
        utils.convert_keras_to_tflite(keras_file, tflite_file,
                                      {'linear': linear})
        tflite_accuracy = test_utils.eval_mnist_tflite(tflite_file,
                                                       is_quantized=True)

        # Quantize-emulation during training should closely track the actual
        # quantized runtime, hence the tight 1% tolerance.
        self.assertAlmostEqual(tf_accuracy, tflite_accuracy, delta=0.01)
Example No. 3
0
  def testQuantizeEmulateSequential(self):
    """Wrapping a Sequential model quantize-emulates each of its layers."""
    inner_layers = [self.conv_layer, self.dense_layer]
    sequential = keras.models.Sequential(inner_layers)

    emulated = QuantizeEmulate(sequential, **self.params)

    self._assert_quant_model(emulated.layers)
  def testQuantizeSaveAndRestore(self):
    """Round-trips a trained quantize-emulated model through an HDF5 file.

    Trains briefly first so serialization is exercised on non-initial
    weights, then reloads and compares against the in-memory model.
    """
    quantized = QuantizeEmulate(test_utils.build_simple_dense_model(),
                                **self.params)
    quantized.compile(
        optimizer='sgd',
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    # Verify serialization correctness persists after training.
    num_samples = 20
    num_classes = 5
    features = np.random.rand(num_samples, 10)
    labels = keras.utils.to_categorical(
        np.random.randint(num_classes, size=(num_samples, 1)), num_classes)
    quantized.fit(features, labels, batch_size=num_samples)

    _, keras_file = tempfile.mkstemp('.h5')
    keras.models.save_model(quantized, keras_file)
    restored = keras.models.load_model(
        keras_file,
        custom_objects={'QuantizeEmulateWrapper': QuantizeEmulateWrapper})
    self._check_models_match(quantized, restored)
Example No. 5
0
    def testQuantizeEmulateList(self):
        """QuantizeEmulate over a plain layer list wraps every entry."""
        layers_to_wrap = [self.conv_layer, self.dense_layer]
        wrapped = QuantizeEmulate(layers_to_wrap, QuantizationParams(8))

        self._assert_quant_model(wrapped)
# Scale pixel values from [0, 255] into [0, 1].
# NOTE(review): x_train/x_test/y_train/y_test/input_shape/num_classes are
# defined earlier in the original script, outside this excerpt.
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

l = tf.keras.layers
# Emulate 8-bit quantization on every wrapped layer.
quant_params = {'num_bits': 8}

# Conv/pool feature extractor followed by a dense classifier. Each
# weight-bearing layer is wrapped in QuantizeEmulate so training simulates
# quantized inference; pooling/flatten/dropout layers are left unwrapped.
model = tf.keras.Sequential([
    QuantizeEmulate(l.Conv2D(32, 5, padding='same', activation='relu'),
                    input_shape=input_shape,
                    **quant_params),
    l.MaxPooling2D((2, 2), (2, 2), padding='same'),
    QuantizeEmulate(l.Conv2D(64, 5, padding='same', activation='relu'),
                    **quant_params),
    l.MaxPooling2D((2, 2), (2, 2), padding='same'),
    l.Flatten(),
    QuantizeEmulate(l.Dense(1024, activation='relu'), **quant_params),
    l.Dropout(0.4),
    QuantizeEmulate(l.Dense(num_classes), **quant_params),
    # TODO(alanchiao): fuse softmax once we've handled it.
    l.Softmax(),
])

# Dump graph to /tmp for verification on tensorboard.
# NOTE(review): tf.get_default_graph() is TF1-style graph-mode API.
graph_def = tf.get_default_graph().as_graph_def()
Example No. 7
0
  def testQuantizeEmulateList(self):
    """Quantize-emulating a list of layers yields a wrapped layer list."""
    wrapped = QuantizeEmulate([self.conv_layer, self.dense_layer],
                              **self.params)

    self._assert_quant_model(wrapped)