def test_binarization(self, quantized_layer, layer, input_shape, kwargs, keras_should_run_eagerly):
    """Check that a layer quantized with `ste_sign` behaves like a
    full-precision layer whose kernel and inputs are binarized with sign().

    The quantized layer is initialized with a random scalar weight; the
    reference model is initialized with sign(weight) and fed sign(inputs),
    so their outputs must match exactly (up to float tolerance).
    """
    input_data = testing_utils.random_input(input_shape)
    # Centered in [-0.5, 0.5) so the sign is random rather than always positive.
    random_weight = np.random.random() - 0.5

    # Use lq.context.metrics_scope for consistency with the other tests in
    # this file (lq.metrics.scope is the deprecated alias of the same helper).
    with lq.context.metrics_scope(["flip_ratio"]):
        quant_output = testing_utils.layer_test(
            quantized_layer,
            kwargs=dict(
                **kwargs,
                kernel_quantizer="ste_sign",
                input_quantizer="ste_sign",
                kernel_initializer=tf.keras.initializers.constant(random_weight),
            ),
            input_data=input_data,
            should_run_eagerly=keras_should_run_eagerly,
        )

    # Full-precision reference: same layer type, kernel fixed to sign(weight).
    fp_model = tf.keras.models.Sequential(
        [
            layer(
                **kwargs,
                kernel_initializer=tf.keras.initializers.constant(np.sign(random_weight)),
                input_shape=input_shape[1:],
            )
        ]
    )
    np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))
def test_depthwise_layers(self, keras_should_run_eagerly):
    """Check that QuantDepthwiseConv2D with `ste_sign` quantizers matches a
    full-precision DepthwiseConv2D applied to binarized weights and inputs.
    """
    input_data = testing_utils.random_input((2, 3, 7, 6))
    # Centered in [-0.5, 0.5) so the sign is random rather than always positive.
    random_weight = np.random.random() - 0.5

    # Use lq.context.metrics_scope for consistency with the other tests in
    # this file (lq.metrics.scope is the deprecated alias of the same helper).
    with lq.context.metrics_scope(["flip_ratio"]):
        quant_output = testing_utils.layer_test(
            lq.layers.QuantDepthwiseConv2D,
            kwargs=dict(
                kernel_size=3,
                depthwise_quantizer="ste_sign",
                input_quantizer="ste_sign",
                depthwise_initializer=tf.keras.initializers.constant(random_weight),
            ),
            input_data=input_data,
            should_run_eagerly=keras_should_run_eagerly,
        )

    # Full-precision reference: depthwise kernel fixed to sign(weight).
    fp_model = tf.keras.models.Sequential(
        [
            tf.keras.layers.DepthwiseConv2D(
                kernel_size=3,
                depthwise_initializer=tf.keras.initializers.constant(np.sign(random_weight)),
                input_shape=input_data.shape[1:],
            )
        ]
    )
    np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))
def test_layer_as_quantizer(self, quantizer, keras_should_run_eagerly):
    """Test whether a keras.layers.Layer can be used as quantizer.

    Builds a one-unit QuantDense whose quantizer is a DummyTrainableQuantizer,
    fits it for one epoch, and asserts the quantizer's trainable variable
    (named "dummy_weight") shows up among the model's trainable variables.
    """
    input_data = testing_utils.random_input((1, 10))
    model = tf.keras.Sequential(
        [lq.layers.QuantDense(1, **{quantizer: DummyTrainableQuantizer()})]
    )
    model.compile(optimizer="sgd", loss="mse", run_eagerly=keras_should_run_eagerly)
    model.fit(input_data, np.ones((1,)), epochs=1)
    # Generator expression instead of a throwaway list inside any().
    assert any("dummy_weight" in var.name for var in model.trainable_variables)
def test_separable_layers(
    self, quantized_layer, layer, input_shape, keras_should_run_eagerly
):
    """Verify a quantized separable conv layer against its full-precision twin.

    The quantized layer uses `ste_sign` on inputs and on both kernels; the
    reference layer is built with sign() of the same random kernel values and
    fed sign() of the same inputs, so both must produce identical outputs.
    """
    sample = testing_utils.random_input(input_shape)
    # Random scalars centered around zero so their signs vary between runs.
    dw_weight = np.random.random() - 0.5
    pw_weight = np.random.random() - 0.5

    quant_kwargs = dict(
        filters=3,
        kernel_size=3,
        depthwise_quantizer="ste_sign",
        pointwise_quantizer="ste_sign",
        input_quantizer="ste_sign",
        depthwise_initializer=tf.keras.initializers.constant(dw_weight),
        pointwise_initializer=tf.keras.initializers.constant(pw_weight),
    )
    with lq.context.metrics_scope(["flip_ratio"]):
        quant_output = testing_utils.layer_test(
            quantized_layer,
            kwargs=quant_kwargs,
            input_data=sample,
            should_run_eagerly=keras_should_run_eagerly,
        )

    # Full-precision reference with binarized kernels.
    reference = tf.keras.models.Sequential(
        [
            layer(
                filters=3,
                kernel_size=3,
                depthwise_initializer=tf.keras.initializers.constant(np.sign(dw_weight)),
                pointwise_initializer=tf.keras.initializers.constant(np.sign(pw_weight)),
                input_shape=input_shape[1:],
            )
        ]
    )
    np.testing.assert_allclose(quant_output, reference.predict(np.sign(sample)))