def test_read_nested_scopes():
    """Reads honor the innermost quantized scope, and restore on exit."""
    variable = QuantizedVariable.from_variable(
        get_var(3.5), quantizer=lambda v: 2 * v
    )
    evaluate(variable.initializer)

    def read():
        return evaluate(variable.read_value())

    with context.quantized_scope(True):
        assert read() == 7
        # An inner scope temporarily disables quantization...
        with context.quantized_scope(False):
            assert read() == 3.5
        # ...and the outer scope takes effect again afterwards.
        assert read() == 7
def test_method_delegations(distribute_scope):
    """QuantizedVariable forwards the tf.Variable API to its latent variable,
    applying the quantizer (here: doubling) on every read.

    NOTE(review): another test with this same name appears later in the file;
    the later definition shadows this one under pytest — confirm intent.
    """
    v = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda t: 2 * t)
    with context.quantized_scope(True):
        evaluate(v.initializer)
        assert evaluate(v.value()) == 7
        assert evaluate(v.read_value()) == 7
        assert v.trainable
        if version.parse(tf.__version__) > version.parse("1.14"):
            assert v.synchronization == v.latent_variable.synchronization
            assert v.aggregation == v.latent_variable.aggregation
        assert evaluate(v.initialized_value()) == 7
        if not tf.executing_eagerly():
            if not distribute_scope:
                # load()/eval() are not supported for DistributedVariables.
                v.load(4.5)
                assert v.eval() == 9
            assert evaluate(v.initial_value) == 7
            assert v.op == v.latent_variable.op
            assert v.graph == v.latent_variable.graph
        if not distribute_scope:
            # These attributes are not supported for DistributedVariables.
            assert v.constraint is None
            assert v.initializer == v.latent_variable.initializer
        # assign* return the quantized (doubled) value of the new latent value.
        assert evaluate(v.assign(4)) == 8
        assert evaluate(v.assign_add(1)) == 10
        assert evaluate(v.assign_sub(1.5)) == 7
        assert v.name == v.latent_variable.name
        assert v.device == v.latent_variable.device
        assert v.shape == ()
        assert v.get_shape() == ()
def test_scatter_method_delegations():
    """scatter_* ops mutate the latent values; reads stay quantized (x2)."""
    v = QuantizedVariable.from_variable(
        get_var([3.5, 4]), quantizer=lambda t: 2 * t
    )
    evaluate(v.initializer)
    with context.quantized_scope(True):
        assert_array_equal(evaluate(v.value()), [7, 8])

        def make_slices(val, index):
            # Single-entry IndexedSlices targeting one element of the
            # two-element variable.
            return tf.IndexedSlices(
                values=tf.constant(val, dtype=tf.float32),
                indices=tf.constant(index, dtype=tf.int32),
                dense_shape=tf.constant([2], dtype=tf.int32),
            )

        # Latent goes 3.5 -> 3 -> 3.5; quantized reads are doubled.
        assert_array_equal(evaluate(v.scatter_sub(make_slices(0.5, 0))), [6, 8])
        assert_array_equal(evaluate(v.scatter_add(make_slices(0.5, 0))), [7, 8])
        if version.parse(tf.__version__) > version.parse("1.14"):
            assert_array_equal(evaluate(v.scatter_max(make_slices(4.5, 1))), [7, 9])
            assert_array_equal(evaluate(v.scatter_min(make_slices(4.0, 1))), [7, 8])
            assert_array_equal(evaluate(v.scatter_mul(make_slices(2.0, 1))), [7, 16])
            assert_array_equal(evaluate(v.scatter_div(make_slices(2.0, 1))), [7, 8])
        # Latent is [3.5, 4] here whether or not the guarded block ran.
        assert_array_equal(evaluate(v.scatter_update(make_slices(2, 1))), [7, 4])
        assert_array_equal(evaluate(v.scatter_nd_sub([[0], [1]], [0.5, 1.0])), [6, 2])
        assert_array_equal(evaluate(v.scatter_nd_add([[0], [1]], [0.5, 1.0])), [7, 4])
        assert_array_equal(
            evaluate(v.scatter_nd_update([[0], [1]], [0.5, 1.0])), [1, 2]
        )
def test_sparse_reads():
    """sparse_read/gather_nd return latent values outside a quantized scope
    and quantized (doubled) values inside one."""
    v = QuantizedVariable.from_variable(
        get_var([1.0, 2.0]), quantizer=lambda t: 2 * t
    )
    evaluate(v.initializer)

    assert evaluate(v.sparse_read([0])) == 1
    assert evaluate(v.gather_nd([0])) == 1

    with context.quantized_scope(True):
        assert evaluate(v.sparse_read([0])) == 2
        assert evaluate(v.gather_nd([0])) == 2
def test_checkpoint(tmp_path):
    """Checkpoints round-trip the latent value; reads stay quantized after
    restore."""
    v = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda t: 2 * t)
    evaluate(v.initializer)
    evaluate(v.assign(123.0))

    # The keyword name "x" is the checkpoint key — keep it stable.
    ckpt = tf.train.Checkpoint(x=v)
    prefix = ckpt.save(tmp_path)

    evaluate(v.assign(234.0))  # clobber so the restore has a visible effect
    ckpt.restore(prefix).assert_consumed().run_restore_ops()

    assert isinstance(v, QuantizedVariable)
    assert evaluate(v) == 123.0
    with context.quantized_scope(True):
        assert evaluate(v) == 123.0 * 2
def test_read():
    """All four read paths agree: latent outside a quantized scope,
    quantized (doubled) inside one."""
    v = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda t: 2 * t)
    evaluate(v.initializer)

    def all_reads():
        return [
            evaluate(v),
            evaluate(v.value()),
            evaluate(v.read_value()),
            evaluate(tf.identity(v)),
        ]

    assert all_reads() == [3.5] * 4
    with context.quantized_scope(True):
        assert all_reads() == [7] * 4
def test_optimizer(should_quantize):
    """SGD updates the latent variable; the gradient flows through the read.

    With quantizer -x and a quantized read, d(loss)/dx = -1, so one SGD step
    with lr=1 moves x from 1 to 1 - (-1) = 2 (quantized read: -2).  With a
    latent read the gradient is +1 and x goes to 0.
    """
    x = QuantizedVariable.from_variable(get_var(1.0), quantizer=lambda v: -v)
    opt = tf.keras.optimizers.SGD(1.0)

    def loss():
        with context.quantized_scope(should_quantize):
            return x + 1.0

    @tf.function
    def apply_step():
        opt.minimize(loss, var_list=[x])

    apply_step()

    if should_quantize:
        assert evaluate(x) == 2.0
        with context.quantized_scope(should_quantize):
            assert evaluate(x) == -2.0
    else:
        assert evaluate(x) == 0.0
def test_method_delegations(distribute_scope):
    """QuantizedVariable forwards the tf.Variable API to its latent variable,
    applying the quantizer (here: doubling) on every read."""
    v = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda t: 2 * t)
    with context.quantized_scope(True):
        evaluate(v.initializer)
        assert evaluate(v.value()) == 7
        assert evaluate(v.read_value()) == 7
        assert v.trainable
        if version.parse(tf.__version__) > version.parse("1.14"):
            assert v.synchronization == v.latent_variable.synchronization
            assert v.aggregation == v.latent_variable.aggregation
        assert evaluate(v.initialized_value()) == 7
        if not tf.executing_eagerly():
            if not distribute_scope:
                # load()/eval() are not supported for DistributedVariables.
                v.load(4.5)
                assert v.eval() == 9
            assert evaluate(v.initial_value) == 7
            assert v.op == v.latent_variable.op
            assert v.graph == v.latent_variable.graph
        if not distribute_scope:
            # These attributes are not supported for DistributedVariables.
            assert v.constraint is None
            assert v.initializer == v.latent_variable.initializer

        def mutate_and_read(variable, mutator, args):
            # Apply a mutation op, then read back the (quantized) value.
            evaluate(mutator(*args))
            return evaluate(variable)

        assert mutate_and_read(v, v.assign, [4]) == 8
        assert mutate_and_read(v, v.assign_add, [1]) == 10
        assert mutate_and_read(v, v.assign_sub, [1.5]) == 7
        assert v.name == v.latent_variable.name
        assert v.device == v.latent_variable.device
        assert v.shape == ()
        assert v.get_shape() == ()
        try:
            # set_shape may not be implemented for the wrapped variable.
            v.set_shape(())
            assert v.shape == ()
        except NotImplementedError:
            pass
def quantized(request):
    """Pytest fixture that runs the test once quantized and once not,
    yielding the active flag so tests can branch on it."""
    should_quantize = request.param
    with lq_context.quantized_scope(should_quantize):
        yield should_quantize
def loss():
    # The read of `x` happens inside the scope, so the loss sees the
    # quantized value iff `should_quantize` is true (both names are
    # closed over from the enclosing test).
    with context.quantized_scope(should_quantize):
        return x + 1.0
def call(self, inputs):
    """Optionally quantize the inputs, then run the parent layer's call
    with quantized variable reads enabled."""
    quantize_inputs = self.input_quantizer
    if quantize_inputs:
        inputs = quantize_inputs(inputs)
    with context.quantized_scope(True):
        return super().call(inputs)