def test_quantize_asymmetric_backward(self, _seed, input_size, bits, use_cuda,
                                      is_negative_range, is_weights, scale_mode):
    level_low, level_high, levels = self.get_range_level(bits)

    ref_input = generate_input(input_size)
    ref_input_low, ref_input_range = self.generate_range(
        ref_input, is_negative_range, scale_mode, is_weights)
    test_input, test_input_low, test_input_range = get_test_data(
        [ref_input, ref_input_low, ref_input_range], use_cuda, is_backward=True)

    # Capture the sign of the original range before it is made positive, so the
    # reference backward pass can account for it.
    range_sign = np.sign(ref_input_range)
    ref_input_range = abs(ref_input_range) + EPS
    ref_input_low, ref_input_range = ReferenceQuantizeAsymmetric.tune_range(
        ref_input_low, ref_input_range, levels)
    ref_output = ReferenceQuantizeAsymmetric.forward(
        ref_input, ref_input_low, ref_input_range, levels)
    ref_grads = ReferenceQuantizeAsymmetric.backward(
        np.ones(input_size), ref_input, ref_input_low, ref_input_range,
        ref_output, level_low, level_high, range_sign)

    test_value = asymmetric_quantize(test_input, levels, level_low, level_high,
                                     test_input_low, test_input_range, eps=EPS)
    test_value.sum().backward()
    test_grads = get_grads([test_input, test_input_low, test_input_range])

    check_equal(ref_grads, test_grads)
def test_quantize_symmetric_backward(self, _seed, is_signed, is_weights,
                                     input_size, bits, use_cuda, scale_mode):
    ref_input = generate_input(input_size)
    ref_scale = self.generate_scale(ref_input, scale_mode, is_weights)
    level_low, level_high, levels = self.get_range_level(
        is_signed, is_weights, bits)
    test_input, test_scale = get_test_data([ref_input, ref_scale], use_cuda,
                                           is_backward=True)

    ref_scale = abs(ref_scale) + EPS
    ref_input_low = ref_scale * (level_low / level_high)
    ref_input_range = ref_scale - ref_input_low

    ref_output = ReferenceQuantizeAsymmetric.forward(
        ref_input, ref_input_low, ref_input_range, levels)
    ref_grads = ReferenceQuantizeAsymmetric.backward(
        np.ones(input_size), ref_input, ref_input_low, ref_input_range,
        ref_output, level_low, level_high, True)
    # The symmetric quantizer has no separate input_low parameter, so its
    # gradient is dropped from the reference gradients before comparison.
    del ref_grads[1]

    test_value = symmetric_quantize(test_input, levels, level_low, level_high,
                                    test_scale, EPS)
    test_value.sum().backward()
    test_grads = get_grads([test_input, test_scale])

    check_equal(ref_output, test_value)
    check_equal(ref_grads, test_grads)
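# The symmetric backward test above drives the asymmetric reference by deriving
# (input_low, input_range) from a single scale. A minimal sketch of that
# mapping, using a hypothetical 8-bit signed range in place of the values
# returned by get_range_level():
def _symmetric_as_asymmetric_sketch(scale=0.5, level_low=-128, level_high=127):
    input_low = scale * (level_low / level_high)
    input_range = scale - input_low
    # The zero point stays at zero because
    # input_low / input_range == level_low / (level_high - level_low).
    assert abs(input_low / input_range - level_low / (level_high - level_low)) < 1e-12
    return input_low, input_range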
def test_binarize_activations_forward(self, _seed, input_size, use_cuda):
    ref_input = generate_input(input_size)
    ref_scale, ref_threshold = generate_scale_threshold(input_size)
    test_input, test_scale, test_threshold = get_test_data(
        [ref_input, ref_scale, ref_threshold], use_cuda)

    ref_value = ReferenceActivationBinarize.forward(
        ref_input, ref_scale, ref_threshold)
    test_value = activation_bin_scale_threshold_op(test_input, test_scale,
                                                   test_threshold)

    check_equal(ref_value, test_value, rtol=1e-3)
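# ReferenceActivationBinarize is defined elsewhere in the test module; the
# sketch below only illustrates threshold-then-scale activation binarization
# under the assumption output = scale * (x > scale * threshold), which may not
# match the reference exactly.
import numpy as np

def _activation_binarize_sketch(x, scale, threshold):
    # Activations above scale * threshold map to the learned scale, the rest to
    # zero; scale and threshold broadcast against x.
    return np.where(x > scale * threshold, scale, 0.0)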
def test_binarize_weights_forward(self, _seed, input_size, weight_bin_type, use_cuda):
    ref_input = generate_input(input_size)
    test_input = get_test_data([ref_input], use_cuda)[0]

    if weight_bin_type == "xnor":
        ref_value = ReferenceXNORBinarize.forward(ref_input)
        test_value = xnor_binarize_op(test_input)
    elif weight_bin_type == "dorefa":
        ref_value = ReferenceDOREFABinarize.forward(ref_input)
        test_value = dorefa_binarize_op(test_input)

    check_equal(ref_value, test_value, rtol=1e-3)
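# ReferenceXNORBinarize and ReferenceDOREFABinarize are likewise defined
# elsewhere; this sketch only illustrates the difference the test parametrizes
# over, under the assumption that XNOR binarization uses a per-output-channel
# scale mean(|W_c|) while DoReFa binarization uses a single scale mean(|W|)
# over the whole tensor (both applied to sign(W)).
import numpy as np

def _xnor_binarize_sketch(weights):
    out_channels = weights.shape[0]
    alpha = np.abs(weights.reshape(out_channels, -1)).mean(axis=1)
    alpha = alpha.reshape((out_channels,) + (1,) * (weights.ndim - 1))
    return alpha * np.sign(weights)

def _dorefa_binarize_sketch(weights):
    return np.abs(weights).mean() * np.sign(weights)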
def test_binarize_activations_backward(self, _seed, input_size, use_cuda):
    ref_input = generate_input(input_size)
    ref_scale, ref_threshold = generate_scale_threshold(input_size)
    test_input, test_scale, test_threshold = get_test_data(
        [ref_input, ref_scale, ref_threshold], use_cuda, is_backward=True)

    ref_value = ReferenceActivationBinarize.forward(
        ref_input, ref_scale, ref_threshold)
    ref_grads = ReferenceActivationBinarize.backward(
        np.ones(input_size), ref_input, ref_scale, ref_value)

    test_value = activation_bin_scale_threshold_op(test_input, test_scale,
                                                   test_threshold)
    test_value.sum().backward()
    test_grads = get_grads([test_input, test_scale, test_threshold])

    check_equal(ref_grads, test_grads, rtol=1e-3)
def test_quantize_asymmetric_forward(self, _seed, input_size, bits, use_cuda,
                                     is_negative_range, is_weights, scale_mode):
    level_low, level_high, levels = self.get_range_level(bits)

    ref_input = generate_input(input_size)
    ref_input_low, ref_input_range = self.generate_range(
        ref_input, is_negative_range, scale_mode, is_weights)
    test_input, test_input_low, test_input_range = get_test_data(
        [ref_input, ref_input_low, ref_input_range], use_cuda)

    ref_input_range = abs(ref_input_range) + EPS
    ref_input_low, ref_input_range = ReferenceQuantizeAsymmetric.tune_range(
        ref_input_low, ref_input_range, levels)
    ref_value = ReferenceQuantizeAsymmetric.forward(
        ref_input, ref_input_low, ref_input_range, levels)

    test_value = asymmetric_quantize(test_input, levels, level_low, level_high,
                                     test_input_low, test_input_range, EPS)

    check_equal(ref_value, test_value)
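# ReferenceQuantizeAsymmetric.forward is assumed to implement standard
# asymmetric fake quantization: clamp to [input_low, input_low + input_range],
# discretize to `levels` uniform steps, then dequantize. A minimal NumPy sketch
# of that assumption (not the actual reference implementation):
import numpy as np

def _fake_quantize_asymmetric_sketch(x, input_low, input_range, levels):
    step = input_range / (levels - 1)
    x = np.clip(x, input_low, input_low + input_range)
    return np.round((x - input_low) / step) * step + input_low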
def test_quantize_symmetric_forward(self, _seed, is_signed, is_weights,
                                    input_size, bits, use_cuda, scale_mode):
    ref_input = generate_input(input_size)
    ref_scale = self.generate_scale(ref_input, scale_mode, is_weights)
    test_input, test_scale = get_test_data([ref_input, ref_scale], use_cuda)
    level_low, level_high, levels = self.get_range_level(
        is_signed, is_weights, bits)

    ref_scale = abs(ref_scale) + EPS
    ref_input_low = ref_scale * (level_low / level_high)
    ref_input_range = ref_scale - ref_input_low

    ref_value = ReferenceQuantizeAsymmetric.forward(
        ref_input, ref_input_low, ref_input_range, levels)
    test_value = symmetric_quantize(test_input, levels, level_low, level_high,
                                    test_scale, EPS)

    check_equal(ref_value, test_value, rtol=1e-3)
def test_magnitude_model_has_expected_params():
    model = get_magnitude_test_model((4, 4, 1))
    act_weights_1 = model.layers[1].kernel.numpy()
    act_weights_2 = model.layers[2].kernel.numpy()
    act_bias_1 = model.layers[1].bias.numpy()
    act_bias_2 = model.layers[2].bias.numpy()

    sub_tensor = tf.constant([[[[10., 9.],
                                [9., 10.]]]])
    sub_tensor = tf.transpose(sub_tensor, (2, 3, 0, 1))
    ref_weights_1 = tf.concat((sub_tensor, sub_tensor), 3)
    sub_tensor = tf.constant([[[[-9., -10., -10.],
                                [-10., -9., -10.],
                                [-10., -10., -9.]]]])
    sub_tensor = tf.transpose(sub_tensor, (2, 3, 0, 1))
    ref_weights_2 = tf.concat((sub_tensor, sub_tensor), 2)

    check_equal(act_weights_1, ref_weights_1)
    check_equal(act_weights_2, ref_weights_2)
    check_equal(act_bias_1, tf.constant([-2., -2]))
    check_equal(act_bias_2, tf.constant([0]))