def test_binarize_activations_forward(self, _seed, input_size, use_cuda):
    ref_input = generate_input(input_size)
    ref_scale, ref_threshold = generate_scale_threshold(input_size)
    test_input, test_scale, test_threshold = get_test_data([ref_input, ref_scale, ref_threshold], use_cuda)

    ref_value = ReferenceActivationBinarize.forward(ref_input, ref_scale, ref_threshold)
    test_value = activation_bin_scale_threshold_op(test_input, test_scale, test_threshold)

    check_equal(test_value, ref_value, rtol=1e-3)
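# Illustrative only: a minimal NumPy sketch of the scale/threshold activation
# binarization semantics exercised above - an assumption for readability, not
# the actual ReferenceActivationBinarize implementation. Inputs above
# `threshold * scale` map to `scale`, everything else maps to zero.
def _activation_binarize_sketch(input_: np.ndarray, scale: np.ndarray, threshold: np.ndarray) -> np.ndarray:
    return (input_ > threshold * scale) * scale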
def test_binarize_weights_forward(self, _seed, input_size, weight_bin_type, use_cuda):
    ref_input = generate_input(input_size)
    test_input = get_test_data([ref_input], use_cuda)[0]

    if weight_bin_type == "xnor":
        ref_value = ReferenceXNORBinarize.forward(ref_input)
        test_value = xnor_binarize_op(test_input)
    elif weight_bin_type == "dorefa":
        ref_value = ReferenceDOREFABinarize.forward(ref_input)
        test_value = dorefa_binarize_op(test_input)

    check_equal(test_value, ref_value, rtol=1e-3)
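# Illustrative only: sketches of the two weight binarization schemes covered
# above, following the XNOR-Net and DoReFa-Net papers rather than the actual
# Reference*Binarize helpers. Both reduce a weight tensor to sign(w) times a
# scale; XNOR uses a per-output-channel mean of |w|, DoReFa a single mean of
# |w| over the whole tensor.
def _xnor_binarize_sketch(weights: np.ndarray) -> np.ndarray:
    out_ch = weights.shape[0]
    scale = np.abs(weights).reshape(out_ch, -1).mean(axis=1).reshape([out_ch] + [1] * (weights.ndim - 1))
    return np.sign(weights) * scale

def _dorefa_binarize_sketch(weights: np.ndarray) -> np.ndarray:
    return np.sign(weights) * np.abs(weights).mean()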
def test_basic_model_has_expected_params():
    model = BasicConvTestModel()
    act_weights = model.conv.weight.data
    ref_weights = BasicConvTestModel.default_weight()
    act_bias = model.conv.bias.data
    ref_bias = BasicConvTestModel.default_bias()

    check_equal(act_bias, ref_bias)
    check_equal(act_weights, ref_weights)

    assert act_weights.nonzero().size(0) == model.nz_weights_num
    assert act_bias.nonzero().size(0) == model.nz_bias_num
    assert act_weights.numel() == model.weights_num
    assert act_bias.numel() == model.bias_num
def check_outputs_for_quantization_functions(test_val: torch.Tensor, ref_val: np.ndarray, is_fp16, rtol=1e-4):
    if is_fp16:
        # FP16 is inaccurate enough that the reference and test quantization
        # runs will never be aligned quantum-wise: for quanta close to the
        # distribution's zero point, even a difference of a single quantum
        # gives a 100% relative error, and this is exactly what happens in
        # FP16 for ~5% of the input values uniformly sampled from the
        # [-1.0; 1.0] range. Therefore we don't check for tensor equality -
        # the test passes for the FP32 cases, and calculations-wise the
        # kernel implementation is exactly the same for FP16.
        return
    check_equal(test_val, ref_val, rtol)
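# For context: check_equal is assumed here to be a thin wrapper over
# np.testing.assert_allclose that first detaches test tensors to CPU NumPy.
# A minimal sketch of that assumption:
def _check_equal_sketch(test: torch.Tensor, reference: np.ndarray, rtol: float = 1e-4):
    np.testing.assert_allclose(test.detach().cpu().numpy(), reference, rtol=rtol)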
def test_binarize_activations_backward(self, _seed, input_size, use_cuda):
    ref_input = generate_input(input_size)
    ref_scale, ref_threshold = generate_scale_threshold(input_size)
    test_input, test_scale, test_threshold = get_test_data([ref_input, ref_scale, ref_threshold], use_cuda,
                                                           is_backward=True)

    ref_value = ReferenceActivationBinarize.forward(ref_input, ref_scale, ref_threshold)
    # test_value.sum().backward() below produces an all-ones upstream gradient,
    # so the reference backward pass is fed np.ones of the same shape
    ref_grads = ReferenceActivationBinarize.backward(np.ones(input_size), ref_input, ref_scale, ref_value)

    test_value = activation_bin_scale_threshold_op(test_input, test_scale, test_threshold)
    test_value.sum().backward()
    test_grads = get_grads([test_input, test_scale, test_threshold])

    check_equal(test_grads, ref_grads, rtol=1e-3)
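# Illustrative only: binarization has zero gradient almost everywhere, so
# backward tests like the one above rely on a straight-through estimator
# (STE). A generic sketch of the STE idea - an assumption, not the actual
# ReferenceActivationBinarize.backward - passes the upstream gradient through
# wherever the input lies inside the binarizer's active range and zeroes it
# elsewhere:
def _ste_input_grad_sketch(grad_output: np.ndarray, input_: np.ndarray, scale: np.ndarray) -> np.ndarray:
    pass_through = (input_ >= 0) & (input_ <= scale)
    return grad_output * pass_through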
def test_two_conv_model_has_expected_params():
    model = TwoConvTestModel()
    act_weights_1 = model.features[0][0].weight.data
    act_weights_2 = model.features[1][0].weight.data
    act_bias_1 = model.features[0][0].bias.data
    act_bias_2 = model.features[1][0].bias.data

    ref_weights_1 = BasicConvTestModel.default_weight()
    # the second conv's reference weights: a single output channel built from
    # two identity-matrix 3x3 kernels, one per input channel
    channel = torch.eye(3, 3).reshape([1, 1, 3, 3])
    ref_weights_2 = torch.cat((channel, channel), 1)

    check_equal(act_weights_1, ref_weights_1)
    check_equal(act_weights_2, ref_weights_2)
    check_equal(act_bias_1, BasicConvTestModel.default_bias())
    check_equal(act_bias_2, torch.tensor([0]))

    assert act_weights_1.nonzero().size(0) + act_weights_2.nonzero().size(0) == model.nz_weights_num
    assert act_bias_1.nonzero().size(0) + act_bias_2.nonzero().size(0) == model.nz_bias_num
    assert act_weights_1.numel() + act_weights_2.numel() == model.weights_num
    assert act_bias_1.numel() + act_bias_2.numel() == model.bias_num
def test_two_conv_model_is_valid():
    model = TwoConvTestModel()
    input_ = torch.ones([1, 1, 4, 4])
    # the first conv (same weights/bias as BasicConvTestModel, see
    # test_basic_model_is_valid below) yields two 3x3 maps filled with -4;
    # each identity kernel of the second conv then sums three diagonal
    # entries per input channel: 2 * 3 * (-4) = -24, with zero bias
    ref_output = torch.tensor([-24])
    act_output = model(input_)
    check_equal(act_output, ref_output)
def test_basic_model_is_valid():
    model = BasicConvTestModel()
    input_ = torch.ones([1, 1, 4, 4])
    # a 2x2 kernel over a 4x4 input gives 3x3 spatial outputs; for an all-ones
    # input each output location is the kernel sum plus the bias, i.e. -4
    ref_output = torch.ones((1, 2, 3, 3)) * (-4)
    act_output = model(input_)
    check_equal(act_output, ref_output)
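# Illustrative only: for an all-ones input, every output location of a valid
# convolution equals the sum of the kernel entries plus the bias, which is why
# constant-filled reference tensors suffice in the two tests above. A minimal
# check with a hypothetical kernel/bias pair consistent with the expected -4
# (kernel summing to -2, bias -2):
def _constant_input_conv_sketch():
    kernel = torch.tensor([[[[0.0, -1.0], [-1.0, 0.0]]]])  # hypothetical, sums to -2
    bias = torch.tensor([-2.0])
    out = torch.nn.functional.conv2d(torch.ones([1, 1, 4, 4]), kernel, bias)
    assert out.shape == (1, 1, 3, 3)
    assert bool((out == -4).all())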