# Example #1
    def test_input_weight_equalization_equalization_scales(self):
        """ After applying the equalization functions, check if the equalization
        scales are the expected values
        """

        tests = [
            SingleLayerLinearModel, TwoLayerLinearModel,
            SingleLayerFunctionalLinearModel, TwoLayerFunctionalLinearModel
        ]

        x = torch.rand((5, 5))
        for M in tests:
            m = M().eval()
            # Reference scales computed outside the FX quantization pipeline.
            exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())

            # NOTE(fix): every sibling test in this file passes
            # `example_inputs=` and the `_equalization_config=` keyword;
            # this test used the legacy `equalization_qconfig_dict=` keyword
            # and omitted `example_inputs`. Bring it in line with the rest.
            example_inputs = (x, )
            prepared = prepare_fx(
                m,
                specific_qconfig_dict,
                example_inputs=example_inputs,
                _equalization_config=default_equalization_qconfig_dict)
            prepared(x)  # calibrate so observers record min/max
            convert_ref = _convert_equalization_ref(prepared)
            convert_ref(x)

            # Each `get_attr` node whose name mentions `equalization_scale`
            # holds a scale buffer; compare them in graph order against the
            # expected per-layer scales.
            counter = 0
            for node in convert_ref.graph.nodes:
                if 'equalization_scale' in node.name and node.op == 'get_attr':
                    self.assertEqual(
                        convert_ref.get_buffer(str(node.target)).reshape(-1),
                        exp_eq_scales[counter])
                    counter += 1
    def test_input_weight_equalization_activation_values(self):
        """ After applying the equalization functions check if the input
        observer's min/max values are as expected
        """

        tests = [
            SingleLayerLinearModel, TwoLayerLinearModel,
            SingleLayerFunctionalLinearModel
        ]

        x = torch.rand((5, 5))
        # Fix the seed so any randomness inside the models/pipeline is
        # reproducible across the per-model reference computations below.
        torch.manual_seed(0)
        for M in tests:
            m = M().eval()
            # Reference values computed by test helpers, outside the FX
            # quantization pipeline: equalization scales, then the equalized
            # weights/bias, then the expected input- and weight-observer
            # min/max values derived from them.
            exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())
            exp_weights, exp_bias = self.get_expected_weights_bias(
                m,
                x.detach().numpy(), exp_eq_scales)
            exp_inp_act_vals = self.get_expected_inp_act_vals(
                m, x, exp_eq_scales, exp_weights, exp_bias)
            exp_weight_act_vals = self.get_expected_weight_act_vals(
                exp_weights)

            example_inputs = (x, )
            prepared = prepare_fx(
                m,
                specific_qconfig_dict,
                example_inputs=example_inputs,
                _equalization_config=default_equalization_qconfig_dict)
            # Run the prepared model once so the observers record min/max.
            prepared(x)
            convert_ref = _convert_equalization_ref(prepared)
            convert_ref(x)

            # remove_duplicate=False so every observer instance appears in
            # the lookup table, even if the same module object is reused.
            modules = dict(convert_ref.named_modules(remove_duplicate=False))
            # Two independent counters: expected values are consumed in
            # graph order, separately for input and weight observers.
            inp_counter = 0
            weight_counter = 0
            for node in convert_ref.graph.nodes:
                users = list(node.users)
                if node.op == 'call_module' and isinstance(
                        modules[str(node.target)], MinMaxObserver):
                    # A MinMaxObserver feeding F.linear as its second arg
                    # (args[1], i.e. the weight) is a weight observer;
                    # any other MinMaxObserver is an input observer.
                    if len(users) == 1 and users[
                            0].target == torch.nn.functional.linear and users[
                                0].args[1] == node:
                        # Check min/max values of weight activation layers
                        exp_min_val, exp_max_val = exp_weight_act_vals[
                            weight_counter]
                        self.assertEqual(modules[str(node.target)].min_val,
                                         exp_min_val)
                        self.assertEqual(modules[str(node.target)].max_val,
                                         exp_max_val)
                        weight_counter += 1
                    else:
                        # Check min/max values of input activation layers
                        exp_min_val, exp_max_val = exp_inp_act_vals[
                            inp_counter]
                        self.assertEqual(modules[str(node.target)].min_val,
                                         exp_min_val)
                        self.assertEqual(modules[str(node.target)].max_val,
                                         exp_max_val)
                        inp_counter += 1
    def test_input_weight_equalization_convert(self):
        """ Tests that the modified model for equalization (before quantization)
        returns the same output as the original model
        """

        tests = [(SingleLayerLinearModel, 2), (LinearAddModel, 2),
                 (TwoLayerLinearModel, 2),
                 (SingleLayerFunctionalLinearModel, 2),
                 (FunctionalLinearAddModel, 2),
                 (TwoLayerFunctionalLinearModel, 2), (LinearReluModel, 2),
                 (LinearReluLinearModel, 2), (LinearReluAddModel, 2),
                 (FunctionalLinearReluModel, 2),
                 (FunctionalLinearReluLinearModel, 2), (ConvModel, 4),
                 (TwoLayerConvModel, 4), (SingleLayerFunctionalConvModel, 4),
                 (TwoLayerFunctionalConvModel, 4), (ConvReluModel, 4),
                 (ConvReluConvModel, 4), (ConvReluAddModel, 4),
                 (FunctionalConvReluModel, 4),
                 (FunctionalConvReluConvModel, 4)]

        # Input shape keyed by the number of dimensions each model expects.
        input_shapes = {2: (5, 5), 4: (16, 3, 224, 224)}

        for model_cls, ndim in tests:
            float_model = model_cls().eval()
            sample = torch.rand(input_shapes[ndim])
            example_inputs = (sample, )

            # Equalize a deep copy so the original float model stays intact
            # for the compile check below.
            prepared_copy = prepare_fx(
                copy.deepcopy(float_model),
                specific_qconfig_dict,
                example_inputs=example_inputs,
                _equalization_config=default_equalization_qconfig_dict)
            expected_out = prepared_copy(sample)

            equalized = _convert_equalization_ref(prepared_copy)
            equalized_out = equalized(sample)

            # Also run the full quantization convert on the original model.
            prepared_orig = prepare_fx(
                float_model,
                specific_qconfig_dict,
                example_inputs=example_inputs,
                _equalization_config=default_equalization_qconfig_dict)
            prepared_orig(sample)
            convert_fx(prepared_orig)  # Check if compile
            self.assertEqual(expected_out, equalized_out)
    def test_input_weight_equalization_weights_bias(self):
        """ After applying the equalization functions check if the weights and
        biases are as expected
        """

        models_under_test = [
            SingleLayerLinearModel, TwoLayerLinearModel,
            SingleLayerFunctionalLinearModel, TwoLayerFunctionalLinearModel
        ]

        sample = torch.rand((5, 5))
        for model_cls in models_under_test:
            model = model_cls().eval()
            # Reference values computed by test helpers, outside the FX
            # quantization pipeline.
            expected_scales = self.get_expected_eq_scales(
                model, sample.detach().numpy())
            expected_weights, expected_biases = self.get_expected_weights_bias(
                model, sample.detach().numpy(), expected_scales)

            prepared = prepare_fx(
                model,
                specific_qconfig_dict,
                example_inputs=(sample, ),
                _equalization_config=default_equalization_qconfig_dict)
            prepared(sample)  # calibrate observers
            equalized = _convert_equalization_ref(prepared)
            equalized(sample)

            # remove_duplicate=False keeps every module instance visible.
            named_mods = dict(equalized.named_modules(remove_duplicate=False))
            linear_idx = 0
            for node in equalized.graph.nodes:
                if node.op != 'call_module':
                    continue
                mod = named_mods[str(node.target)]
                if not isinstance(mod, nn.Linear):
                    continue
                # Linear layers are visited in graph order, matching the
                # order of the expected weight/bias lists.
                self.assertEqual(mod.weight, expected_weights[linear_idx])
                self.assertEqual(mod.bias, expected_biases[linear_idx])
                linear_idx += 1