Example #1
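These snippets are excerpts from PyTorch's FX graph-mode quantization test suite. They rely on fixtures defined elsewhere in that file (the toy models such as SingleLayerLinearModel, plus the qconfig_dict / specific_qconfig_dict / default_equalization_qconfig_dict dictionaries) and on helper methods of the test class (get_expected_eq_scales and friends, sketched after the relevant examples below). A rough import preamble for running them looks like this; module paths and the prepare_fx signature vary across PyTorch releases, so treat it as a sketch rather than the canonical setup:

import copy

import numpy as np
import torch
import torch.nn as nn

# Paths match recent releases; older releases expose the same names
# under torch.quantization.* instead of torch.ao.quantization.*.
from torch.ao.quantization import default_qconfig
from torch.ao.quantization.observer import MinMaxObserver
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
from torch.ao.quantization.fx._equalize import (
    _convert_equalization_ref,
    default_equalization_qconfig,
)
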
    def test_input_weight_equalization_weights_bias(self):
        """ After applying the equalization functions check if the weights and
        biases are as expected
        """

        tests = [SingleLayerLinearModel, TwoLayerLinearModel,
                 SingleLayerFunctionalLinearModel, TwoLayerFunctionalLinearModel]

        x = torch.rand((5, 5))
        for M in tests:
            m = M().eval()
            exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())
            exp_weights, exp_bias = self.get_expected_weights_bias(m, x.detach().numpy(), exp_eq_scales)

            prepared = prepare_fx(m, specific_qconfig_dict, equalization_qconfig_dict=default_equalization_qconfig_dict)
            prepared(x)
            convert_ref = _convert_equalization_ref(prepared)
            convert_ref(x)

            modules = dict(convert_ref.named_modules(remove_duplicate=False))
            counter = 0
            for node in convert_ref.graph.nodes:
                if node.op == 'call_module' and isinstance(modules[str(node.target)], nn.Linear):
                    self.assertEqual(modules[str(node.target)].weight, exp_weights[counter])
                    self.assertEqual(modules[str(node.target)].bias, exp_bias[counter])
                    counter += 1
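
get_expected_weights_bias is a helper on the test class, not shown on this page. The function below is a hypothetical reconstruction of what it plausibly computes, based on how input-weight equalization folds scales into adjacent layers: each layer's weight columns are divided by its own equalization scale, and, when another equalized layer follows, that layer's scale is folded into the rows of the weight and into the bias. The name, signature, and exact broadcasting are assumptions:

def expected_weights_bias(layers, eq_scales):
    """Hypothetical reconstruction of get_expected_weights_bias.

    layers    -- the model's nn.Linear modules in execution order
    eq_scales -- one per-input-column scale vector (numpy) per layer
    """
    exp_weights, exp_bias = [], []
    for i, layer in enumerate(layers):
        w = layer.weight.detach().numpy()
        b = layer.bias.detach().numpy()
        # Dividing the weight columns by this layer's scale cancels the
        # elementwise x * scale applied to the layer's input.
        w = w * (1.0 / eq_scales[i])
        if i + 1 < len(eq_scales):
            # The next layer's input scaling is folded into this layer's
            # output: scale the weight rows and the bias.
            w = w * eq_scales[i + 1][:, None]
            b = b * eq_scales[i + 1]
        exp_weights.append(w)
        exp_bias.append(b)
    return exp_weights, exp_bias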
Example #2
    def test_input_weight_equalization_equalization_scales(self):
        """ After applying the equalization functions, check if the equalization
        scales are the expected values
        """

        tests = [
            SingleLayerLinearModel, TwoLayerLinearModel,
            SingleLayerFunctionalLinearModel, TwoLayerFunctionalLinearModel
        ]

        x = torch.rand((5, 5))
        for M in tests:
            m = M().eval()
            exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())

            prepared = prepare_fx(
                m,
                qconfig_dict,
                equalization_qconfig_dict=default_equalization_qconfig_dict)
            prepared(x)
            convert_ref = _convert_equalization_ref(prepared)
            convert_ref(x)

            counter = 0
            for node in convert_ref.graph.nodes:
                if 'equalization_scale' in node.name and node.op == 'get_attr':
                    self.assertEqual(convert_ref.get_buffer(str(node.target)),
                                     exp_eq_scales[counter])
                    counter += 1
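
The equalization scale that input-weight equalization computes is, per input column j, sqrt(range(W[:, j]) / range(x[:, j])); that matches calculate_equalization_scale in torch.ao.quantization.fx._equalize as far as I can tell. A plausible reconstruction of the get_expected_eq_scales helper under that assumption, with a hypothetical name and signature:

import numpy as np

def expected_eq_scales(layers, x):
    """Hypothetical reconstruction of get_expected_eq_scales.

    Per layer, scale_j = sqrt(weight_range_j / input_range_j), with both
    ranges taken over column j; x is a numpy batch of inputs.
    """
    scales = []
    for layer in layers:
        w = layer.weight.detach().numpy()
        b = layer.bias.detach().numpy()
        w_range = w.max(axis=0) - w.min(axis=0)  # range of each weight column
        x_range = x.max(axis=0) - x.min(axis=0)  # range of each input column
        scales.append(np.sqrt(w_range / x_range))
        x = x @ w.T + b  # propagate the float output to the next layer
    return scales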
Example #3
    def test_input_weight_equalization_convert(self):
        """ Tests that the modified model for equalization (before quantization)
        returns the same output as the original model
        """

        tests = [
            SingleLayerLinearModel, LinearAddModel, TwoLayerLinearModel,
            SingleLayerFunctionalLinearModel, FunctionalLinearAddModel,
            TwoLayerFunctionalLinearModel
        ]

        x = torch.rand((5, 5))
        for M in tests:
            m = M().eval()
            prepared = prepare_fx(
                copy.deepcopy(m),
                qconfig_dict,
                equalization_qconfig_dict=default_equalization_qconfig_dict)
            output = prepared(x)

            convert_ref = _convert_equalization_ref(prepared)
            convert_ref_output = convert_ref(x)

            prepared = prepare_fx(
                m,
                qconfig_dict,
                equalization_qconfig_dict=default_equalization_qconfig_dict)
            prepared(x)
            convert_fx(prepared)  # smoke test: conversion should run without error
            self.assertEqual(output, convert_ref_output)
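
Note the copy.deepcopy in the first prepare_fx call: the prepared graph module shares parameters with the module it was traced from, so the deep copy keeps m untouched for the second prepare_fx, which exists only so convert_fx can be exercised as a smoke test (its return value is discarded).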
Example #4
    def test_input_weight_equalization_activation_values(self):
        """ After applying the equalization functions check if the input
        observer's min/max values are as expected
        """

        tests = [
            SingleLayerLinearModel, TwoLayerLinearModel,
            SingleLayerFunctionalLinearModel
        ]

        x = torch.rand((5, 5))
        torch.manual_seed(0)
        for M in tests:
            m = M().eval()
            exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())
            exp_weights, exp_bias = self.get_expected_weights_bias(
                m,
                x.detach().numpy(), exp_eq_scales)
            exp_inp_act_vals = self.get_expected_inp_act_vals(
                m, x, exp_eq_scales, exp_weights, exp_bias)
            exp_weight_act_vals = self.get_expected_weight_act_vals(
                exp_weights)

            prepared = prepare_fx(
                m,
                qconfig_dict,
                equalization_qconfig_dict=default_equalization_qconfig_dict)
            prepared(x)
            convert_ref = _convert_equalization_ref(prepared)
            convert_ref(x)

            modules = dict(convert_ref.named_modules(remove_duplicate=False))
            inp_counter = 0
            weight_counter = 0
            for node in convert_ref.graph.nodes:
                if "weight" not in node.name and node.op == 'call_module' and \
                   isinstance(modules[str(node.target)], MinMaxObserver):
                    # Check min/max values of input activation layers
                    exp_min_val, exp_max_val = exp_inp_act_vals[inp_counter]
                    self.assertEqual(modules[str(node.target)].min_val,
                                     exp_min_val)
                    self.assertEqual(modules[str(node.target)].max_val,
                                     exp_max_val)
                    inp_counter += 1

                elif node.op == 'call_module' and isinstance(
                        modules[str(node.target)], MinMaxObserver):
                    # Check min/max values of weight activation layers
                    assert "weight" in node.name
                    exp_min_val, exp_max_val = exp_weight_act_vals[
                        weight_counter]
                    self.assertEqual(modules[str(node.target)].min_val,
                                     exp_min_val)
                    self.assertEqual(modules[str(node.target)].max_val,
                                     exp_max_val)
                    weight_counter += 1
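
get_expected_inp_act_vals is another test-class helper not shown here. Presumably the first input observer sees the model input scaled by the first equalization scale, and each later observer sees the output of the already-equalized preceding layer (whose weights, per the sketch after Example #1, already fold in the next layer's scale). A reconstruction under those assumptions, with hypothetical names; the inputs are the numpy arrays produced by the earlier helpers:

def expected_input_act_vals(x, eq_scales, exp_weights, exp_bias):
    """Hypothetical reconstruction of get_expected_inp_act_vals: the
    per-tensor min/max each non-weight MinMaxObserver should record."""
    x = x.detach().numpy() * eq_scales[0]  # first observer sees the scaled input
    vals = []
    for w, b in zip(exp_weights, exp_bias):
        vals.append((x.min(), x.max()))
        x = x @ w.T + b  # equalized layer output feeds the next observer
    vals.append((x.min(), x.max()))  # observer after the last layer
    return vals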
Example #5
    def test_input_weight_equalization_convert(self):
        """ Tests that the modified model for equalization (before quantization)
        returns the same output as the original model
        """

        tests = [(SingleLayerLinearModel, 2), (LinearAddModel, 2),
                 (TwoLayerLinearModel, 2),
                 (SingleLayerFunctionalLinearModel, 2),
                 (FunctionalLinearAddModel, 2),
                 (TwoLayerFunctionalLinearModel, 2), (LinearReluModel, 2),
                 (LinearReluLinearModel, 2), (LinearReluAddModel, 2),
                 (FunctionalLinearReluModel, 2),
                 (FunctionalLinearReluLinearModel, 2), (ConvModel, 4),
                 (TwoLayerConvModel, 4), (SingleLayerFunctionalConvModel, 4),
                 (TwoLayerFunctionalConvModel, 4), (ConvReluModel, 4),
                 (ConvReluConvModel, 4), (ConvReluAddModel, 4),
                 (FunctionalConvReluModel, 4),
                 (FunctionalConvReluConvModel, 4)]

        for (M, ndim) in tests:
            m = M().eval()

            if ndim == 2:
                x = torch.rand((5, 5))
            elif ndim == 4:
                x = torch.rand((16, 3, 224, 224))

            prepared = prepare_fx(
                copy.deepcopy(m),
                qconfig_dict,
                equalization_qconfig_dict=default_equalization_qconfig_dict)
            output = prepared(x)

            convert_ref = _convert_equalization_ref(prepared)
            convert_ref_output = convert_ref(x)

            prepared = prepare_fx(
                m,
                qconfig_dict,
                equalization_qconfig_dict=default_equalization_qconfig_dict)
            prepared(x)
            convert_fx(prepared)  # smoke test: conversion should run without error
            self.assertEqual(output, convert_ref_output)
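
This variant extends Example #3's check to linear and convolutional models with and without ReLU; each tuple carries the expected input rank, so linear models get a 2-D (5, 5) input and conv models a 4-D (16, 3, 224, 224) NCHW batch.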
Example #6
    def test_input_weight_equalization_convert(self):
        """
        """
        qconfig_dict = {
            "": None,
            "object_type": [(nn.Linear, default_qconfig),
                            (nn.functional.linear, default_qconfig)]
        }

        default_equalization_qconfig_dict = {
            "": None,
            "object_type": [(nn.Linear, default_equalization_qconfig),
                            (nn.functional.linear, default_equalization_qconfig)]
        }

        # Basic test with one linear layer
        class LinearModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = nn.Linear(2, 2)

            def forward(self, x):
                return self.linear(x)

        # Test with two linear layers and an fp32 operation in between
        class Linear2FP32Module(nn.Module):
            def __init__(self):
                super().__init__()
                self.linear1 = nn.Linear(2, 2)
                self.linear2 = nn.Linear(2, 2)

            def forward(self, x):
                x = self.linear1(x)
                x = torch.add(x, torch.tensor([1, 2]))
                x = self.linear2(x)
                return x

        tests = [(LinearModule, default_equalization_qconfig_dict),
                 (Linear2FP32Module, default_equalization_qconfig_dict)]

        for (M, equalization_qconfig_dict) in tests:
            m = M().eval()
            x = torch.tensor([[1.0, 2.0], [2.0, 2.5], [4.5, 6.0]])
            prepared = prepare_fx(
                m,
                qconfig_dict,
                equalization_qconfig_dict=equalization_qconfig_dict)
            output = prepared(x)

            convert_ref = _convert_equalization_ref(prepared)
            convert_ref_output = convert_ref(x)

            m = M().eval()
            prepared = prepare_fx(
                m,
                qconfig_dict,
                equalization_qconfig_dict=equalization_qconfig_dict)
            prepared(x)
            convert_fx(prepared)  # smoke test: conversion should run without error

            self.assertEqual(output, convert_ref_output)
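
Unlike the earlier examples, this one builds its own qconfig_dict and default_equalization_qconfig_dict instead of using the test-module fixtures, and Linear2FP32Module deliberately places a plain fp32 torch.add between the two linear layers, exercising equalization when the equalizable layers are not adjacent.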