Example #1
    def test_compare_weights(self):
        r"""Compare the weights of float and quantized conv layer
        """
        # eager mode
        annotated_conv_model = AnnotatedConvModel().eval()
        quantized_annotated_conv_model = quantize(annotated_conv_model,
                                                  default_eval_fn,
                                                  self.img_data)
        weight_dict = compare_weights(
            annotated_conv_model.state_dict(),
            quantized_annotated_conv_model.state_dict(),
        )
        self.assertEqual(len(weight_dict), 1)
        for k, v in weight_dict.items():
            self.assertTrue(v["float"].shape == v["quantized"].shape)
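For reference, a minimal standalone sketch of the same compare_weights flow outside the test harness. TinyConvModel, the input shapes, and the number of calibration batches are illustrative stand-ins (they are not part of the test suite); the Numeric Suite helpers are assumed to live in torch.quantization._numeric_suite, as in the PyTorch version these tests target, and the fbgemm backend is assumed to be available.

import torch
import torch.nn as nn
from torch.quantization import (QuantStub, DeQuantStub, convert,
                                get_default_qconfig, prepare)
from torch.quantization._numeric_suite import compare_weights


class TinyConvModel(nn.Module):
    """Hypothetical stand-in for AnnotatedConvModel."""
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()
        self.conv = nn.Conv2d(3, 8, 3)
        self.dequant = DeQuantStub()

    def forward(self, x):
        return self.dequant(self.conv(self.quant(x)))


float_model = TinyConvModel().eval()
float_model.qconfig = get_default_qconfig("fbgemm")  # assumes fbgemm is available

# Calibrate and convert with the same eager-mode prepare/convert flow the tests use.
prepared = prepare(float_model, inplace=False)
for _ in range(4):
    prepared(torch.randn(1, 3, 32, 32))
q_model = convert(prepared)

# compare_weights maps each weight name to its "float" and "quantized" tensors.
weight_dict = compare_weights(float_model.state_dict(), q_model.state_dict())
for name, entry in weight_dict.items():
    print(name, entry["float"].shape, entry["quantized"].shape)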
Example #2
    def test_compare_model_stub(self):
        r"""Compare the output of quantized conv layer and its float shadow module
        """
        def compare_and_validate_results(float_model, q_model,
                                         module_swap_list, data):
            ob_dict = compare_model_stub(float_model, q_model,
                                         module_swap_list, data, ShadowLogger)
            self.assertEqual(len(ob_dict), 1)
            for k, v in ob_dict.items():
                self.assertTrue(v["float"].shape == v["quantized"].shape)

        for qengine in supported_qengines:
            with override_quantized_engine(qengine):
                model_list = [
                    AnnotatedConvModel(qengine),
                    AnnotatedConvBnReLUModel(qengine),
                ]
                data = self.img_data[0][0]
                module_swap_list = [
                    nn.Conv2d, nn.intrinsic.modules.fused.ConvReLU2d
                ]
                for model in model_list:
                    model.eval()
                    if hasattr(model, "fuse_model"):
                        model.fuse_model()
                    q_model = quantize(model, default_eval_fn, self.img_data)
                    compare_and_validate_results(model, q_model,
                                                 module_swap_list, data)

                # Test adding stub to sub module
                model = ModelWithSubModules().eval()
                q_model = quantize(model, default_eval_fn, self.img_data)
                module_swap_list = [SubModule]
                ob_dict = compare_model_stub(model, q_model, module_swap_list,
                                             data, ShadowLogger)
                self.assertTrue(isinstance(q_model.mod1, Shadow))
                self.assertFalse(isinstance(q_model.conv, Shadow))
                for k, v in ob_dict.items():
                    torch.testing.assert_allclose(v["float"],
                                                  v["quantized"].dequantize())

                # Test adding stub to functionals
                model = ModelWithFunctionals().eval()
                model.qconfig = torch.quantization.get_default_qconfig(
                    "fbgemm")
                q_model = prepare(model, inplace=False)
                q_model(data)
                q_model = convert(q_model)
                module_swap_list = [nnq.FloatFunctional]
                ob_dict = compare_model_stub(model, q_model, module_swap_list,
                                             data, ShadowLogger)
                self.assertEqual(len(ob_dict), 6)
                self.assertTrue(isinstance(q_model.mycat, Shadow))
                self.assertTrue(isinstance(q_model.myadd, Shadow))
                self.assertTrue(isinstance(q_model.mymul, Shadow))
                self.assertTrue(isinstance(q_model.myadd_relu, Shadow))
                self.assertTrue(isinstance(q_model.my_scalar_add, Shadow))
                self.assertTrue(isinstance(q_model.my_scalar_mul, Shadow))
                for k, v in ob_dict.items():
                    self.assertTrue(v["float"].shape == v["quantized"].shape)
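A minimal standalone sketch of the compare_model_stub flow above, reusing the hypothetical TinyConvModel and the prepare/convert setup from the first sketch. Note that compare_model_stub wraps the matched quantized submodule in a Shadow in place, which is why the test inspects q_model's attributes after the call; the exact payload of each logger entry is version-dependent, so the shape check below simply follows the convention of the test above.

from torch.quantization._numeric_suite import (Shadow, ShadowLogger,
                                               compare_model_stub)

float_model = TinyConvModel().eval()
float_model.qconfig = get_default_qconfig("fbgemm")
prepared = prepare(float_model, inplace=False)
prepared(torch.randn(1, 3, 32, 32))
q_model = convert(prepared)

data = torch.randn(1, 3, 32, 32)
module_swap_list = [nn.Conv2d]

# q_model.conv gets wrapped in a Shadow that also runs the float conv on the
# same input, so ShadowLogger records both outputs side by side.
ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data,
                             ShadowLogger)

assert isinstance(q_model.conv, Shadow)
for name, entry in ob_dict.items():
    # In newer PyTorch versions these entries are lists of tensors (see Example #4).
    print(name, entry["float"].shape, entry["quantized"].shape)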
Example #3
    def test_compare_model_outputs_conv_static(self):
        r"""Compare the output of conv layer in stataic quantized model and corresponding
        output of conv layer in float model
        """
        qengine = torch.backends.quantized.engine

        def compare_and_validate_results(float_model, q_model, data):
            act_compare_dict = compare_model_outputs(float_model, q_model,
                                                     data)
            expected_act_compare_dict_keys = {"conv.stats", "quant.stats"}

            self.assertTrue(
                act_compare_dict.keys() == expected_act_compare_dict_keys)
            for k, v in act_compare_dict.items():
                self.assertTrue(v["float"][0].shape == v["quantized"][0].shape)

        model_list = [
            AnnotatedConvModel(qengine),
            AnnotatedConvBnReLUModel(qengine)
        ]
        for model in model_list:
            model.eval()
            if hasattr(model, "fuse_model"):
                model.fuse_model()
            q_model = quantize(model, test_only_eval_fn, [self.img_data_2d])
            compare_and_validate_results(model, q_model,
                                         self.img_data_2d[0][0])
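A minimal standalone sketch of compare_model_outputs under the same assumptions as the first sketch (TinyConvModel and the input shapes are illustrative). The helper attaches output loggers to matching float and quantized submodules, runs both models on the data, and returns activations keyed in the "<module>.stats" form asserted above; whether each entry holds a single tensor or a list of tensors varies across PyTorch versions, and the indexing below follows this test's convention.

from torch.quantization._numeric_suite import compare_model_outputs

float_model = TinyConvModel().eval()
float_model.qconfig = get_default_qconfig("fbgemm")
prepared = prepare(float_model, inplace=False)
prepared(torch.randn(1, 3, 32, 32))
q_model = convert(prepared)

data = torch.randn(1, 3, 32, 32)
act_compare_dict = compare_model_outputs(float_model, q_model, data)

# Expect keys such as "conv.stats" and "quant.stats" for this model.
for name, entry in act_compare_dict.items():
    print(name, entry["float"][0].shape, entry["quantized"][0].shape)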
Example #4
    def test_compare_model_stub_conv_static(self):
        r"""Compare the output of static quantized conv layer and its float shadow module"""

        qengine = torch.backends.quantized.engine

        def compare_and_validate_results(float_model, q_model,
                                         module_swap_list, data):
            ob_dict = compare_model_stub(float_model, q_model,
                                         module_swap_list, data)
            self.assertEqual(len(ob_dict), 1)
            for k, v in ob_dict.items():
                self.assertTrue(len(v["float"]) == len(v["quantized"]))
                for i, val in enumerate(v["quantized"]):
                    self.assertTrue(
                        v["float"][i].shape == v["quantized"][i].shape)

        model_list = [
            AnnotatedConvModel(qengine),
            AnnotatedConvTransposeModel(
                "qnnpack"),  # ConvT cannot use per channel weights
            AnnotatedConvBnReLUModel(qengine)
        ]
        module_swap_list = [
            nn.Conv2d, nn.intrinsic.modules.fused.ConvReLU2d,
            nn.ConvTranspose2d
        ]
        for model in model_list:
            model.eval()
            if hasattr(model, "fuse_model"):
                model.fuse_model()
            q_model = quantize(model, test_only_eval_fn, [self.img_data_2d])
            compare_and_validate_results(model, q_model, module_swap_list,
                                         self.img_data_2d[0][0])
Example #5
    def test_compare_model_stub_conv_static(self):
        r"""Compare the output of static quantized conv layer and its float shadow module
        """

        qengine = torch.backends.quantized.engine

        def compare_and_validate_results(float_model, q_model,
                                         module_swap_list, data):
            ob_dict = compare_model_stub(float_model, q_model,
                                         module_swap_list, data, ShadowLogger)
            self.assertEqual(len(ob_dict), 1)
            for k, v in ob_dict.items():
                self.assertTrue(v["float"].shape == v["quantized"].shape)

        model_list = [
            AnnotatedConvModel(qengine),
            AnnotatedConvBnReLUModel(qengine)
        ]
        module_swap_list = [nn.Conv2d, nn.intrinsic.modules.fused.ConvReLU2d]
        for model in model_list:
            model.eval()
            if hasattr(model, "fuse_model"):
                model.fuse_model()
            q_model = quantize(model, default_eval_fn, self.img_data)
            compare_and_validate_results(model, q_model, module_swap_list,
                                         self.img_data[0][0])
Example #6
    def test_compare_model_outputs(self):
        r"""Compare the output of conv layer in quantized model and corresponding
        output of conv layer in float model
        """
        def compare_and_validate_results(float_model, q_model, data):
            act_compare_dict = compare_model_outputs(float_model, q_model,
                                                     data)
            self.assertEqual(len(act_compare_dict), 2)
            expected_act_compare_dict_keys = {"conv.stats", "quant.stats"}
            self.assertTrue(
                act_compare_dict.keys() == expected_act_compare_dict_keys)
            for k, v in act_compare_dict.items():
                self.assertTrue(v["float"].shape == v["quantized"].shape)

        for qengine in supported_qengines:
            with override_quantized_engine(qengine):
                model_list = [
                    AnnotatedConvModel(qengine),
                    AnnotatedConvBnReLUModel(qengine),
                ]
                data = self.img_data[0][0]
                module_swap_list = [
                    nn.Conv2d, nn.intrinsic.modules.fused.ConvReLU2d
                ]
                for model in model_list:
                    model.eval()
                    if hasattr(model, "fuse_model"):
                        model.fuse_model()
                    q_model = quantize(model, default_eval_fn, self.img_data)
                    compare_and_validate_results(model, q_model, data)

                # Test functionals
                model = ModelWithFunctionals().eval()
                model.qconfig = torch.quantization.get_default_qconfig(
                    "fbgemm")
                q_model = prepare(model, inplace=False)
                q_model(data)
                q_model = convert(q_model)
                act_compare_dict = compare_model_outputs(model, q_model, data)
                self.assertEqual(len(act_compare_dict), 7)
                expected_act_compare_dict_keys = {
                    "mycat.stats",
                    "myadd.stats",
                    "mymul.stats",
                    "myadd_relu.stats",
                    "my_scalar_add.stats",
                    "my_scalar_mul.stats",
                    "quant.stats",
                }
                self.assertTrue(
                    act_compare_dict.keys() == expected_act_compare_dict_keys)
                for k, v in act_compare_dict.items():
                    self.assertTrue(v["float"].shape == v["quantized"].shape)
Example #7
    def test_compare_weights(self):
        r"""Compare the weights of float and quantized conv layer
        """
        def compare_and_validate_results(float_model, q_model):
            weight_dict = compare_weights(float_model.state_dict(),
                                          q_model.state_dict())
            self.assertEqual(len(weight_dict), 1)
            for k, v in weight_dict.items():
                self.assertTrue(v["float"].shape == v["quantized"].shape)

        model_list = [AnnotatedConvModel(), AnnotatedConvBnReLUModel()]
        for model in model_list:
            model.eval()
            if hasattr(model, "fuse_model"):
                model.fuse_model()
            q_model = quantize(model, default_eval_fn, self.img_data)
            compare_and_validate_results(model, q_model)
Example #8
    def test_compare_weights_conv_static(self):
        r"""Compare the weights of float and static quantized conv layer"""

        qengine = torch.backends.quantized.engine

        def compare_and_validate_results(float_model, q_model):
            weight_dict = compare_weights(
                float_model.state_dict(), q_model.state_dict()
            )
            self.assertEqual(len(weight_dict), 1)
            for k, v in weight_dict.items():
                self.assertTrue(v["float"].shape == v["quantized"].shape)

        model_list = [AnnotatedConvModel(qengine), AnnotatedConvBnReLUModel(qengine)]
        for model in model_list:
            model.eval()
            if hasattr(model, "fuse_model"):
                model.fuse_model()
            q_model = quantize(model, test_only_eval_fn, [self.img_data_2d])
            compare_and_validate_results(model, q_model)