Example #1
class ScriptabilityTest(QuantizationTestCase):
    def setUp(self):
        self.model_under_test = ModForWrapping(quantized=False)
        self.qmodel_under_test = ModForWrapping(quantized=True)
        self.qmodel_under_test = self.qmodel_under_test.from_float(
            self.model_under_test)
        self.x = torch.rand(10)
        self.qx = torch.quantize_per_tensor(self.x.to(torch.float), scale=1.0,
                                            zero_point=0, dtype=torch.qint32)

    def test_scriptability_serialization(self):
        # test serialization of quantized functional modules
        b = io.BytesIO()
        torch.save(self.qmodel_under_test, b)
        b.seek(0)
        loaded = torch.load(b)
        self.assertEqual(self.qmodel_under_test.myadd.zero_point, loaded.myadd.zero_point)
        state_dict = self.qmodel_under_test.state_dict()
        self.assertTrue('myadd.zero_point' in state_dict.keys(),
                        'zero point not in state dict for functional modules')

        x = torch.rand(10, 1, dtype=torch.float)
        xq = torch.quantize_per_tensor(x, 1.0, 0, torch.qint8)
        self.checkScriptable(self.qmodel_under_test, [(xq, xq)], check_save_load=True)
        self.checkScriptable(self.model_under_test, [(xq.dequantize(), xq.dequantize())], check_save_load=True)
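Note that QuantizationTestCase, ModForWrapping and checkScriptable come from PyTorch's own test suite rather than the public API. A minimal standalone sketch of the same serialization round-trip, assuming only torch and the standard io module, might look like this:

import io
import torch

# Quantize a float tensor, save it to an in-memory buffer, and load it back,
# mirroring the torch.save/torch.load round-trip exercised in the test above.
x = torch.rand(10)
qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.qint32)

buf = io.BytesIO()
torch.save(qx, buf)
buf.seek(0)
loaded = torch.load(buf)

# The loaded tensor dequantizes to the same values as the original.
print(torch.equal(qx.dequantize(), loaded.dequantize()))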
Example #2
class ScriptabilityTest(QuantizationTestCase):
    def setUp(self):
        self.model_under_test = ModForWrapping(quantized=False)
        self.qmodel_under_test = ModForWrapping(quantized=True)
        self.qmodel_under_test = self.qmodel_under_test.from_float(
            self.model_under_test)
        self.x = torch.rand(10)
        self.qx = torch.quantize_per_tensor(self.x.to(torch.float),
                                            scale=1.0,
                                            zero_point=0,
                                            dtype=torch.qint32)

    def test_scriptability_serialization(self):
        # test serialization of quantized functional modules
        with tempfile.TemporaryFile() as f:
            torch.save(self.qmodel_under_test, f)
            f.seek(0)
            loaded = torch.load(f)
        self.assertEqual(self.qmodel_under_test.myadd.zero_point,
                         loaded.myadd.zero_point)
        state_dict = self.qmodel_under_test.state_dict()
        self.assertIn('myadd.zero_point', state_dict)

        x = torch.rand(10, 1, dtype=torch.float)
        xq = torch.quantize_per_tensor(x, 1.0, 0, torch.qint8)
        self.checkScriptable(self.qmodel_under_test, [(xq, xq)],
                             check_save_load=True)
        self.checkScriptable(self.model_under_test,
                             [(xq.dequantize(), xq.dequantize())],
                             check_save_load=True)
Example #3
def setUp(self):
    self.model_under_test = ModForWrapping(quantized=False)
    self.qmodel_under_test = ModForWrapping(quantized=True)
    self.qmodel_under_test = self.qmodel_under_test.from_float(
        self.model_under_test)
    self.x = torch.rand(10)
    self.qx = torch.quantize_per_tensor(self.x.to(torch.float), scale=1.0,
                                        zero_point=0, dtype=torch.qint32)
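For reference, scale, zero_point and dtype define the per-tensor affine scheme used by torch.quantize_per_tensor in the setUp above. A small sketch (assuming only torch) of how they map floats to the stored integers:

import torch

x = torch.tensor([0.0, 0.5, 1.0])
qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.qint8)

print(qx.int_repr())    # stored integers: round(x / scale) + zero_point -> [0, 1, 2]
print(qx.dequantize())  # recovered floats: (q - zero_point) * scale -> [0.0, 0.5, 1.0]
print(qx.q_scale(), qx.q_zero_point())  # 0.5 0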
Example #4
class ScriptabilityTest(QuantizationTestCase):
    def setUp(self):
        self.model_under_test = ModForWrapping(quantized=False)
        self.qmodel_under_test = ModForWrapping(quantized=True)
        self.qmodel_under_test = self.qmodel_under_test.from_float(
            self.model_under_test)
        self.x = torch.rand(10)
        self.qx = torch.quantize_per_tensor(self.x.to(torch.float),
                                            scale=1.0,
                                            zero_point=0,
                                            dtype=torch.qint32)

    def test_quantized(self):
        qtraced_model = torch.jit.trace(self.qmodel_under_test,
                                        self.qx,
                                        check_trace=False)
        self.assertEqual(qtraced_model(self.qx),
                         self.qmodel_under_test(self.qx))

        qscripted_model = torch.jit.script(self.qmodel_under_test)
        self.assertEqual(qscripted_model(self.qx),
                         self.qmodel_under_test(self.qx))

    def test_float(self):
        traced_model = torch.jit.trace(self.model_under_test,
                                       self.x,
                                       check_trace=False)
        self.assertEqual(traced_model(self.x), self.model_under_test(self.x))

        scripted_model = torch.jit.script(self.model_under_test)
        self.assertEqual(scripted_model(self.x), self.model_under_test(self.x))
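Both torch.jit.trace and torch.jit.script should produce a module whose outputs match eager execution, which is exactly what the two tests above assert. A minimal standalone sketch of that check on a plain float module (SmallModel is a hypothetical stand-in, not part of the test above):

import torch
import torch.nn as nn

class SmallModel(nn.Module):
    def forward(self, x):
        return torch.relu(x + 1.0)

model = SmallModel()
x = torch.rand(10)

traced = torch.jit.trace(model, x)    # records the ops run on the example input
scripted = torch.jit.script(model)    # compiles the module from its Python source

assert torch.equal(traced(x), model(x))
assert torch.equal(scripted(x), model(x))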