Example #1
    def test_find_and_run_method(self):
        class MyTestModule(torch.nn.Module):
            def forward(self, arg):
                return arg

        input = (torch.tensor([1]), )

        script_module = torch.jit.script(MyTestModule())
        script_module_result = script_module(*input)

        buffer = io.BytesIO(
            script_module._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer)

        has_bundled_inputs = mobile_module.find_method(
            "get_all_bundled_inputs")
        self.assertFalse(has_bundled_inputs)

        torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
            script_module, [input], [])

        buffer = io.BytesIO(
            script_module._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer)

        has_bundled_inputs = mobile_module.find_method(
            "get_all_bundled_inputs")
        self.assertTrue(has_bundled_inputs)

        bundled_inputs = mobile_module.run_method("get_all_bundled_inputs")
        mobile_module_result = mobile_module.forward(*bundled_inputs[0])
        torch.testing.assert_close(script_module_result, mobile_module_result)
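All of the examples in this collection assume the same handful of imports and follow the same save-to-buffer / load-for-lite-interpreter round trip. A minimal, self-contained sketch of that pattern, assuming the private helpers keep their current locations and names (the Echo module is purely illustrative):

import io

import torch
from torch.jit.mobile import _load_for_lite_interpreter


class Echo(torch.nn.Module):
    def forward(self, x):
        return x + 1


# Script the module, serialize it for the lite interpreter, and reload it.
scripted = torch.jit.script(Echo())
buffer = io.BytesIO(scripted._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile = _load_for_lite_interpreter(buffer)
assert torch.equal(scripted(torch.tensor([1])), mobile(torch.tensor([1])))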
Example #2
    def test_versioned_div_scalar_inplace(self, sample_input):
        def historic_div_scalar_float_inplace(self, other: float):
            return self.true_divide_(other)

        def historic_div_scalar_int_inplace(self, other: int):
            if self.is_floating_point():
                return self.true_divide_(other)

            return self.divide_(other, rounding_mode='trunc')

        class MyModuleFloat(torch.nn.Module):
            def __init__(self):
                super(MyModuleFloat, self).__init__()

            def forward(self, a, b: float):
                a /= b
                return a

        class MyModuleInt(torch.nn.Module):
            def __init__(self):
                super(MyModuleInt, self).__init__()

            def forward(self, a, b: int):
                a /= b
                return a

        try:
            v3_mobile_module_float = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_float_v2.ptl"
            )
            v3_mobile_module_int = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_int_v2.ptl"
            )
        except Exception as e:
            self.skipTest("Failed to load fixture!")

        current_mobile_module_float = self._save_load_module(MyModuleFloat)
        current_mobile_module_int = self._save_load_module(MyModuleInt)

        for val_a, val_b in product(sample_input, sample_input):
            a = torch.tensor((val_a, ))
            b = val_b

            def _helper(m, fn):
                m_result = self._try_fn(m, a, b)
                fn_result = self._try_fn(fn, a, b)

                if isinstance(m_result, Exception):
                    self.assertTrue(isinstance(fn_result, Exception))
                else:
                    self.assertEqual(m_result, fn_result)

            if isinstance(b, float):
                _helper(current_mobile_module_float, torch.Tensor.div_)
            else:
                _helper(current_mobile_module_int, torch.Tensor.div_)
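The helpers self._try_fn and self._save_load_module used above (and in several later examples) are defined elsewhere in the test class. Plausible sketches, inferred from how they are called rather than copied from the source, and assuming io and torch are imported at module scope:

    def _try_fn(self, fn, *args, **kwargs):
        # Return the call's result, or the raised exception object itself,
        # so callers can compare error behaviour across module versions.
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            return e

    def _save_load_module(self, m):
        # Script the module class and round-trip it through an in-memory
        # buffer with the regular TorchScript serializer.
        scripted = torch.jit.script(m())
        buffer = io.BytesIO()
        torch.jit.save(scripted, buffer)
        buffer.seek(0)
        return torch.jit.load(buffer)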
Example #3
    def getScriptExportImportCopy(self, m, save_mobile_debug_info=True, also_test_file=False):
        m_scripted = torch.jit.script(m)

        if not also_test_file:
            buffer = io.BytesIO(m_scripted._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=save_mobile_debug_info))
            buffer.seek(0)
            mobile_module = _load_for_lite_interpreter(buffer)
            return mobile_module

        with TemporaryFileName() as fname:
            m_scripted._save_for_lite_interpreter(fname, _save_mobile_debug_info=save_mobile_debug_info)
            mobile_module = _load_for_lite_interpreter(fname)
            return mobile_module
Example #4
    def test_nest_typing_namedtuple_custom_classtype(self):
        class Baz(NamedTuple):
            di: torch.Tensor

        class Foo(NamedTuple):
            id: torch.Tensor
            baz: Baz

        class Bar(torch.nn.Module):
            def __init__(self):
                super(Bar, self).__init__()
                self.foo = Foo(torch.tensor(1), Baz(torch.tensor(1)))

            def forward(self, a: torch.Tensor):
                self.foo = Foo(a, Baz(torch.tensor(1)))
                return self.foo

        sample_input = torch.tensor(5)
        script_module = torch.jit.script(Bar())
        script_module_result = script_module(sample_input)

        buffer_mobile = io.BytesIO(
            script_module._save_to_buffer_for_lite_interpreter())
        buffer_mobile.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer_mobile)
        mobile_module_result = mobile_module(sample_input)
        torch.testing.assert_allclose(script_module_result.baz.di,
                                      mobile_module_result.baz.di)
Example #5
    def test_typing_dict_with_namedtuple(self):
        class Foo(NamedTuple):
            id: torch.Tensor

        class Bar(torch.nn.Module):
            def __init__(self):
                super(Bar, self).__init__()
                self.foo = Foo(torch.tensor(1))

            def forward(self, a: torch.Tensor):
                self.foo = Foo(a)
                re: Dict[str, Foo] = dict()
                re["test"] = Foo(a)
                return self.foo, re["test"]

        # The corresponding bytecode is
        # (8,
        #  ('__torch__.___torch_mangle_2.Bar.forward',
        #   (('instructions',
        #     (('STOREN', 1, 2),
        #      ('DROPR', 1, 0),
        #      ('DICT_CONSTRUCT', 0, 0),
        #      ('STORE', 3, 0),
        #      ('LOAD', 3, 0),
        #      ('LOADC', 1, 0),
        #      ('MOVE', 2, 0),
        #      ('NAMED_TUPLE_CONSTRUCT', 1, 1),
        #      ('OP', 0, 0),
        #      ('MOVE', 3, 0),
        #      ('LOADC', 1, 0),
        #      ('DICT_INDEX', 0, 0),
        #      ('LOADC', 0, 0),
        #      ('TUPLE_INDEX', 0, 0),
        #      ('RET', 0, 0))),
        #    ('operators', (('aten::_set_item', 'str', 3),)),
        #    ('constants', (0, 'test')),
        #    ('types',
        #     ('Dict[str,__torch__.Foo[NamedTuple, [[id, Tensor]]]]',
        #      '__torch__.Foo[NamedTuple, [[id, Tensor]]]')),
        #    ('register_size', 3)),
        #   (('arguments',
        #     ((('name', 'self'),
        #       ('type', '__torch__.___torch_mangle_2.Bar'),
        #       ('default_value', None)),
        #      (('name', 'a'), ('type', 'Tensor'), ('default_value', None)))),
        #    ('returns',
        #     ((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))

        sample_input = torch.tensor(5)
        script_module = torch.jit.script(Bar())

        script_module_result = script_module(sample_input)

        buffer_mobile = io.BytesIO(
            script_module._save_to_buffer_for_lite_interpreter())
        buffer_mobile.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer_mobile)
        mobile_module_result = mobile_module(sample_input)
        torch.testing.assert_allclose(script_module_result,
                                      mobile_module_result)
Example #6
 def setUp(self):
     super().setUp()
     # Create Python and JIT versions of CompModule with lowered submodules.
     compile_spec = {
         "forward": {
             "input_shapes": "((1, 1, 320, 240), (1, 3))",
             "some_other_option": "True",
         },
     }
     lowered_add = torch._C._jit_to_backend(
         "backend_with_compiler_demo", torch.jit.script(BasicModuleAdd()),
         compile_spec)
     lowered_sub = torch._C._jit_to_backend(
         "backend_with_compiler_demo",
         torch.jit.script(CompModuleTestWithCompiler.BasicModuleSub()),
         {"forward": {
             "": ""
         }})
     self.module = CompModuleTestWithCompiler.CompModule(
         lowered_add, lowered_sub)
     self.scripted_module = torch.jit.script(
         CompModuleTestWithCompiler.CompModule(lowered_add, lowered_sub))
     # No backend version of CompModule currently, so this is filler.
     self.lowered_module = self.scripted_module
     # Create a mobile version of CompModule from JIT version
     buffer = io.BytesIO(
         self.scripted_module._save_to_buffer_for_lite_interpreter())
     buffer.seek(0)
     self.mobile_module = _load_for_lite_interpreter(buffer)
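BasicModuleAdd and the CompModule wrapper referenced above are defined elsewhere in the backend tests; a plausible minimal shape for the lowered add submodule (an assumption for illustration, not the verbatim definition) is:

import torch


class BasicModuleAdd(torch.nn.Module):
    # Smallest possible module for the backend_with_compiler_demo backend:
    # it simply adds its two inputs.
    def forward(self, x, h):
        return x + h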
Example #7
    def _compare_script_and_mobile(self, model: torch.nn.Module,
                                   input: torch.Tensor):
        qengine = "qnnpack"
        with override_quantized_engine(qengine):
            script_module = torch.jit.script(model)
            script_module_result = script_module(input)

            max_retry = 5
            for retry in range(1, max_retry + 1):
                # retries up to `max_retry` times; breaks on success, re-raises on the final attempt
                try:
                    buffer = io.BytesIO(
                        script_module._save_to_buffer_for_lite_interpreter())
                    buffer.seek(0)
                    mobile_module = _load_for_lite_interpreter(buffer)

                    mobile_module_result = mobile_module(input)

                    torch.testing.assert_allclose(script_module_result,
                                                  mobile_module_result)
                    mobile_module_forward_result = mobile_module.forward(input)
                    torch.testing.assert_allclose(
                        script_module_result, mobile_module_forward_result)

                    mobile_module_run_method_result = mobile_module.run_method(
                        "forward", input)
                    torch.testing.assert_allclose(
                        script_module_result, mobile_module_run_method_result)
                except AssertionError as e:
                    if retry == max_retry:
                        raise e
                    else:
                        continue
                break
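override_quantized_engine comes from PyTorch's internal test utilities; outside the test suite the equivalent effect is usually achieved by setting the engine directly (a sketch, assuming the qnnpack backend is available in the build):

import torch

# Select the qnnpack quantized engine globally if this build supports it.
if "qnnpack" in torch.backends.quantized.supported_engines:
    torch.backends.quantized.engine = "qnnpack"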
Example #8
    def test_backport_bytecode_from_file_to_buffer(self):
        maximum_checked_in_model_version = max(
            SCRIPT_MODULE_BYTECODE_PKL.keys())
        script_module_v5_path = pytorch_test_dir / "cpp" / "jit" / SCRIPT_MODULE_BYTECODE_PKL[
            maximum_checked_in_model_version]["model_name"]

        if (maximum_checked_in_model_version > MINIMUM_TO_VERSION):
            # Backport model to v4
            script_module_v4_buffer = _backport_for_mobile_to_buffer(
                script_module_v5_path, maximum_checked_in_model_version - 1)

            # Check version of the model v4 from backport
            bytesio = io.BytesIO(script_module_v4_buffer)
            backport_version = _get_model_bytecode_version(bytesio)
            assert (backport_version == maximum_checked_in_model_version - 1)

            # Load model v4 from backport and run forward method
            bytesio = io.BytesIO(script_module_v4_buffer)
            mobile_module = _load_for_lite_interpreter(bytesio)
            module_input = 1
            mobile_module_result = mobile_module(module_input)
            expected_mobile_module_result = 3 * torch.ones([2, 4],
                                                           dtype=torch.float64)
            torch.testing.assert_allclose(mobile_module_result,
                                          expected_mobile_module_result)
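The backport and bytecode-version helpers used in this example are private APIs; the imports these tests assume look roughly like the following (the names may move between releases):

from torch.jit.mobile import (
    _backport_for_mobile,
    _backport_for_mobile_to_buffer,
    _get_model_bytecode_version,
    _load_for_lite_interpreter,
)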
Example #9
    def test_load_mobile_module(self):
        class MyTestModule(torch.nn.Module):
            def __init__(self):
                super(MyTestModule, self).__init__()

            def forward(self, x):
                return x + 10

        input = torch.tensor([1])

        script_module = torch.jit.script(MyTestModule())
        script_module_result = script_module(input)

        buffer = io.BytesIO(
            script_module._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer)

        mobile_module_result = mobile_module(input)
        torch.testing.assert_close(script_module_result, mobile_module_result)

        mobile_module_forward_result = mobile_module.forward(input)
        torch.testing.assert_close(script_module_result,
                                   mobile_module_forward_result)

        mobile_module_run_method_result = mobile_module.run_method(
            "forward", input)
        torch.testing.assert_close(script_module_result,
                                   mobile_module_run_method_result)
Example #10
 def _save_load_mobile_module(self, script_module: torch.jit.ScriptModule):
     buffer = io.BytesIO(
         script_module._save_to_buffer_for_lite_interpreter(
             _save_mobile_debug_info=True))
     buffer.seek(0)
     mobile_module = _load_for_lite_interpreter(buffer)
     return mobile_module
Example #11
    def test_versioned_logspace_out(self):
        class Module(torch.nn.Module):
            def __init__(self):
                super(Module, self).__init__()

            def forward(self, a: Union[int, float, complex],
                        b: Union[int, float, complex], out: torch.Tensor):
                return torch.logspace(a, b, steps=100, out=out)

        model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
        loaded_model = torch.jit.load(model_path)
        buffer = io.BytesIO(
            loaded_model._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        v8_mobile_module = _load_for_lite_interpreter(buffer)
        current_mobile_module = self._save_load_mobile_module(Module)

        sample_inputs = (
            (3, 10, torch.empty((100, ), dtype=torch.int64),
             torch.empty((100, ), dtype=torch.int64)),
            (-10, 10, torch.empty((100, ), dtype=torch.int64),
             torch.empty((100, ), dtype=torch.int64)),
            (4.0, 6.0, torch.empty((100, ), dtype=torch.float64),
             torch.empty((100, ), dtype=torch.float64)),
            (3 + 4j, 4 + 5j, torch.empty((100, ), dtype=torch.complex64),
             torch.empty((100, ), dtype=torch.complex64)),
        )
        for (start, end, out_for_old, out_for_new) in sample_inputs:
            output = v8_mobile_module(start, end, out_for_old)
            output_current = current_mobile_module(start, end, out_for_new)
            # when no step is given, should have used 100
            self.assertTrue(output.size(dim=0) == 100)
            # "Upgraded" model should match the new version output
            self.assertEqual(output, output_current)
Example #12
    def test_versioned_div_tensor(self):
        def div_tensor_0_3(self, other):
            if self.is_floating_point() or other.is_floating_point():
                return self.true_divide(other)
            return self.divide(other, rounding_mode='trunc')

        model_path = pytorch_test_dir / "cpp" / "jit" / "upgrader_models" / "test_versioned_div_tensor_v2.ptl"
        mobile_module_v2 = _load_for_lite_interpreter(str(model_path))
        jit_module_v2 = torch.jit.load(str(model_path))
        current_mobile_module = self._save_load_mobile_module(jit_module_v2)
        vals = (2., 3., 2, 3)
        for val_a, val_b in product(vals, vals):
            a = torch.tensor((val_a, ))
            b = torch.tensor((val_b, ))

            def _helper(m, fn):
                m_results = self._try_fn(m, a, b)
                fn_result = self._try_fn(fn, a, b)

                if isinstance(m_results, Exception):
                    self.assertTrue(isinstance(fn_result, Exception))
                else:
                    for result in m_results:
                        print("result: ", result)
                        print("fn_result: ", fn_result)
                        print(result == fn_result)
                        self.assertTrue(result.eq(fn_result))
Example #13
    def test_backport_bytecode_from_file_to_file(self):
        maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
        script_module_v5_path = pytorch_test_dir / "cpp" / "jit" / SCRIPT_MODULE_BYTECODE_PKL[
            maximum_checked_in_model_version]["model_name"]

        if (maximum_checked_in_model_version > MINIMUM_TO_VERSION):
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmp_backport_model_path = Path(tmpdirname, "tmp_script_module_v5_backported_to_v4.ptl")
                # backport from file
                success = _backport_for_mobile(
                    script_module_v5_path,
                    tmp_backport_model_path,
                    maximum_checked_in_model_version - 1)
                assert(success)

                buf = io.StringIO()
                torch.utils.show_pickle.main(
                    ["", tmpdirname + "/" + tmp_backport_model_path.name + "@*/bytecode.pkl"],
                    output_stream=buf)
                output = buf.getvalue()

                expected_result = SCRIPT_MODULE_V4_BYTECODE_PKL
                actual_result_clean = "".join(output.split())
                expect_result_clean = "".join(expected_result.split())
                isMatch = fnmatch.fnmatch(actual_result_clean, expect_result_clean)
                assert(isMatch)

                # Load model v4 and run forward method
                mobile_module = _load_for_lite_interpreter(str(tmp_backport_model_path))
                module_input = 1
                mobile_module_result = mobile_module(module_input)
                expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
                torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
                shutil.rmtree(tmpdirname)
Example #14
    def test_versioned_logspace(self):
        class Module(torch.nn.Module):
            def __init__(self):
                super(Module, self).__init__()

            def forward(self, a: Union[int, float, complex],
                        b: Union[int, float, complex]):
                c = torch.logspace(a, b, steps=5)
                d = torch.logspace(a, b, steps=100)
                return c, d

        scripted_module = torch.jit.load(
            pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_v8.ptl")

        buffer = io.BytesIO(
            scripted_module._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        v8_mobile_module = _load_for_lite_interpreter(buffer)

        current_mobile_module = self._save_load_mobile_module(Module)

        sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
        for (a, b) in sample_inputs:
            (output_with_step, output_without_step) = v8_mobile_module(a, b)
            (current_with_step,
             current_without_step) = current_mobile_module(a, b)
            # when no step is given, should have used 100
            self.assertTrue(output_without_step.size(dim=0) == 100)
            self.assertTrue(output_with_step.size(dim=0) == 5)
            # outputs should be equal to the newest version
            self.assertEqual(output_with_step, current_with_step)
            self.assertEqual(output_without_step, current_without_step)
Example #15
    def test_versioned_div_scalar_scalar(self):
        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()

            def forward(self, a: float, b: int, c: float, d: int):
                result_0 = a / b
                result_1 = a / c
                result_2 = b / c
                result_3 = b / d
                return (result_0, result_1, result_2, result_3)

        try:
            v3_mobile_module = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_scalar_scalar_v2.ptl"
            )
        except Exception as e:
            self.skipTest("Failed to load fixture!")

        current_mobile_module = self._save_load_mobile_module(MyModule)

        def _helper(m, fn):
            vals = (5., 3, 2., 7)
            m_result = m(*vals)
            fn_result = fn(*vals)
            for mr, hr in zip(m_result, fn_result):
                self.assertEqual(mr, hr)

        _helper(v3_mobile_module, current_mobile_module)
Example #16
    def test_versioned_div_tensor_out(self):
        def historic_div_out(self, other, out):
            if self.is_floating_point() or other.is_floating_point(
            ) or out.is_floating_point():
                return torch.true_divide(self, other, out=out)
            return torch.divide(self, other, out=out, rounding_mode='trunc')

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()

            def forward(self, a, b, out):
                return a.div(b, out=out)

        try:
            v3_module = torch.jit.load(
                pytorch_test_dir +
                "/jit/fixtures/test_versioned_div_tensor_out_v3.pt")
            v3_mobile_module = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_tensor_out_v2.ptl"
            )
        except Exception as e:
            self.skipTest("Failed to load fixture!")

        self._verify_count("aten::div", v3_module,
                           2)  # true_divide and divide alias to div
        self._verify_count('prim::Constant[value="trunc"]', v3_module,
                           1)  # rounding_mode argument

        current_module = self._save_load_module(MyModule)
        current_mobile_module = self._save_load_mobile_module(MyModule)
        self._verify_count("aten::div", current_module, 1)

        vals = (2., 3., 2, 3)
        for val_a, val_b in product(vals, vals):
            a = torch.tensor((val_a, ))
            b = torch.tensor((val_b, ))

            for out in (torch.empty((1, )), torch.empty((1, ),
                                                        dtype=torch.long)):

                def _helper(m, fn):
                    fn_result = None
                    if fn is torch.div:
                        fn_result = self._try_fn(fn, a, b, out=out.clone())
                    else:
                        fn_result = self._try_fn(fn, a, b, out.clone())
                    m_result = self._try_fn(m, a, b, out)

                    if isinstance(m_result, Exception):
                        self.assertTrue(isinstance(fn_result, Exception))
                    else:
                        self.assertEqual(m_result, fn_result)
                        self.assertEqual(m_result, out)

                _helper(v3_module, historic_div_out)
                _helper(current_module, torch.div)
                _helper(v3_mobile_module, historic_div_out)
                _helper(current_mobile_module, torch.div)
Example #17
    def setUp(self):
        super().setUp()

        self.module = CompModule()
        self.scripted_module = torch.jit.script(self.module)
        buffer = io.BytesIO(self.scripted_module._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        self.mobile_module = _load_for_lite_interpreter(buffer)
Example #18
    def test_versioned_div_tensor(self):
        def historic_div(self, other):
            if self.is_floating_point() or other.is_floating_point():
                return self.true_divide(other)
            return self.divide(other, rounding_mode='trunc')

        # Tensor x Tensor
        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()

            def forward(self, a, b):
                result_0 = a / b
                result_1 = torch.div(a, b)
                result_2 = a.div(b)

                return result_0, result_1, result_2

        # Loads historic module
        try:
            v3_module = torch.jit.load(
                pytorch_test_dir +
                "/jit/fixtures/test_versioned_div_tensor_v3.pt")
            v3_mobile_module = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_tensor_v2.ptl")
        except Exception as e:
            self.skipTest("Failed to load fixture!")

        self._verify_count("aten::div", v3_module,
                           6)  # true_divide and divide alias to div
        self._verify_count('prim::Constant[value="trunc"]', v3_module,
                           1)  # rounding_mode argument

        current_module = self._save_load_module(MyModule)
        current_mobile_module = self._save_load_mobile_module(MyModule)
        self._verify_count("aten::div", current_module, 3)

        vals = (2., 3., 2, 3)
        for val_a, val_b in product(vals, vals):
            a = torch.tensor((val_a, ))
            b = torch.tensor((val_b, ))

            def _helper(m, fn):
                m_results = self._try_fn(m, a, b)
                fn_result = self._try_fn(fn, a, b)

                if isinstance(m_results, Exception):
                    self.assertTrue(isinstance(fn_result, Exception))
                else:
                    for result in m_results:
                        self.assertEqual(result, fn_result)

            _helper(v3_module, historic_div)
            _helper(v3_mobile_module, historic_div)
            _helper(current_module, torch.div)
            _helper(current_mobile_module, torch.div)
Example #19
    def test_all_backport_functions(self):
        # Backport from the latest bytecode version to the minimum support version
        # Load, run the backport model, and check version
        class TestModule(torch.nn.Module):
            def __init__(self, v):
                super().__init__()
                self.x = v

            def forward(self, y: int):
                increment = torch.ones([2, 4], dtype=torch.float64)
                return self.x + y + increment

        module_input = 1
        expected_mobile_module_result = 3 * torch.ones([2, 4],
                                                       dtype=torch.float64)

        # temporary input model file and output model file will be exported in the temporary folder
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmp_input_model_path = Path(tmpdirname, "tmp_script_module.ptl")
            script_module = torch.jit.script(TestModule(1))
            optimized_scripted_module = optimize_for_mobile(script_module)
            exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(
                str(tmp_input_model_path))

            current_from_version = _get_model_bytecode_version(
                tmp_input_model_path)
            current_to_version = current_from_version - 1
            tmp_output_model_path = Path(tmpdirname,
                                         "tmp_script_module_backport.ptl")

            while current_to_version >= MINIMUM_TO_VERSION:
                # Backport the latest model to `to_version` to a tmp file "tmp_script_module_backport"
                backport_success = _backport_for_mobile(
                    tmp_input_model_path, tmp_output_model_path,
                    current_to_version)
                assert (backport_success)

                backport_version = _get_model_bytecode_version(
                    tmp_output_model_path)
                assert (backport_version == current_to_version)

                # Load model and run forward method
                mobile_module = _load_for_lite_interpreter(
                    str(tmp_input_model_path))
                mobile_module_result = mobile_module(module_input)
                torch.testing.assert_allclose(mobile_module_result,
                                              expected_mobile_module_result)
                current_to_version -= 1

            # Check backport failure case
            backport_success = _backport_for_mobile(tmp_input_model_path,
                                                    tmp_output_model_path,
                                                    MINIMUM_TO_VERSION - 1)
            assert (not backport_success)
            # clean up the folder before the context manager exits, otherwise CI hits a "git not clean" error
            shutil.rmtree(tmpdirname)
Example #20
    def test_versioned_div_tensor_inplace(self):
        def historic_div_(self, other):
            if self.is_floating_point() or other.is_floating_point():
                return self.true_divide_(other)
            return self.divide_(other, rounding_mode='trunc')

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()

            def forward(self, a, b):
                a /= b
                return a

        try:
            v3_module = torch.jit.load(
                pytorch_test_dir +
                "/jit/fixtures/test_versioned_div_tensor_inplace_v3.pt")
            v3_mobile_module = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_tensor_inplace_v2.ptl"
            )
        except Exception as e:
            self.skipTest("Failed to load fixture!")

        self._verify_count("aten::div", v3_module,
                           2)  # true_divide and divide both alias to div
        self._verify_count('prim::Constant[value="trunc"]', v3_module,
                           1)  # rounding_mode argument

        current_module = self._save_load_module(MyModule)
        current_mobile_module = self._save_load_mobile_module(MyModule)
        self._verify_count("aten::div", current_module, 1)

        vals = (2., 3., 2, 3)
        for val_a, val_b in product(vals, vals):
            a = torch.tensor((val_a, ))
            b = torch.tensor((val_b, ))

            def _helper(m, fn):
                fn_result = self._try_fn(fn, a.clone(), b)
                m_result = self._try_fn(m, a, b)
                if isinstance(m_result, Exception):
                    self.assertTrue(isinstance(fn_result, Exception))
                else:
                    self.assertEqual(m_result, fn_result)
                    self.assertEqual(m_result, a)

            _helper(v3_module, historic_div_)
            _helper(v3_mobile_module, historic_div_)

            # Recreates a since it was modified in place
            a = torch.tensor((val_a, ))
            _helper(current_module, torch.Tensor.div_)
            _helper(current_mobile_module, torch.Tensor.div_)
Example #21
def runModule(module):
    buffer = io.BytesIO(module._save_to_buffer_for_lite_interpreter())
    buffer.seek(0)
    lite_module = _load_for_lite_interpreter(buffer)
    if lite_module.find_method("get_all_bundled_inputs"):
        # run with the first bundled input
        input = lite_module.run_method("get_all_bundled_inputs")[0]
        lite_module.forward(*input)
    else:
        # assuming model has no input
        lite_module()
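A usage sketch for runModule; the AddOne module and the bundled sample input are illustrative assumptions, not part of the original code:

import torch
import torch.utils.bundled_inputs


class AddOne(torch.nn.Module):
    def forward(self, x):
        return x + 1


scripted = torch.jit.script(AddOne())
# Attach one bundled sample input so runModule takes the bundled-input path.
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
    scripted, [(torch.tensor([1.0]),)])
runModule(scripted)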
Example #22
 def wrapper_save():
     _MODELS.append(cls)
     model = cls()
     scripted = torch.jit.script(model)
     buffer = BytesIO(scripted._save_to_buffer_for_lite_interpreter())
     buffer.seek(0)
     mobile_module = _load_for_lite_interpreter(buffer)
     ops = _export_operator_list(mobile_module)
     _OPERATORS.update(ops)
     path = f"./{cls.__name__}.ptl"
     _FILENAMES.append(path)
     scripted._save_for_lite_interpreter(path)
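wrapper_save closes over cls and several module-level registries, so it is presumably the body of a class decorator. A plausible enclosing structure (the decorator name and registry names are assumptions inferred from the closure above):

from io import BytesIO

import torch
from torch.jit.mobile import _export_operator_list, _load_for_lite_interpreter

_MODELS = []        # registered module classes
_OPERATORS = set()  # operators used by the exported models
_FILENAMES = []     # paths of the exported .ptl files


def save_model(cls):
    """Hypothetical decorator: script cls, export it, and record its operators."""
    def wrapper_save():
        _MODELS.append(cls)
        model = cls()
        scripted = torch.jit.script(model)
        buffer = BytesIO(scripted._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer)
        ops = _export_operator_list(mobile_module)
        _OPERATORS.update(ops)
        path = f"./{cls.__name__}.ptl"
        _FILENAMES.append(path)
        scripted._save_for_lite_interpreter(path)

    wrapper_save()
    return cls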
Example #23
    def test_stacktrace_interface_call(self):
        @torch.jit.interface
        class Forward(torch.nn.Module):
            def forward(self, x) -> torch.Tensor:
                pass

            def forwardError(self, x) -> torch.Tensor:
                pass

        class B(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return x

            def forwardError(self, x):
                return self.call() + x

            def call(self):
                return torch.ones(-1)

        class A(torch.nn.Module):
            b: Forward

            def __init__(self):
                super().__init__()
                self.b = B()

            def forward(self):
                self.b.forward(torch.ones(1))
                self.b.forwardError(torch.ones(1))

        a = torch.jit.script(A())
        torch._C._enable_mobile_interface_call_export()
        buffer = io.BytesIO(
            a._save_to_buffer_for_lite_interpreter(
                _save_mobile_debug_info=True))
        buffer.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer)
        try:
            mobile_module()
            self.assertTrue(False)
        except RuntimeError as exp:
            FileCheck().check("Trying to create tensor with negative dimension") \
                .check("Traceback of TorchScript") \
                .check("self.b.forwardError").check_next("~~~~~~~~~~~~~~~~~~~ <--- HERE") \
                .check("return self.call").check_next("~~~~~~~~~ <--- HERE") \
                .check("return torch.ones").check_next("~~~~~~~~~~ <--- HERE").run(str(exp))
Example #24
    def test_save_mobile_module_with_debug_info_with_trace(self):
        class A(torch.nn.Module):
            def __init__(self):
                super(A, self).__init__()

            def forward(self, x, y):
                return x * y

        class B(torch.nn.Module):
            def __init__(self):
                super(B, self).__init__()
                self.A0 = A()
                self.A1 = A()

            def forward(self, x, y, z):
                return self.A0(x, y) + self.A1(y, z)

        for export_method in ['trace', 'script']:
            x = torch.rand((2, 3))
            y = torch.rand((2, 3))
            z = torch.rand((2, 3))
            if export_method == 'trace':
                trace_module = torch.jit.trace(B(), [x, y, z])
            else:
                trace_module = torch.jit.script(B())
            exported_module = trace_module._save_to_buffer_for_lite_interpreter(
                _save_mobile_debug_info=True)
            buffer = io.BytesIO(exported_module)
            buffer.seek(0)

            assert (b"callstack_debug_map.pkl" in exported_module)

            mobile_module = _load_for_lite_interpreter(buffer)
            with self.assertRaisesRegex(
                    RuntimeError,
                    r"Module hierarchy:top\(B\)::<unknown>.A0\(A\)::forward.aten::mul"
            ):
                x = torch.rand((2, 3))
                y = torch.rand((8, 10))
                z = torch.rand((8, 10))
                mobile_module(x, y, z)
            with self.assertRaisesRegex(
                    RuntimeError,
                    r"Module hierarchy:top\(B\)::<unknown>.A1\(A\)::forward.aten::mul"
            ):
                x = torch.rand((2, 3))
                y = torch.rand((2, 3))
                z = torch.rand((8, 10))
                mobile_module(x, y, z)
Example #25
    def test_versioned_div_tensor_out(self, sample_input):
        def historic_div_out(self, other, out):
            if self.is_floating_point() or other.is_floating_point(
            ) or out.is_floating_point():
                return torch.true_divide(self, other, out=out)
            return torch.divide(self, other, out=out, rounding_mode='trunc')

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()

            def forward(self, a, b, out):
                return a.div(b, out=out)

        try:
            v3_mobile_module = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_tensor_out_v2.ptl"
            )
        except Exception as e:
            self.skipTest("Failed to load fixture!")

        current_mobile_module = self._save_load_mobile_module(MyModule)

        for val_a, val_b in product(sample_input, sample_input):
            a = torch.tensor((val_a, ))
            b = torch.tensor((val_b, ))

            for out in (torch.empty((1, )), torch.empty((1, ),
                                                        dtype=torch.long)):

                def _helper(m, fn):
                    fn_result = None
                    if fn is torch.div:
                        fn_result = self._try_fn(fn, a, b, out=out.clone())
                    else:
                        fn_result = self._try_fn(fn, a, b, out.clone())
                    m_result = self._try_fn(m, a, b, out)

                    if isinstance(m_result, Exception):
                        self.assertTrue(isinstance(fn_result, Exception))
                    else:
                        self.assertEqual(m_result, fn_result)
                        self.assertEqual(m_result, out)

                _helper(v3_mobile_module, historic_div_out)
                _helper(current_mobile_module, torch.div)
Example #26
    def test_return_collections_namedtuple(self):
        myNamedTuple = namedtuple('myNamedTuple', ['a'])

        class MyTestModule(torch.nn.Module):
            def forward(self, a: torch.Tensor):
                return myNamedTuple(a)

        sample_input = torch.Tensor(1)
        script_module = torch.jit.script(MyTestModule())
        script_module_result = script_module(sample_input)
        buffer_mobile = io.BytesIO(
            script_module._save_to_buffer_for_lite_interpreter())
        buffer_mobile.seek(0)
        mobile_module = _load_for_lite_interpreter(buffer_mobile)
        mobile_module_result = mobile_module(sample_input)
        torch.testing.assert_allclose(script_module_result,
                                      mobile_module_result)
Example #27
    def test_method_calls_with_optional_arg(self):
        class A(torch.nn.Module):
            def __init__(self):
                super(A, self).__init__()

            # opt arg in script-to-script invocation
            def forward(self, x, two: int = 2):
                return x + two

        class B(torch.nn.Module):
            def __init__(self):
                super(B, self).__init__()
                self.A0 = A()

            # opt arg in Python-to-script invocation
            def forward(self, x, one: int = 1):
                return self.A0(x) + one

        script_module = torch.jit.script(B())
        buffer = io.BytesIO(
            script_module._save_to_buffer_for_lite_interpreter()
        )
        mobile_module = _load_for_lite_interpreter(buffer)

        input = torch.tensor([5])
        script_module_forward_result = script_module.forward(input)
        mobile_module_forward_result = mobile_module.forward(input)
        torch.testing.assert_close(
            script_module_forward_result,
            mobile_module_forward_result
        )

        # change only the script module's optional arg; the cached results should now differ
        script_module_forward_result = script_module.forward(input, 2)
        self.assertFalse(
            (script_module_forward_result == mobile_module_forward_result)
            .all()
            .item()
        )

        # pass the same optional arg to the mobile module; the results match again
        mobile_module_forward_result = mobile_module.forward(input, 2)
        torch.testing.assert_close(
            script_module_forward_result,
            mobile_module_forward_result
        )
Example #28
 def setUp(self):
     super().setUp()
     # Create Python, JIT and backend versions of BasicModuleAdd.
     self.module = BasicModuleAdd()
     self.scripted_module = torch.jit.script(BasicModuleAdd())
     compile_spec = {
         "forward": {
             "input_shapes": "((1, 1, 320, 240), (1, 3))",
             "some_other_option": "True",
         },
     }
     self.lowered_module = torch._C._jit_to_backend(
         "backend_with_compiler_demo", self.scripted_module, compile_spec)
     # Create mobile version of BasicModuleAdd
     buffer = io.BytesIO(self.lowered_module._save_to_buffer_for_lite_interpreter())
     buffer.seek(0)
     self.mobile_module = _load_for_lite_interpreter(buffer)
Example #29
    def test_versioned_div_tensor(self, sample_input):
        def historic_div(self, other):
            if self.is_floating_point() or other.is_floating_point():
                return self.true_divide(other)
            return self.divide(other, rounding_mode='trunc')

        # Tensor x Tensor
        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()

            def forward(self, a, b):
                result_0 = a / b
                result_1 = torch.div(a, b)
                result_2 = a.div(b)

                return result_0, result_1, result_2

        # Loads historic module
        try:
            v3_mobile_module = _load_for_lite_interpreter(
                pytorch_test_dir +
                "/cpp/jit/upgrader_models/test_versioned_div_tensor_v2.ptl")
        except Exception as e:
            self.skipTest("Failed to load fixture!")

        current_mobile_module = self._save_load_mobile_module(MyModule)

        for val_a, val_b in product(sample_input, sample_input):
            a = torch.tensor((val_a, ))
            b = torch.tensor((val_b, ))

            def _helper(m, fn):
                m_results = self._try_fn(m, a, b)
                fn_result = self._try_fn(fn, a, b)

                if isinstance(m_results, Exception):
                    self.assertTrue(isinstance(fn_result, Exception))
                else:
                    for result in m_results:
                        self.assertEqual(result, fn_result)

            _helper(v3_mobile_module, historic_div)
            _helper(current_mobile_module, torch.div)
Example #30
    def test_bundled_input_with_dynamic_type(self):
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()

            def forward(
                self,
                x: Dict[int, torch.Tensor],
                y: Dict[int, torch.Tensor],
                z: Dict[int, torch.Tensor],
            ):
                return x

        model = Model()
        script_module = torch.jit.script(model)

        sample_input = {
            script_module.forward: [
                (
                    {0: torch.ones(1)},
                    {1: torch.ones(1)},
                    {2: torch.ones(1)},
                )
            ]
        }

        bundled_model = torch.utils.bundled_inputs.bundle_inputs(
            script_module, sample_input
        )

        buf = bundled_model._save_to_buffer_for_lite_interpreter()
        mobile_module = _load_for_lite_interpreter(io.BytesIO(buf))

        i = mobile_module.run_method("get_all_bundled_inputs")

        self.assertEqual(
            i[0],
            (
                {0: torch.ones(1)},
                {1: torch.ones(1)},
                {2: torch.ones(1)},
            ),
        )