Example #1
    def tuple_test_helper(self, ModType):
        input = torch.randn(4)

        model = ModType()

        spec = torch_glow.CompilationSpec()
        spec.get_settings().set_glow_backend("Interpreter")

        compilation_group = torch_glow.CompilationGroup()
        spec.compilation_groups_append(compilation_group)

        input_spec = torch_glow.InputSpec()
        input_spec.set_same_as(input)

        compilation_group.input_sets_append([input_spec])

        scripted_mod = torch.jit.script(model)
        lowered_model = torch_glow.to_glow(scripted_mod, {"forward": spec})

        # Run Glow model
        g = lowered_model(input)

        # Run reference model
        t = model(input)

        self.assertEqual(type(g), type(t))
        self.assertEqual(len(g), len(t))

        for (gi, ti) in zip(g, t):
            self.assertTrue(torch.allclose(gi, ti))
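
tuple_test_helper assumes ModType is a module whose forward returns a tuple of tensors, so the element-wise comparison above is meaningful. A minimal sketch of such a module (hypothetical name):

class TupleModule(torch.nn.Module):
    def forward(self, x):
        # Return a tuple so the lowered output can be zipped against
        # the reference output.
        return (x + x, x * x)

# e.g. self.tuple_test_helper(TupleModule)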
Example #2
    def test_save_preprocessed_module(self):
        with torch.no_grad():
            x = torch.randn([1, 4, 4, 4], dtype=torch.float32)
            model = Bar()
            model.eval()
            model = torch.jit.trace(model, x)

            spec = torch_glow.CompilationSpec()
            spec.get_settings().set_glow_backend("Interpreter")

            compilation_group = torch_glow.CompilationGroup()
            spec.compilation_groups_append(compilation_group)

            compilation_group.input_sets_append(
                torch_glow.input_specs_from_tensors([x]))

            torch_glow.disableFusionPass()
            torch_glow.enable_convert_to_fp16()
            glow_mod = torch_glow.to_glow(model, spec)

            reloaded = utils.save_and_reload_model(glow_mod)

            wrappername = "__loweredModule__"
            attrname = "__processed_module"
            wp = getattr(reloaded._c, wrappername)
            pp = getattr(wp, attrname)
            pt_model = torch.jit._recursive.wrap_cpp_module(pp)
            graph = pt_model.graph_for(x)
            found = False
            for node in graph.nodes():
                if node.kind() == "quantized::conv2d":
                    found = True

            assert found
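
utils.save_and_reload_model is not shown here; it presumably round-trips the module through torch.jit.save and torch.jit.load. A minimal sketch, assuming in-memory serialization:

import io

def save_and_reload_model(model):
    # Serialize the lowered module to a buffer and load it back,
    # exercising the (de)serialization path end to end.
    buffer = io.BytesIO()
    torch.jit.save(model, buffer)
    buffer.seek(0)
    return torch.jit.load(buffer)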
Example #3
    def test_to_glow_tuple_output(self):
        input = torch.randn(4)

        model = Foo()

        spec = torch_glow.CompilationSpec()
        spec.get_settings().set_glow_backend("Interpreter")

        compilation_group = torch_glow.CompilationGroup()
        spec.compilation_groups_append(compilation_group)

        input_spec = torch_glow.InputSpec()
        input_spec.set_same_as(input)

        compilation_group.input_sets_append([input_spec])

        scripted_mod = torch.jit.script(model)
        lowered_model = torch_glow.to_glow(scripted_mod, {"forward": spec})

        # Run Glow model
        (gx, gy) = lowered_model(input)

        # Run reference model
        (tx, ty) = model(input)

        assert torch.allclose(tx, gx)
        assert torch.allclose(ty, gy)
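
Foo is assumed here to be a module whose forward returns a two-element tuple, matching the (gx, gy) unpacking above. A minimal sketch:

class Foo(torch.nn.Module):
    def forward(self, x):
        # Two tensor outputs, so to_glow must preserve the tuple return.
        return (x + 1.0, x * 2.0)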
Example #4
    def devices_to_use_test_helper(self, input, num_replications):
        model = SimpleModule()

        spec = torch_glow.CompilationSpec()
        spec.get_settings().set_glow_backend("Interpreter")
        # Initialize with the total number of devices.
        torch_glow.setGlowBackendNumDevices(6)

        compilation_group = torch_glow.CompilationGroup()
        spec.compilation_groups_append(compilation_group)

        input_spec = torch_glow.InputSpec()
        input_spec.set_same_as(input)
        compilation_group.input_sets_append([input_spec])
        compilation_group_settings = compilation_group.get_settings()
        compilation_group_settings.set_num_devices_to_use(3)
        compilation_group_settings.set_replication_count(num_replications)

        traced_mod = torch.jit.trace(model, input)
        lowered_model = torch_glow.to_glow(traced_mod, {"forward": spec})

        g = lowered_model(input)
        t = model(input)

        self.assertEqual(type(g), type(t))
        self.assertEqual(len(g), len(t))
        for (gi, ti) in zip(g, t):
            self.assertTrue(torch.allclose(gi, ti))
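
A sketch of how this helper might be invoked from a test case; the input shape is an assumption, and SimpleModule must return an iterable of tensors for the zip comparison to work:

    def test_devices_to_use(self):
        self.devices_to_use_test_helper(torch.randn(4), num_replications=2)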
Example #5
    def test_to_glow_multiple_specs(self):
        a = torch.randn(4)
        b = torch.randn(6)

        model = Foo()
        torch_resA = model(a)
        torch_resB = model(b)

        metaA = torch_glow.InputMeta()
        metaA.set_same_as(a)
        inputA = [metaA]

        metaB = torch_glow.InputMeta()
        metaB.set_same_as(b)
        inputB = [metaB]

        options = torch_glow.CompilationOptions()
        options.backend = "Interpreter"
        specA = torch_glow.GlowCompileSpec()
        specA.set(inputA, options)
        specB = torch_glow.GlowCompileSpec()
        specB.set(inputB, options)

        scripted_mod = torch.jit.script(model)
        lowered_mod = torch_glow.to_glow(scripted_mod, [specA, specB])
        glow_resA = lowered_mod(a)
        glow_resB = lowered_mod(b)

        assert torch.allclose(torch_resA, glow_resA)
        assert torch.allclose(torch_resB, glow_resB)
Example #6
def compare_tracing_methods(
    module,
    *inputs,
    atol=5e-4,
    rtol=1e-3,
    reference=None,
    fusible_ops=None,
    fusion_blocklist=None,
    fp16=False,
    scripted=False,
    check_trace=True,
    skip_to_glow=False,  # Ugly hack, TODO: Remove
):
    if not isinstance(module, torch.nn.Module):
        raise AssertionError("to_glow only supports nn.Modules")

    def trace(mod, ins):
        if scripted:
            return torch.jit.script(mod)
        else:
            return torch.jit.trace(mod, ins, check_trace=check_trace)

    with torch.no_grad():
        with ephemeral_torchglow_settings(
            fusion=True, fp16=fp16, blocklist=fusion_blocklist
        ):
            fusion_inputs = deepcopy(inputs)
            fusion_trace = trace(module, fusion_inputs)
            assert_fused(
                fusion_trace.graph_for(*fusion_inputs),
                *(fusible_ops or []),
                accept_any=fusible_ops is None,
            )
            fusion_result = fusion_trace(*fusion_inputs)
        with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
            if scripted:
                torchscript_result = module(*deepcopy(inputs))
            else:
                torchscript_inputs = deepcopy(inputs)
                torchscript_trace = trace(module, torchscript_inputs)
                torchscript_result = torchscript_trace(*torchscript_inputs)
        with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
            if not skip_to_glow:
                glow_inputs = deepcopy(inputs)
                glow_spec = generate_glow_spec(module, DEFAULT_BACKEND, *glow_inputs)
                glow_trace = torch_glow.to_glow(trace(module, glow_inputs), glow_spec)
                glow_result = glow_trace(*glow_inputs)
        if reference:
            assert_equivalent(reference, fusion_result, atol=atol, rtol=rtol)
            assert_equivalent(reference, torchscript_result, atol=atol, rtol=rtol)
            if not skip_to_glow:
                assert_equivalent(reference, glow_result, atol=atol, rtol=rtol)
        # This is written out manually instead of using combinations in order to aid
        # debugging. TODO: Clean up.
        assert_equivalent(fusion_result, torchscript_result, atol=atol, rtol=rtol)
        if not skip_to_glow:
            assert_equivalent(fusion_result, glow_result, atol=atol, rtol=rtol)
            assert_equivalent(torchscript_result, glow_result, atol=atol, rtol=rtol)
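
A hypothetical call, comparing eager, TorchScript, and Glow execution of a ReLU module; the fusible op name is an assumption and depends on what assert_fused expects:

compare_tracing_methods(
    torch.nn.ReLU(),
    torch.randn(2, 3),
    fusible_ops=["aten::relu"],  # assumed op name
)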
Example #7
def compare_tracing_methods_error(
    module,
    *inputs,
    fusible_ops=None,
    fusion_blocklist=None,
    fp16=False,
):
    if not isinstance(module, torch.nn.Module):
        raise AssertionError("to_glow only supports nn.Modules")

    def trace(mod, ins):
        return torch.jit.trace(mod, ins)

    with torch.no_grad():
        with ephemeral_torchglow_settings(fusion=True,
                                          fp16=fp16,
                                          blocklist=fusion_blocklist):
            fusion_inputs = deepcopy(inputs)
            try:
                fusion_trace = trace(module, fusion_inputs)
                assert_fused(
                    fusion_trace.graph_for(*fusion_inputs),
                    *(fusible_ops or []),
                    accept_any=fusible_ops is None,
                )
                fusion_trace(*fusion_inputs)
            except Exception:
                pass
            else:
                raise AssertionError(
                    "Error expected (fusion), but none was raised")
        with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
            try:
                torchscript_inputs = deepcopy(inputs)
                torchscript_trace = trace(module, torchscript_inputs)
                torchscript_trace(*torchscript_inputs)
            except Exception:
                pass
            else:
                raise AssertionError(
                    "Error expected (torchscript), but none was raised")
        with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
            try:
                glow_inputs = deepcopy(inputs)
                glow_spec = torch_glow.lower(
                    model=module,
                    example_inputs=glow_inputs,
                    backend=DEFAULT_BACKEND,
                )
                glow_trace = torch_glow.to_glow(trace(module, glow_inputs),
                                                glow_spec)
                glow_trace(*glow_inputs)
            except Exception:
                pass
            else:
                raise AssertionError(
                    "Error expected (glow), but none was raised")
Example #8
def run_to_glow(m, x):
    """Trace the model m with input x and call to_glow"""
    traced_m = torch.jit.trace(m, (x,))

    spec = torch.classes.glow.GlowCompileSpec()
    spec.setBackend("Interpreter")
    sim = torch.classes.glow.SpecInputMeta()
    sim.set(x.size(), torch.float32)
    inputs = [sim]
    spec.addInputs(inputs)

    lowered_module = torch_glow.to_glow(traced_m, {"forward": spec})
    return lowered_module
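
Hypothetical usage, lowering a small linear layer to the Interpreter backend:

model = torch.nn.Linear(4, 2)
x = torch.randn(1, 4)
lowered = run_to_glow(model, x)
print(lowered(x))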
Example #9
def run_to_glow(m, x):
    """Trace the model m with input x and call to_glow"""
    traced_m = torch.jit.trace(m, (x,))

    input_meta = InputMeta()
    input_meta.set(x.size(), torch.float32)
    inputs = [input_meta]
    options = CompilationOptions()
    options.backend = "Interpreter"
    spec = GlowCompileSpec()
    spec.set(inputs, options)

    lowered_module = torch_glow.to_glow(traced_m, {"forward": spec})
    return lowered_module
Example #10
def run_model(m, input, randomize):
    torch_glow.disableFusionPass()
    traced_m = torch.jit.trace(m, input)

    input_meta = InputMeta()
    input_meta.set_same_as(input)
    inputs = [input_meta]
    options = CompilationOptions()
    options.backend = "Interpreter"
    options.randomize_constants = randomize
    spec = GlowCompileSpec()
    spec.set(inputs, options)

    glow_m = torch_glow.to_glow(traced_m, {"forward": spec})
    return glow_m.forward(input)
Example #11
def run_model(m, input, randomize):
    if randomize:
        torch_glow.enable_randomize_constants()
    else:
        torch_glow.disable_randomize_constants()

    torch_glow.disableFusionPass()
    traced_m = torch.jit.trace(m, input)

    spec = torch.classes.glow.GlowCompileSpec()
    spec.setBackend("Interpreter")
    sim = torch.classes.glow.SpecInputMeta()
    sim.setSameAs(input)
    spec.addInputs([sim])

    glow_m = torch_glow.to_glow(traced_m, {"forward": spec})
    return glow_m.forward(input)
Example #12
def run_to_glow(m, x):
    """Trace the model m with input x and call to_glow"""
    traced_m = torch.jit.trace(m, (x,))

    spec = torch_glow.CompilationSpec()
    spec.get_settings().set_glow_backend("Interpreter")

    compilation_group = torch_glow.CompilationGroup()
    spec.compilation_groups_append(compilation_group)

    input_spec = torch_glow.InputSpec()
    input_spec.set_same_as(x)

    compilation_group.input_sets_append([input_spec])

    lowered_module = torch_glow.to_glow(traced_m, spec)

    return lowered_module
Example #13
    def test_to_glow_multiple_groups_and_input_sets(self):
        x1 = torch.randn(1, 4)
        y1 = torch.randn(2, 4)

        x2 = torch.randn(1, 2)
        y2 = torch.randn(5, 2)

        x3 = torch.randn(7)
        y3 = torch.randn(3, 7)

        mod = Foo()
        scripted_mod = torch.jit.script(mod)

        x1_y1_set = torch_glow.input_specs_from_tensors([x1, y1])
        x2_y2_set = torch_glow.input_specs_from_tensors([x2, y2])
        x3_y3_set = torch_glow.input_specs_from_tensors([x3, y3])

        # Create two CompilationGroups: the first contains two input sets
        # and the second contains the third input set
        spec = torch_glow.CompilationSpec()
        spec.get_settings().set_glow_backend("Interpreter")

        compilation_group_1 = torch_glow.CompilationGroup()
        compilation_group_2 = torch_glow.CompilationGroup()
        spec.compilation_groups_append(compilation_group_1)
        spec.compilation_groups_append(compilation_group_2)

        compilation_group_1.input_sets_append(x1_y1_set)
        compilation_group_1.input_sets_append(x2_y2_set)
        compilation_group_2.input_sets_append(x3_y3_set)

        lowered_module = torch_glow.to_glow(scripted_mod, spec)

        torch_res1 = mod(x1, y1)
        torch_res2 = mod(x2, y2)
        torch_res3 = mod(x3, y3)

        glow_res1 = lowered_module(x1, y1)
        glow_res2 = lowered_module(x2, y2)
        glow_res3 = lowered_module(x3, y3)

        assert torch.allclose(torch_res1, glow_res1)
        assert torch.allclose(torch_res2, glow_res2)
        assert torch.allclose(torch_res3, glow_res3)
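
In this test Foo takes two tensors whose shapes broadcast ((1, 4) with (2, 4), (7,) with (3, 7)); a minimal sketch consistent with that:

class Foo(torch.nn.Module):
    def forward(self, x, y):
        # A broadcasting add covers all three input-set shapes above.
        return x + y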
Example #14
    def test_serialization(self):
        with torch.no_grad():
            x = torch.randn([1, 4, 4, 4], dtype=torch.float32)
            y = torch.randn([1, 4, 4, 4], dtype=torch.float32)
            model = Bar()
            model = torch.jit.trace(model, (x, y))

            spec = torch_glow.CompilationSpec()
            spec_settings = spec.get_settings()
            spec_settings.set_glow_backend("NNPI")
            # Enable serialization in this spec
            spec_settings.set_enable_serialize(True)

            compilation_group = torch_glow.CompilationGroup()
            compilation_group_settings = compilation_group.get_settings()
            compilation_group_settings.set_replication_count(1)
            compilation_group_settings.backend_specific_opts_insert(
                "NNPI_IceCores", "1")

            compilation_group.input_sets_append(
                torch_glow.input_specs_from_tensors([x, y]))

            spec.compilation_groups_append(compilation_group)
            torch_glow.disableFusionPass()
            torch_glow.enable_convert_to_fp16()

            # Enable global serialization, then compile (serialize)
            # the model and save it
            torch_glow.enable_dump_serialized_model()
            glow_mod = torch_glow.to_glow(model, spec)
            res1 = glow_mod(x, y)
            torch.jit.save(glow_mod, "/tmp/serialize_to_glow.pt")

            # Enable global deserialization and disable serialization,
            # then load (deserialize) the model into loaded_glow_mod
            torch_glow.enable_deserialize()
            torch_glow.disable_dump_serialized_model()
            loaded_glow_mod = torch.jit.load("/tmp/serialize_to_glow.pt")
            res2 = loaded_glow_mod(x, y)
            assert torch.allclose(res1, res2, rtol=1e-5, atol=1e-5)
Example #15
    def test_input_spec(self):
        """Test setting quantized and non-quantized input specs."""
        with torch.no_grad():
            a = torch.tensor([[0.1]])
            b = torch.tensor([[0.1]])

            mod = TestModule()
            traced_model = torch.jit.trace(mod, (a, b))
            ref_result = traced_model(a, b)

            # test non-quantized input
            glow_mod = torch_glow.to_glow(traced_model, get_compilation_spec((a, b)))
            glow_result = glow_mod(a, b)
            self.assertTrue(torch.allclose(ref_result, glow_result))

            # test quantized input
            add_inputs = torch_glow.get_submod_inputs(mod, "add", (a, b))
            glow_mod = torch_glow.to_glow_selective(
                traced_model, {"add": get_compilation_spec(add_inputs)}
            )
            glow_result = glow_mod(a, b)
            self.assertTrue(torch.allclose(ref_result, glow_result))
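
get_compilation_spec is not defined in this snippet; it presumably builds a CompilationSpec from example inputs, as the other examples do by hand. A minimal sketch using the same APIs:

def get_compilation_spec(inputs):
    # Target the Interpreter backend with input specs derived from
    # example tensors.
    spec = torch_glow.CompilationSpec()
    spec.get_settings().set_glow_backend("Interpreter")
    compilation_group = torch_glow.CompilationGroup()
    spec.compilation_groups_append(compilation_group)
    compilation_group.input_sets_append(
        torch_glow.input_specs_from_tensors(list(inputs)))
    return spec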
Example #16
def run_model(m, input, randomize):
    torch_glow.disableFusionPass()
    traced_m = torch.jit.trace(m, input)

    if randomize:
        torch_glow.enable_randomize_constants()
    else:
        torch_glow.disable_randomize_constants()

    spec = torch_glow.CompilationSpec()
    spec.get_settings().set_glow_backend("Interpreter")

    compilation_group = torch_glow.CompilationGroup()
    spec.compilation_groups_append(compilation_group)

    input_spec = torch_glow.InputSpec()
    input_spec.set_same_as(input)

    compilation_group.input_sets_append([input_spec])

    glow_m = torch_glow.to_glow(traced_m, {"forward": spec})
    return glow_m(input)
Example #17
    def test_to_glow_tuple_output(self):
        a = torch.randn(4)

        model = Foo()
        torch_resA = model(a)
        (tx, ty) = torch_resA

        metaA = torch_glow.InputMeta()
        metaA.set_same_as(a)
        inputA = [metaA]

        options = torch_glow.CompilationOptions()
        options.backend = "Interpreter"
        specA = torch_glow.GlowCompileSpec()
        specA.set(inputA, options)

        scripted_mod = torch.jit.script(model)
        lowered_mod = torch_glow.to_glow(scripted_mod, [specA])
        glow_resA = lowered_mod(a)
        (gx, gy) = glow_resA

        assert torch.allclose(tx, gx)
        assert torch.allclose(ty, gy)
Example #18
    def lower_and_write_to_onnx_helper(self, ModType, onnx_prefix):
        x = torch.randn(1, 3, 8, 8)
        model = create_model(x, ModType)

        spec = torch_glow.CompilationSpec()
        spec.get_settings().set_glow_backend("Interpreter")

        compilation_group = torch_glow.CompilationGroup()
        spec.compilation_groups_append(compilation_group)

        input_spec = torch_glow.InputSpec()
        input_spec.set_same_as(x)

        compilation_group.input_sets_append([input_spec])

        scripted_mod = torch.jit.trace(model, x)
        torch_glow.enable_write_to_onnx()
        torch_glow.set_onnx_file_name_prefix(onnx_prefix)
        torch_glow.enable_write_without_randomize()
        lowered_model = torch_glow.to_glow(scripted_mod, {"forward": spec})

        # Run Glow model
        g = lowered_model(x)

        # Run reference model
        t = model(x)

        self.assertEqual(type(g), type(t))
        self.assertEqual(len(g), len(t))

        for (gi, ti) in zip(g, t):
            self.assertTrue(torch.allclose(gi, ti))

        assert os.path.exists(onnx_prefix + ".onnxtxt")
        onnx_files = glob.glob(onnx_prefix + "*.onnx*")
        for f in onnx_files:
            os.remove(f)
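
create_model is not shown; it presumably instantiates ModType and puts it in inference mode before tracing. A minimal sketch under that assumption:

def create_model(x, ModType):
    model = ModType()
    model.eval()  # inference mode before tracing
    return model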
Example #19
def infer_nnpi(model, device, data_type, input_size, output_size, batch_size,
               args):
    import torch_glow
    # The detailed structure of the spec can be found at https://fburl.com/diffusion/79q4efud
    # Create compilation spec
    spec = torch_glow.CompilationSpec()
    spec.get_settings().set_glow_backend("NNPI")
    # Create compilation group and update its settings.
    # A compilation group holds compilation-specific information such as
    # fp16 settings, enableRemoveMutation, and anything else that changes
    # the compiled Glow graph.
    compilation_group = torch_glow.CompilationGroup()
    compilation_group_settings = compilation_group.get_settings()
    compilation_group_settings.set_convert_to_fp16(True)
    compilation_group_settings.set_replication_count(1)
    compilation_group_settings.backend_specific_opts_insert(
        "NNPI_IceCores", "1")

    data = torch.randn(batch_size, input_size)
    # Create input spec and add it into compilation group.
    # This is used for shape inference when lowering the model to Glow.
    data_spec = torch_glow.InputSpec()
    data_spec.set_same_as(data)
    compilation_group.input_sets_append([data_spec])

    spec.compilation_groups_append(compilation_group)

    traced_model = torch.jit.trace(model, (data,))
    lowered_model = torch_glow.to_glow(traced_model, spec)

    start_time = time.time()
    for i in range(args.steps + args.warmups):
        lowered_model(data)
        if i < args.warmups:
            start_time = time.time()
    return time.time() - start_time
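
A hypothetical invocation; args only needs steps and warmups attributes, so a SimpleNamespace suffices for a sketch (actually running this requires an NNPI device):

from types import SimpleNamespace

model = torch.nn.Linear(64, 16)
elapsed = infer_nnpi(
    model, device=None, data_type=torch.float32,
    input_size=64, output_size=16, batch_size=8,
    args=SimpleNamespace(steps=100, warmups=10),
)
print("steady-state time: {:.3f}s".format(elapsed))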