Example 1
    def __call__(self, measure_input, tmp_dir, **kwargs):
        """
        Wrapped build func.

        Parameters
        ----------
        measure_input: MeasureInput
            The measurement input

        tmp_dir: str
            The path of the temporary directory used to export the generated library
        """
        tic = time.time()
        try:
            filename = os.path.join(
                tmp_dir, "tmp_func_%0x.%s" %
                (getrandbits(64), self.build_func.output_format))
            # TODO(tvm-team) consider inlining _build_func_common
            func, arg_info = _build_func_common(measure_input, self.runtime,
                                                **kwargs)
            if self.build_func.output_format == ".model-library-format":
                # Late import so autoTVM still works when USE_MICRO is OFF
                try:
                    from tvm import micro  # pylint: disable=import-outside-toplevel
                except ImportError:
                    raise ImportError("Requires USE_MICRO")
                micro.export_model_library_format(func, filename)
            else:
                func.export_library(filename, self.build_func)
        except Exception as e:  # pylint: disable=broad-except
            return BuildResult(None, None, e, time.time() - tic)
        return BuildResult(filename, arg_info, None, time.time() - tic)
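For reference, a minimal sketch (not part of the snippet above) of how a caller might consume the returned BuildResult; the field names (filename, arg_info, error, time_cost) are inferred from the positional arguments used above and may differ between TVM versions.

def report_build(build_result):
    # Hypothetical helper: inspect a BuildResult produced by the wrapped build func.
    # Assumes namedtuple-style fields (filename, arg_info, error, time_cost).
    if build_result.error is not None:
        # Build failures are carried inside the result instead of being raised.
        print(f"build failed after {build_result.time_cost:.2f}s: {build_result.error!r}")
        return None
    print(f"built {build_result.filename} in {build_result.time_cost:.2f}s")
    return build_result.filename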
Example 2
def test_multiple_relay_modules_same_module_name():
    mod = get_conv2d_relay_module()

    executor = Executor("graph")
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")

    with tvm.transform.PassContext(opt_level=3,
                                   config={"tir.disable_vectorize": True}):
        factory1 = tvm.relay.build(mod,
                                   target,
                                   runtime=runtime,
                                   executor=executor,
                                   mod_name="mod")
        factory2 = tvm.relay.build(mod,
                                   target,
                                   runtime=runtime,
                                   executor=executor,
                                   mod_name="mod")

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")

    with pytest.raises(AssertionError,
                       match="Multiple modules should have unique names"):
        micro.export_model_library_format([factory1, factory2], mlf_tar_path)
Example 3
def test_multiple_relay_modules_graph():
    mod = get_conv2d_relay_module()

    executor = Executor("graph")
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")

    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory1 = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod1")
        factory2 = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod2")

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format([factory1, factory2], mlf_tar_path)

    with tarfile.open(mlf_tar_path, "r:*") as tf:
        tar_members = [ti.name for ti in tf.getmembers()]
        print("tar members", tar_members)
        assert "./metadata.json" in tar_members
        assert "./codegen/host/src/mod1_lib0.c" in tar_members
        assert "./codegen/host/src/mod2_lib0.c" in tar_members

        with tf.extractfile("./metadata.json") as f:
            metadata = json.load(f)
        mod2_main_md = metadata["modules"]["mod2"]["memory"]["functions"]["main"]
        assert mod2_main_md == [
            {
                "constants_size_bytes": 0,
                "device": 1,
                "io_size_bytes": 143960,
                "workspace_size_bytes": 158088,
            }
        ]
        assert metadata["modules"]["mod1"]["model_name"] == "mod1"
        assert metadata["modules"]["mod2"]["model_name"] == "mod2"
Example 4
def test_export_model_library_format_workspace(executor, runtime):
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        relay_mod = tvm.parser.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%p0: Tensor[(1, 56, 56, 128), int16], %p1: Tensor[(3, 3, 128, 1), int16], %p2: Tensor[(1, 1, 1, 128), int32]){
              %0 = nn.conv2d(%p0, %p1, padding=[1, 1, 1, 1], groups=128, channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI", out_dtype="int32") /* ty=Tensor[(1, 56, 56, 128), int32] */;
              %1 = add(%0, %p2) /* ty=Tensor[(1, 56, 56, 128), int32] */;
              %2 = fixed_point_multiply(%1, multiplier=2080045879, shift=-4) /* ty=Tensor[(1, 56, 56, 128), int32] */;
              %3 = clip(%2, a_min=0f, a_max=255f) /* ty=Tensor[(1, 56, 56, 128), int32] */;
              cast(%3, dtype="uint8") /* ty=Tensor[(1, 56, 56, 128), uint8] */
            }
            """
        )
        factory = tvm.relay.build(
            relay_mod,
            target,
            executor=executor,
            runtime=runtime,
            mod_name="qnn_conv2d",
        )

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")

    micro.export_model_library_format(factory, mlf_tar_path)
    tf = tarfile.open(mlf_tar_path)

    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)

    with open(os.path.join(extract_dir, "metadata.json")) as json_f:
        metadata = json.load(json_f)
        module_name = factory.libmod_name
        assert metadata["version"] == _GENERATED_VERSION
        assert metadata["modules"][module_name]["model_name"] == "qnn_conv2d"
        export_datetime = datetime.datetime.strptime(
            metadata["modules"][module_name]["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
        )
        assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
        assert metadata["modules"][module_name]["target"] == [str(target)]
        assert metadata["modules"][module_name]["memory"]["functions"]["main"] == [
            {
                "constants_size_bytes": 0,
                "device": 1,
                "io_size_bytes": 1207040,
                "workspace_size_bytes": 2466816,
            }
        ]
        assert metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
            "workspace"
        ] == [{"device": 1, "workspace_size_bytes": 2466816}]
        assert (
            "fused_nn_conv2d_add_fixed_point_multiply_clip_cast"
            in metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
                "function_name"
            ]
        )
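As a small convenience over the metadata structure checked above, the per-module workspace lookup could be wrapped in a helper like the one below; the nesting ("modules" -> name -> "memory" -> "functions" -> "main") is taken from this test and is only assumed to hold for other MLF versions. With the metadata above, main_workspace_size_bytes(metadata, "qnn_conv2d") would return 2466816.

def main_workspace_size_bytes(metadata, module_name):
    # Sum the per-device workspace sizes reported for a module's main function.
    # `metadata` is the parsed metadata.json dict from an MLF archive.
    entries = metadata["modules"][module_name]["memory"]["functions"]["main"]
    return sum(entry["workspace_size_bytes"] for entry in entries)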
Example 5
def test_export_byoc_c_module():
    """Test BYOC flow when it produces DSO-exportable modules.

    NOTE the general BYOC flow is not fully supported by Model Library Format right now.
    """
    x = tvm.relay.var("x", shape=(10, 10))
    w0 = tvm.relay.var("w0", shape=(10, 10))
    w1 = tvm.relay.var("w1", shape=(10, 10))
    w2 = tvm.relay.var("w2", shape=(10, 10))
    w3 = tvm.relay.var("w3", shape=(10, 10))
    w4 = tvm.relay.var("w4", shape=(10, 10))
    w5 = tvm.relay.var("w5", shape=(10, 10))
    w6 = tvm.relay.var("w6", shape=(10, 10))
    w7 = tvm.relay.var("w7", shape=(10, 10))

    # C compiler
    z0 = tvm.relay.add(x, w0)
    p0 = tvm.relay.subtract(z0, w1)
    q0 = tvm.relay.multiply(p0, w2)

    z1 = tvm.relay.add(x, w3)
    p1 = tvm.relay.subtract(z1, w4)
    q1 = tvm.relay.multiply(p1, w5)

    # Other parts on TVM
    z2 = tvm.relay.add(x, w6)
    q2 = tvm.relay.subtract(z2, w7)

    r = tvm.relay.concatenate((q0, q1, q2), axis=0)
    f = tvm.relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = tvm.relay.transform.PartitionGraph("mod_name")(mod)
    mod = tvm.relay.transform.InferType()(mod)

    with tvm.transform.PassContext(opt_level=3,
                                   config={"tir.disable_vectorize": True}):
        factory = tvm.relay.build(mod, tvm.target.target.micro("host"))

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")

    from tvm import micro

    micro.export_model_library_format(factory, mlf_tar_path)

    with tarfile.open(mlf_tar_path, "r:*") as tf:
        tar_members = [ti.name for ti in tf.getmembers()]
        print("tar members", tar_members)
        assert "./metadata.json" in tar_members
        with tf.extractfile("./metadata.json") as f:
            metadata = json.load(f)
        main_md = metadata["memory"]["functions"]["main"]
        assert main_md == [{
            "constants_size_bytes": 0,
            "device": 1,
            "io_size_bytes": 4800,
            "workspace_size_bytes": 800,
        }]
Example 6
def test_multiple_relay_modules_c():
    mod = get_conv2d_relay_module()

    executor = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")

    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory1 = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod1")
        factory2 = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod2")

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")

    micro.export_model_library_format([factory1, factory2], mlf_tar_path)

    tf = tarfile.open(mlf_tar_path)

    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)

    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod1_lib0.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod1_lib1.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod2_lib0.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod2_lib1.c"))

    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "include", "tvmgen_mod1.h"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "include", "tvmgen_mod2.h"))

    # check CRT runtime directory
    assert os.path.exists(os.path.join(extract_dir, "runtime"))
Example 7
def build_project(
    temp_dir, zephyr_board, west_cmd, mod, build_config, simd=False, extra_files_tar=None
):
    project_dir = temp_dir / "project"

    with tempfile.TemporaryDirectory() as tar_temp_dir:
        model_tar_path = pathlib.Path(tar_temp_dir) / "model.tar"
        export_model_library_format(mod, model_tar_path)

        workspace_size = mlf_extract_workspace_size_bytes(model_tar_path)
        project_options = {
            "extra_files_tar": extra_files_tar,
            "project_type": "aot_demo",
            "west_cmd": west_cmd,
            "verbose": bool(build_config.get("debug")),
            "zephyr_board": zephyr_board,
            "compile_definitions": [
                # TODO(mehrdadh): It fails without offset.
                f"-DWORKSPACE_SIZE={workspace_size + 128}",
            ],
        }
        if simd:
            project_options["config_main_stack_size"] = 1536

        project = tvm.micro.project.generate_project_from_mlf(
            str(TEMPLATE_PROJECT_DIR), project_dir, model_tar_path, project_options
        )
        project.build()
    return project, project_dir
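A hedged sketch of how build_project is typically used afterwards; the board name, paths, and the flash/transport calls follow the usual microTVM project API but are illustrative assumptions, not taken from this snippet.

import pathlib

import tvm

# Placeholder values; a real run needs a Zephyr toolchain, `west`, and a board or QEMU setup.
project, project_dir = build_project(
    temp_dir=pathlib.Path("/tmp/zephyr-workspace"),
    zephyr_board="qemu_x86",
    west_cmd="west",
    mod=factory,  # an already-built Relay factory module (assumed to exist)
    build_config={"debug": False},
)
project.flash()  # program the board (or launch the emulator for simulated targets)
with tvm.micro.Session(project.transport()) as session:
    pass  # interact with the device, e.g. create a graph or AOT executor over `session`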
Example 8
def test_export_operator_model_library_format():
    import tvm.micro as micro

    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        A = tvm.te.placeholder((2,), dtype="int8")
        B = tvm.te.placeholder((1,), dtype="int8")
        C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
        sched = tvm.te.create_schedule(C.op)
        mod = tvm.build(
            sched,
            [A, B, C],
            tvm.target.Target(target, target),
            runtime=Runtime("crt", {"system-lib": True}),
            name="add",
        )

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format(mod, mlf_tar_path)

    tf = tarfile.open(mlf_tar_path)

    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)

    with open(os.path.join(extract_dir, "metadata.json")) as json_f:
        metadata = json.load(json_f)
        assert metadata["version"] == 5
        assert metadata["model_name"] == "add"
        export_datetime = datetime.datetime.strptime(
            metadata["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
        )
        assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
        assert metadata["target"] == {"1": str(target)}

        assert metadata["memory"]["add"][0]["dtype"] == "int8"
        assert metadata["memory"]["add"][0]["shape"] == [2]
        assert metadata["memory"]["add"][0]["size_bytes"] == 2

        assert metadata["memory"]["add"][1]["dtype"] == "int8"
        assert metadata["memory"]["add"][1]["shape"] == [1]
        assert metadata["memory"]["add"][1]["size_bytes"] == 1

        assert metadata["memory"]["add"][2]["dtype"] == "int8"
        assert metadata["memory"]["add"][2]["shape"] == [2]
        assert metadata["memory"]["add"][2]["size_bytes"] == 2

    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "lib0.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "lib1.c"))

    assert (
        len(mod.ir_module_by_target) == 1
    ), f"expected exactly one entry in ir_module_by_target: {mod.ir_module_by_target!r}"
    for target, ir_mod in mod.ir_module_by_target.items():
        assert int(tvm.runtime.ndarray.device(str(target)).device_type) == 1
        with open(os.path.join(extract_dir, "src", "tir-1.txt")) as tir_f:
            assert tir_f.read() == str(ir_mod)
Example 9
def test_export_model_library_format_llvm():
    with utils.TempDirectory.set_keep_for_debug(True):
        target = tvm.target.target.micro("host")
        assert str(target)[:2] == "c "
        target = tvm.target.Target("llvm " + str(target)[2:])
        with tvm.transform.PassContext(opt_level=3):
            relay_mod = tvm.parser.fromtext(
                """
            #[version = "0.0.5"]
            def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), float32], %c : Tensor[(1, 2), float32]) {
            %0 = cast(%a, dtype="float32") + %b * %c;
            %0
            }"""
            )
            factory = tvm.relay.build(
                relay_mod,
                target,
                target_host=target,
                mod_name="add",
                params={"c": numpy.array([[2.0, 4.0]], dtype="float32")},
            )

        temp_dir = utils.tempdir()
        mlf_tar_path = temp_dir.relpath("lib.tar")
        import tvm.micro as micro

        micro.export_model_library_format(factory, mlf_tar_path)
        tf = tarfile.open(mlf_tar_path)

        extract_dir = temp_dir.relpath("extract")
        os.mkdir(extract_dir)
        tf.extractall(extract_dir)

        with open(os.path.join(extract_dir, "metadata.json")) as json_f:
            metadata = json.load(json_f)
            assert metadata["version"] == 1
            assert metadata["model_name"] == "add"
            export_datetime = datetime.datetime.strptime(
                metadata["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
            )
            assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
            assert metadata["target"] == {"1": str(target)}
            assert metadata["memory"] == [
                {"storage_id": 0, "size_bytes": 2, "input_binding": "a"},
                {"storage_id": 1, "size_bytes": 8, "input_binding": "b"},
                {"storage_id": 2, "size_bytes": 8, "input_binding": "p0"},
                {"storage_id": 3, "size_bytes": 8},
            ]

        assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "lib", "lib0.o"))

        validate_graph_json(extract_dir, factory)

        with open(os.path.join(extract_dir, "relay.txt")) as relay_f:
            assert relay_f.read() == str(relay_mod)

        with open(os.path.join(extract_dir, "parameters", "add.params"), "rb") as params_f:
            params = tvm.relay.load_param_dict(params_f.read())
            assert "p0" in params
Example 10
def check_network(build_dir, target_name, model_path, image_path):

    model_name = "network"

    model, shape_dict, dtype_dict = get_tflite_model(model_path)

    #
    # Generate random input data
    #
    image_data = []
    for i in range(NUM_ITERATIONS):
        assert len(shape_dict) == 1, "Only single input models are supported."
        image_shape = list(shape_dict.values())[0]
        in_data = np.random.randint(0, 255, size=image_shape).astype("uint8")
        # Write raw data for use with the TVM implementation
        filename = os.path.join(image_path, "{:02d}.raw".format(i))
        dump_image(filename, in_data)
        image_data.append(in_data)

    mod, params = relay.frontend.from_tflite(model, shape_dict, dtype_dict)

    #
    # Build a TVM C module for the ARM CPU (without compiling the kernels
    # library down to object code):
    #
    with tvm.transform.PassContext(opt_level=3,
                                   config={"tir.disable_vectorize": True}):
        rt_module = relay.build(mod, target="c -device=arm_cpu", params=params)

    #
    # Export model library format
    #
    target_dir = os.path.join(build_dir, target_name + "_gen")

    if os.path.exists(target_dir):
        print(f'Removing existing "{target_dir}" directory')
        try:
            shutil.rmtree(target_dir)
        except OSError as err:
            raise ValueError(f"emit_code.Error: {target_dir} : {err.strerror}")

    mlf_tar_path = os.path.join(build_dir, target_name + "_lib.tar")
    import tvm.micro as micro

    micro.export_model_library_format(rt_module, mlf_tar_path)

    emitter = stm32.CodeEmitter()
    quantization = extract_tflite_quantization(model)
    emitter.parse_library_format(mlf_tar_path, quantization)
    emitter.emit_code(target_dir, model_name)

    #
    # Results
    #
    tf_results = run_tflite_model(model_path, image_data)
    tvm_results = run_tvm_model(build_dir, model_name, target_dir, image_path)

    check_result(tf_results, tvm_results)
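A hypothetical invocation of check_network; the paths and target name are placeholders, and the helpers it relies on (get_tflite_model, the stm32 emitter, run_tflite_model, run_tvm_model, check_result) come from the same test module.

# Placeholder arguments only; a real run needs an actual TFLite model and image directory.
check_network(
    build_dir="/tmp/stm32_build",
    target_name="stm32_target",
    model_path="/path/to/model.tflite",
    image_path="/tmp/stm32_build/images",
)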
Example 11
def test_multiple_relay_modules_aot_graph():
    mod = get_conv2d_relay_module()

    executor1 = Executor("graph")
    executor2 = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")

    with tvm.transform.PassContext(opt_level=3,
                                   config={"tir.disable_vectorize": True}):
        factory1 = tvm.relay.build(mod,
                                   target,
                                   runtime=runtime,
                                   executor=executor1,
                                   mod_name="mod1")
        factory2 = tvm.relay.build(mod,
                                   target,
                                   runtime=runtime,
                                   executor=executor2,
                                   mod_name="mod2")

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")

    micro.export_model_library_format([factory1, factory2], mlf_tar_path)

    tf = tarfile.open(mlf_tar_path)
    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)

    assert os.path.exists(
        os.path.join(extract_dir, "codegen", "host", "src", "mod1_lib0.c"))
    assert os.path.exists(
        os.path.join(extract_dir, "codegen", "host", "src", "mod1_lib1.c"))
    assert os.path.exists(
        os.path.join(extract_dir, "codegen", "host", "src", "mod1_lib2.c"))
    assert os.path.exists(
        os.path.join(extract_dir, "codegen", "host", "src", "mod2_lib0.c"))
    assert os.path.exists(
        os.path.join(extract_dir, "codegen", "host", "src", "mod2_lib1.c"))

    assert os.path.exists(
        os.path.join(extract_dir, "codegen", "host", "include",
                     "tvmgen_mod2.h"))

    with open(os.path.join(extract_dir, "metadata.json")) as f:
        metadata = json.load(f)

    assert metadata["modules"]["mod1"]["executors"] == ["graph"]
    assert metadata["modules"]["mod2"]["executors"] == ["aot"]
    assert metadata["version"] == _GENERATED_VERSION
Example 12
def compile_and_run(mod, input_list, output_list, params=None):
    """
    This method verifies the generated source
    """
    target = "c -runtime=c --link-params --executor=aot"

    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lib = tvm.relay.build(mod, target, target_host=target, params=params)

    tmp_path = utils.tempdir()
    tmp_dir = tmp_path.temp_dir

    base_path = os.path.join(tmp_dir, "test")
    build_path = os.path.join(base_path, "build")
    os.makedirs(build_path, exist_ok=True)

    tar_file = os.path.join(base_path, "test.tar")
    export_model_library_format(lib, tar_file)
    t = tarfile.open(tar_file)
    t.extractall(base_path)

    for i in range(len(input_list)):
        create_header_file((f"input_data{i}"), input_list[i], build_path)

    for i in range(len(output_list)):
        create_header_file(
            (f"output_data{i}"),
            np.zeros(output_list[i].shape, output_list[i].dtype),
            build_path,
        )
        create_header_file((f"expected_output_data{i}"), output_list[i], build_path)

    create_main("test.c", input_list, output_list, build_path)

    # Verify that compiles fine
    file_dir = os.path.dirname(os.path.abspath(__file__))
    makefile = os.path.join(file_dir, "aot_test.mk")
    make_cmd = f"make -f {makefile} build_dir=" + build_path + f" TVM_ROOT={file_dir}/../../../.."

    compile_log_path = os.path.join(build_path, "test_compile.log")
    ret = subprocess_with_stdout_and_log(make_cmd, ".", compile_log_path, False)
    assert ret == 0

    # Verify that runs fine
    run_log_path = os.path.join(build_path, "test_run.log")
    ret = subprocess_with_stdout_and_log("./aot_test_runner", build_path, run_log_path, False)
    assert ret == 0
Example 13
    def export_package(
        self,
        executor_factory: Union[GraphExecutorFactoryModule, Executable],
        package_path: Optional[str] = None,
        cross: Optional[Union[str, Callable]] = None,
        cross_options: Optional[str] = None,
        output_format: str = "so",
    ):
        """Save this TVMCModel to file.
        Parameters
        ----------
        executor_factory : GraphExecutorFactoryModule or Executable
            The factory containing the compiled artifacts needed to run this model.
        package_path : str, None
            Where the model should be saved. Note that it will be packaged as a .tar file.
            If not provided, the package will be saved to a generically named file in tmp.
        cross : str or callable object, optional
            Function that performs the actual compilation.
        cross_options : str, optional
            Command line options to be passed to the cross compiler.
        output_format : str
            How to save the module's function library. Must be one of "so" or "tar" to save
            using the classic format, or "mlf" to save using the Model Library Format.

        Returns
        -------
        package_path : str
            The path that the package was saved to.
        """
        if output_format not in ["so", "tar", "mlf"]:
            raise TVMCException(
                "Only 'so', 'tar', and 'mlf' output formats are supported.")

        if output_format == "mlf" and cross:
            raise TVMCException(
                "Specifying the MLF output and a cross compiler is not supported."
            )

        if isinstance(executor_factory, Executable):
            package_path = self.export_vm_format(executor_factory,
                                                 package_path, output_format)
        elif output_format in ["so", "tar"]:
            package_path = self.export_classic_format(executor_factory,
                                                      package_path, cross,
                                                      cross_options,
                                                      output_format)
        elif output_format == "mlf":
            if export_model_library_format:
                package_path = export_model_library_format(
                    executor_factory, package_path)
            else:
                raise Exception(
                    "micro tvm is not enabled. Set USE_MICRO to ON in config.cmake"
                )

        return package_path
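To make the branching above concrete, a hedged usage sketch of export_package; it assumes an existing TVMCModel instance (tvmc_model) and a compiled GraphExecutorFactoryModule (factory), both placeholders here.

# Save a compiled model as a Model Library Format package.
# Per the checks above, `cross`/`cross_options` must not be set when using "mlf".
package_path = tvmc_model.export_package(
    executor_factory=factory,
    package_path="module.tar",  # MLF output is packaged as a .tar archive
    output_format="mlf",
)
print("Saved package to", package_path)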
Example 14
def test_export_multiple_operator_model_library_format():
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        A = tvm.te.placeholder((2,), dtype="int8")
        B = tvm.te.placeholder((1,), dtype="int8")
        C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
        sched = tvm.te.create_schedule(C.op)
        mod = tvm.build(
            sched,
            [A, B, C],
            tvm.target.Target(target, target),
            runtime=Runtime("crt", {"system-lib": True}),
            name="add",
        )

    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")

    with pytest.raises(RuntimeError) as exc:
        micro.export_model_library_format([mod, mod], mlf_tar_path)

    assert str(exc.value) == "Multiple operator is not supported."
Example 15
def test_packed_global_variables():
    """Check packed global variables in codegen output."""
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, 32, 3, 3)
    interface_api = "packed"
    use_unpacked_api = False

    data0 = relay.var("data", shape=ishape, dtype=dtype)
    weight0 = relay.var("weight", shape=wshape, dtype=dtype)
    out = relay.nn.conv2d(data0,
                          weight0,
                          kernel_size=(3, 3),
                          padding=(1, 1),
                          groups=1)
    main_f = relay.Function([data0, weight0], out)
    mod = tvm.IRModule()
    mod["main"] = main_f
    mod = transform.InferType()(mod)

    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1_data = np.random.uniform(0, 1, wshape).astype(dtype)

    inputs = OrderedDict([("data", i_data), ("weight", w1_data)])

    output_list = generate_ref_data(mod, inputs)
    compiled_models_list = compile_models(
        models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        workspace_byte_alignment=8,
        enable_op_fusion=True,
        pass_config=AOT_DEFAULT_RUNNER.pass_config,
        use_runtime_executor=True,
        target=tvm.target.Target("c"),
    )
    compiled_model = compiled_models_list[0]

    tmp_path = utils.tempdir()
    base_path = tmp_path.temp_dir

    model = compiled_model.model
    tar_file = os.path.join(base_path, f"{model.name}.tar")
    export_model_library_format(compiled_model.executor_factory, tar_file)
    t = tarfile.open(tar_file)
    t.extractall(base_path)

    file_list = []
    for path in (pathlib.Path(base_path) / "codegen" / "host" /
                 "src").iterdir():
        if path.is_file():
            file_list.append(path)
    assert len(file_list) > 0

    for path in file_list:
        with open(path, "r") as lib_f:
            lib1 = lib_f.readlines()

        tvmgen_names = []
        tvmgen_funcs = []
        for line in lib1:
            for item in line.split(" "):
                # Find all names starting with tvmgen_default
                if item.startswith("tvmgen_default"):
                    # Collect any name starting with tvmgen_default
                    tvmgen_names.append(item)
                    # Collect all functions starting with tvmgen_default
                    tvmgen_funcs += re.findall(r"(?<=).*(?=\()", item)

        # Check that no function name has a corresponding packed variable name
        # among the collected tvmgen_default symbols
        for func in tvmgen_funcs:
            assert f"{func}_packed" not in tvmgen_names
Example 16
def run_and_check(
    models: List[AOTCompiledTestModel],
    runner: AOTTestRunner,
    interface_api: str,
    debug_calculated_workspaces=False,
    workspace_byte_alignment=8,
    data_linkage: AOTDataLinkage = None,
    test_dir: str = None,
    verbose: bool = False,
):
    """
    This method uses the original test data and compiled runtime.Modules
    to run in the test runner to verify the results.
    """

    base_path = test_dir
    if test_dir is None:
        tmp_path = utils.tempdir()
        tmp_dir = tmp_path.temp_dir
        base_path = os.path.join(tmp_dir, "test")

    cflags = f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} "
    # The calculated workspaces will not account for stack allocator tags used for debugging
    if debug_calculated_workspaces:
        cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK "

    base_path = os.path.abspath(base_path)
    build_path = os.path.join(base_path, "build")
    os.makedirs(build_path, exist_ok=True)

    include_path = os.path.join(base_path, "include")
    os.mkdir(include_path)
    crt_root = tvm.micro.get_standalone_crt_dir()
    shutil.copy2(
        os.path.join(crt_root, "template", "crt_config-template.h"),
        os.path.join(include_path, "crt_config.h"),
    )

    workspace_bytes = 0
    for compiled_model in models:
        model = compiled_model.model
        tar_file = os.path.join(base_path, f"{model.name}.tar")
        export_model_library_format(compiled_model.executor_factory, tar_file)
        t = tarfile.open(tar_file)
        t.extractall(base_path)

        workspace_bytes = model.extra_memory_in_bytes
        use_usmp = runner.pass_config.get("tir.usmp.enable", False)
        if interface_api == "packed" and not use_usmp:
            workspace_bytes += mlf_extract_workspace_size_bytes(tar_file)

        for key in model.inputs:
            sanitized_tensor_name = re.sub(r"\W", "_", key)
            create_header_file(
                f'{mangle_name(model.name, "input_data")}_{sanitized_tensor_name}',
                model.inputs[key],
                include_path,
                data_linkage,
            )

        for key in model.outputs:
            sanitized_tensor_name = re.sub(r"\W", "_", key)
            create_header_file(
                f'{mangle_name(model.name, "output_data")}_{sanitized_tensor_name}',
                np.zeros(model.outputs[key].shape, model.outputs[key].dtype),
                include_path,
                data_linkage,
            )
            create_header_file(
                f'{mangle_name(model.name, "expected_output_data")}_{sanitized_tensor_name}',
                model.outputs[key],
                include_path,
                data_linkage,
            )

    create_main(
        "test.c",
        models,
        build_path,
        runner.includes,
        runner.prologue,
        runner.epilogue,
        data_linkage,
        interface_api,
        workspace_bytes,
    )

    # Verify that compiles fine
    file_dir = os.path.dirname(os.path.abspath(__file__))
    codegen_path = os.path.join(base_path, "codegen")
    makefile = os.path.join(file_dir, f"{runner.makefile}.mk")
    fvp_dir = "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/"
    # TODO(@grant-arm): Remove once ci_cpu docker image has been updated to FVP_Corstone_SSE
    if not os.path.isdir(fvp_dir):
        fvp_dir = "/opt/arm/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/"
    custom_params = " ".join(
        [f" {param}='{value}'" for param, value in runner.parameters.items()])
    make_command = (
        f"make -f {makefile} build_dir={build_path}" + f" CFLAGS='{cflags}'" +
        f" TVM_ROOT={file_dir}/../../../.." + f" AOT_TEST_ROOT={file_dir}" +
        f" CODEGEN_ROOT={codegen_path}" +
        f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}" +
        f" FVP_DIR={fvp_dir}" + custom_params)

    compile_log_path = os.path.join(build_path, "test_compile.log")
    compile_command = f"{make_command} aot_test_runner"
    if verbose:
        print("Compile command:\n", compile_command)
    ret = subprocess_log_output(compile_command, ".", compile_log_path)
    assert ret == 0

    # Verify that runs fine
    run_log_path = os.path.join(build_path, "test_run.log")
    run_command = f"{make_command} run"
    if verbose:
        print("Run command:\n", run_command)
    ret = subprocess_log_output(run_command, build_path, run_log_path)
    assert ret == 0
    with open(run_log_path) as run_log:
        assert AOT_SUCCESS_TOKEN in run_log.read()
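A hedged sketch of driving run_and_check; compile_models, AOTTestModel, and AOT_DEFAULT_RUNNER are assumed to come from the same AOT test infrastructure used in Example 15, and the module, inputs, and outputs are placeholders.

# Compile a test model and hand the result to run_and_check (names are placeholders).
compiled_models = compile_models(
    models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
    interface_api="c",
    use_unpacked_api=True,
)
run_and_check(
    models=compiled_models,
    runner=AOT_DEFAULT_RUNNER,
    interface_api="c",
    verbose=True,
)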
Example 17
def test_export_model_library_format_llvm():
    with utils.TempDirectory.set_keep_for_debug(True):
        target = tvm.target.target.micro("host")
        assert str(target)[:2] == "c "
        target = tvm.target.Target("llvm " + str(target)[2:])
        with tvm.transform.PassContext(opt_level=3):
            relay_mod = tvm.parser.fromtext(
                """
            #[version = "0.0.5"]
            def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), float32], %c : Tensor[(1, 2), float32]) {
            %0 = cast(%a, dtype="float32") + %b * %c;
            %0
            }"""
            )
            factory = tvm.relay.build(
                relay_mod,
                target,
                runtime=Runtime("crt", {"system-lib": True}),
                mod_name="add",
                params={"c": np.array([[2.0, 4.0]], dtype="float32")},
            )

        temp_dir = utils.tempdir()
        mlf_tar_path = temp_dir.relpath("lib.tar")

        micro.export_model_library_format(factory, mlf_tar_path)
        tf = tarfile.open(mlf_tar_path)

        extract_dir = temp_dir.relpath("extract")
        os.mkdir(extract_dir)
        tf.extractall(extract_dir)

        with open(os.path.join(extract_dir, "metadata.json")) as json_f:
            metadata = json.load(json_f)
            module_name = factory.libmod_name
            assert metadata["version"] == _GENERATED_VERSION
            assert metadata["modules"][module_name]["model_name"] == "add"
            export_datetime = datetime.datetime.strptime(
                metadata["modules"][module_name]["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
            )
            assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
            assert metadata["modules"][module_name]["target"] == [str(target)]
            assert metadata["modules"][module_name]["memory"]["sids"] == [
                {"storage_id": 0, "size_bytes": 2, "input_binding": "a"},
                {"storage_id": 1, "size_bytes": 8, "input_binding": "b"},
                {"storage_id": 2, "size_bytes": 8, "input_binding": "p0"},
                {"storage_id": 3, "size_bytes": 8},
            ]
            assert metadata["modules"][module_name]["memory"]["functions"]["main"] == [
                {
                    "constants_size_bytes": 8,
                    "device": 1,
                    "io_size_bytes": 18,
                    "workspace_size_bytes": 0,
                }
            ]
            assert metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
                "workspace"
            ] == [{"device": 1, "workspace_size_bytes": 0}]
            assert (
                "fused_cast_multiply_add"
                in metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
                    "function_name"
                ]
            )

        assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "lib", "add_lib0.o"))

        validate_graph_json(extract_dir, factory)

        with open(os.path.join(extract_dir, "src", f"{module_name}.relay")) as relay_f:
            assert relay_f.read() == str(relay_mod)

        with open(os.path.join(extract_dir, "parameters", "add.params"), "rb") as params_f:
            params = tvm.relay.load_param_dict(params_f.read())
            assert "p0" in params
Example 18
    def run_and_check_body(base_path):
        cflags = f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} "
        # The calculated workspaces will not account for stack allocator tags used for debugging
        if debug_calculated_workspaces:
            cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK "

        base_path = os.path.abspath(base_path)
        build_path = os.path.join(base_path, "build")
        os.makedirs(build_path, exist_ok=True)

        include_path = os.path.join(base_path, "include")
        os.mkdir(include_path)
        crt_root = tvm.micro.get_standalone_crt_dir()
        shutil.copy2(
            os.path.join(crt_root, "template", "crt_config-template.h"),
            os.path.join(include_path, "crt_config.h"),
        )

        workspace_bytes = 0
        for compiled_model in models:
            model = compiled_model.model
            tar_file = os.path.join(base_path, f"{model.name}.tar")
            export_model_library_format(compiled_model.executor_factory,
                                        tar_file)
            t = tarfile.open(tar_file)
            t.extractall(base_path)

            # The C interface API does not need the compiler-generated
            # workspace size to generate the test application, because
            # the workspace size is codegen'd as a macro in
            # tvmgen_<model_name>.h.
            if interface_api != "c":
                workspace_bytes += mlf_extract_workspace_size_bytes(tar_file)

            workspace_bytes += model.extra_memory_in_bytes
            for key in model.inputs:
                sanitized_tensor_name = re.sub(r"\W", "_", key)
                create_header_file(
                    f'{mangle_name(model.name, "input_data")}_{sanitized_tensor_name}',
                    model.inputs[key],
                    include_path,
                    data_linkage,
                )

            for key in model.outputs:
                sanitized_tensor_name = re.sub(r"\W", "_", key)
                create_header_file(
                    f'{mangle_name(model.name, "output_data")}_{sanitized_tensor_name}',
                    np.zeros(model.outputs[key].shape,
                             model.outputs[key].dtype),
                    include_path,
                    data_linkage,
                )
                create_header_file(
                    f'{mangle_name(model.name, "expected_output_data")}_{sanitized_tensor_name}',
                    model.outputs[key],
                    include_path,
                    data_linkage,
                )

        use_usmp = runner.pass_config.get("tir.usmp.enable", False)
        # We only need the stack allocator if USMP is not used
        use_stack_allocator = not use_usmp

        create_main(
            "test.c",
            models,
            build_path,
            runner.includes,
            runner.prologue,
            runner.epilogue,
            data_linkage,
            interface_api,
            workspace_bytes,
            use_stack_allocator,
        )

        # Verify that compiles fine
        file_dir = os.path.dirname(os.path.abspath(__file__))
        codegen_path = os.path.join(base_path, "codegen")
        makefile = os.path.join(file_dir, f"{runner.makefile}.mk")
        fvp_dir = "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/"
        # TODO(@grant-arm): Remove once ci_cpu docker image has been updated to FVP_Corstone_SSE
        if not os.path.isdir(fvp_dir):
            fvp_dir = "/opt/arm/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/"
        custom_params = " ".join([
            f" {param}='{value}'"
            for param, value in runner.parameters.items()
        ])
        make_command = (
            f"make -f {makefile} build_dir={build_path}" +
            f" CFLAGS='{cflags}'" + f" TVM_ROOT={file_dir}/../../../.." +
            f" AOT_TEST_ROOT={file_dir}" + f" CODEGEN_ROOT={codegen_path}" +
            f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}" +
            f" FVP_DIR={fvp_dir}" + custom_params)

        compile_log_path = os.path.join(build_path, "test_compile.log")
        compile_command = f"{make_command} aot_test_runner"
        if verbose:
            print("Compile command:\n", compile_command)
        subprocess_check_log_output(compile_command, ".", compile_log_path)

        # Verify that runs fine
        run_log_path = os.path.join(build_path, "test_run.log")
        run_command = f"{make_command} run"
        if verbose:
            print("Run command:\n", run_command)

        # TODO(lhutton1) This is a quick and dirty workaround to help temporarily reduce
        # the flakiness of the tests. Will remove once #10300 and #10314 are resolved.
        try:
            subprocess_check_log_output(run_command, build_path, run_log_path)
        except RuntimeError as err:
            print("Failed to run the module, having a second attempt...",
                  file=sys.stderr)
            print(err, file=sys.stderr)
            subprocess_check_log_output(run_command, build_path, run_log_path)

        with open(run_log_path) as run_log:
            assert AOT_SUCCESS_TOKEN in run_log.read()
Example 19
def run_and_check(
    models: List[AOTCompiledTestModel],
    runner: AOTTestRunner,
    interface_api: str,
    debug_calculated_workspaces=False,
    workspace_byte_alignment=8,
    data_linkage: AOTDataLinkage = None,
):
    """
    This method uses the original test data and compiled runtime.Modules
    to run in the test runner to verify the results.
    """

    tmp_path = utils.tempdir()
    tmp_dir = tmp_path.temp_dir

    cflags = f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} "
    # The calculated workspaces will not account for stack allocator tags used for debugging
    if debug_calculated_workspaces:
        cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK "

    base_path = os.path.join(tmp_dir, "test")
    build_path = os.path.join(base_path, "build")
    os.makedirs(build_path, exist_ok=True)

    include_path = os.path.join(base_path, "include")
    os.mkdir(include_path)
    crt_root = tvm.micro.get_standalone_crt_dir()
    shutil.copy2(
        os.path.join(crt_root, "template", "crt_config-template.h"),
        os.path.join(include_path, "crt_config.h"),
    )

    workspace_bytes = 0
    for compiled_model in models:
        model = compiled_model.model
        tar_file = os.path.join(base_path, f"{model.name}.tar")
        export_model_library_format(compiled_model.executor_factory, tar_file)
        t = tarfile.open(tar_file)
        t.extractall(base_path)

        workspace_bytes += model.extra_memory_in_bytes
        workspace_bytes += extract_main_workspace_size_bytes(base_path)

        for key in model.inputs:
            sanitized_tensor_name = re.sub(r"\W", "_", key)
            create_header_file(
                f'{mangle_name(model.name, "input_data")}_{sanitized_tensor_name}',
                model.inputs[key],
                include_path,
                data_linkage,
            )

        for i in range(len(model.outputs)):
            create_header_file(
                (f'{mangle_name(model.name,"output_data")}{i}'),
                np.zeros(model.outputs[i].shape, model.outputs[i].dtype),
                include_path,
                data_linkage,
            )
            create_header_file(
                (f'{mangle_name(model.name, "expected_output_data")}{i}'),
                model.outputs[i],
                include_path,
                data_linkage,
            )

    create_main(
        "test.c",
        [compiled_model.model for compiled_model in models],
        build_path,
        runner.includes,
        runner.prologue,
        data_linkage,
        interface_api,
        workspace_bytes,
    )

    # Verify that compiles fine
    file_dir = os.path.dirname(os.path.abspath(__file__))
    codegen_path = os.path.join(base_path, "codegen")
    makefile = os.path.join(file_dir, f"{runner.makefile}.mk")
    custom_params = " ".join([f" {param}='{value}'" for param, value in runner.parameters.items()])
    make_command = (
        f"make -f {makefile} build_dir={build_path}"
        + f" CFLAGS='{cflags}'"
        + f" TVM_ROOT={file_dir}/../../../.."
        + f" AOT_TEST_ROOT={file_dir}"
        + f" CODEGEN_ROOT={codegen_path}"
        + f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}"
        + custom_params
    )

    compile_log_path = os.path.join(build_path, "test_compile.log")
    compile_command = f"{make_command} aot_test_runner"
    ret = subprocess_log_output(compile_command, ".", compile_log_path)
    assert ret == 0

    # Verify that runs fine
    run_log_path = os.path.join(build_path, "test_run.log")
    run_command = f"{make_command} run"
    ret = subprocess_log_output(run_command, build_path, run_log_path)
    assert ret == 0

    with open(run_log_path) as run_log:
        assert AOT_SUCCESS_TOKEN in run_log.read()
Example 20
def compile_and_run(mod,
                    input_list,
                    output_list,
                    use_calculated_workspaces,
                    params=None,
                    workspace_byte_alignment=8):
    """
    This method verifies the generated source
    """
    target = f"c -runtime=c --link-params --executor=aot --workspace-byte-alignment={workspace_byte_alignment}"
    cflags = f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} "

    # The calculated workspaces will not account for stack allocator tags used for debugging
    if not use_calculated_workspaces:
        cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK "

    with tvm.transform.PassContext(opt_level=3,
                                   config={"tir.disable_vectorize": True}):
        lib = tvm.relay.build(mod, target, target_host=target, params=params)

    tmp_path = utils.tempdir()
    tmp_dir = tmp_path.temp_dir

    base_path = os.path.join(tmp_dir, "test")
    build_path = os.path.join(base_path, "build")
    os.makedirs(build_path, exist_ok=True)

    tar_file = os.path.join(base_path, "test.tar")
    export_model_library_format(lib, tar_file)
    t = tarfile.open(tar_file)
    t.extractall(base_path)
    if use_calculated_workspaces:
        workspace_bytes = extract_main_workspace_sizebytes(base_path)
    else:
        workspace_bytes = 16384 * 1024

    for i in range(len(input_list)):
        create_header_file((f"input_data{i}"), input_list[i], build_path)

    for i in range(len(output_list)):
        create_header_file(
            (f"output_data{i}"),
            np.zeros(output_list[i].shape, output_list[i].dtype),
            build_path,
        )
        create_header_file((f"expected_output_data{i}"), output_list[i],
                           build_path)

    create_main("test.c", input_list, output_list, build_path, workspace_bytes)

    # Verify that compiles fine
    file_dir = os.path.dirname(os.path.abspath(__file__))
    makefile = os.path.join(file_dir, "aot_test.mk")
    make_cmd = (f"make CFLAGS='{cflags}' -f {makefile} build_dir=" +
                build_path + f" TVM_ROOT={file_dir}/../../../..")

    compile_log_path = os.path.join(build_path, "test_compile.log")
    ret = subprocess_with_stdout_and_log(make_cmd, ".", compile_log_path,
                                         False)
    assert ret == 0

    # Verify that runs fine
    run_log_path = os.path.join(build_path, "test_run.log")
    ret = subprocess_with_stdout_and_log("./aot_test_runner", build_path,
                                         run_log_path, False)
    assert ret == 0
Example 21
def compile_and_run(
    models: Union[List[AOTTestModel], AOTTestModel],
    runner: AOTTestRunner,
    interface_api,
    use_unpacked_api,
    debug_calculated_workspaces=False,
    workspace_byte_alignment=8,
    enable_op_fusion=True,
):
    """
    This method verifies the generated source
    """
    base_target = "c -runtime=c --link-params --executor=aot"
    extra_target = f"--workspace-byte-alignment={workspace_byte_alignment} --interface-api={interface_api} --unpacked-api={int(use_unpacked_api)}"
    target = f"{base_target} {extra_target}"
    cflags = f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} "

    if not isinstance(models, list):
        models = [models]

    # The calculated workspaces will not account for stack allocator tags used for debugging
    if debug_calculated_workspaces:
        cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK "

    config = {"tir.disable_vectorize": True}
    if not enable_op_fusion:
        config["relay.FuseOps.max_depth"] = 1

    tmp_path = utils.tempdir()
    tmp_dir = tmp_path.temp_dir

    base_path = os.path.join(tmp_dir, "test")
    build_path = os.path.join(base_path, "build")
    os.makedirs(build_path, exist_ok=True)

    include_path = os.path.join(base_path, "include")
    os.mkdir(include_path)
    crt_root = tvm.micro.get_standalone_crt_dir()
    shutil.copy2(
        os.path.join(crt_root, "template", "crt_config-template.h"),
        os.path.join(include_path, "crt_config.h"),
    )

    workspace_bytes = 0
    for model in models:
        with tvm.transform.PassContext(opt_level=3, config=config):
            lib = tvm.relay.build(
                model.module,
                target,
                target_host=target,
                params=model.params,
                mod_name=model.name,
            )

        tar_file = os.path.join(base_path, f"{model.name}.tar")
        export_model_library_format(lib, tar_file)
        t = tarfile.open(tar_file)
        t.extractall(base_path)

        workspace_bytes += extract_main_workspace_size_bytes(base_path)

        for key in model.inputs:
            sanitized_tensor_name = re.sub(r"\W", "_", key)
            create_header_file(
                f'{mangle_name(model.name, "input_data")}_{sanitized_tensor_name}',
                model.inputs[key],
                include_path,
            )

        for i in range(len(model.outputs)):
            create_header_file(
                (f'{mangle_name(model.name,"output_data")}{i}'),
                np.zeros(model.outputs[i].shape, model.outputs[i].dtype),
                include_path,
            )
            create_header_file(
                (f'{mangle_name(model.name, "expected_output_data")}{i}'),
                model.outputs[i],
                include_path,
            )

    create_main(
        "test.c",
        models,
        build_path,
        runner.includes,
        runner.prologue,
        interface_api,
        workspace_bytes,
    )

    # Verify that compiles fine
    file_dir = os.path.dirname(os.path.abspath(__file__))
    codegen_path = os.path.join(base_path, "codegen")
    makefile = os.path.join(file_dir, f"{runner.makefile}.mk")
    custom_params = " ".join(
        [f" {param}='{value}'" for param, value in runner.parameters.items()])
    make_command = (
        f"make -f {makefile} build_dir={build_path}" + f" CFLAGS='{cflags}'" +
        f" TVM_ROOT={file_dir}/../../../.." + f" AOT_TEST_ROOT={file_dir}" +
        f" CODEGEN_ROOT={codegen_path}" +
        f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}" +
        custom_params)

    compile_log_path = os.path.join(build_path, "test_compile.log")
    compile_command = f"{make_command} aot_test_runner"
    ret = subprocess_log_output(compile_command, ".", compile_log_path)
    assert ret == 0

    # Verify that runs fine
    run_log_path = os.path.join(build_path, "test_run.log")
    run_command = f"{make_command} run"
    ret = subprocess_log_output(run_command, build_path, run_log_path)
    assert ret == 0

    with open(run_log_path) as run_log:
        assert AOT_SUCCESS_TOKEN in run_log.read()
Example 22
def compile_and_run(
    mod,
    inputs,
    output_list,
    interface_api,
    use_unpacked_api,
    use_calculated_workspaces,
    params=None,
    workspace_byte_alignment=8,
    mod_name="default",
    enable_op_fusion=True,
):
    """
    This method verifies the generated source
    """
    base_target = "c -runtime=c --link-params --executor=aot"
    extra_target = f"--workspace-byte-alignment={workspace_byte_alignment} --interface-api={interface_api} --unpacked-api={int(use_unpacked_api)}"
    target = f"{base_target} {extra_target}"
    cflags = f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} "

    # The calculated workspaces will not account for stack allocator tags used for debugging
    if not use_calculated_workspaces:
        cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK "

    config = {"tir.disable_vectorize": True}
    if not enable_op_fusion:
        config["relay.FuseOps.max_depth"] = 1

    with tvm.transform.PassContext(opt_level=3, config=config):
        lib = tvm.relay.build(mod,
                              target,
                              target_host=target,
                              params=params,
                              mod_name=mod_name)

    tmp_path = utils.tempdir()
    tmp_dir = tmp_path.temp_dir

    base_path = os.path.join(tmp_dir, "test")
    build_path = os.path.join(base_path, "build")
    os.makedirs(build_path, exist_ok=True)

    tar_file = os.path.join(base_path, "test.tar")
    export_model_library_format(lib, tar_file)
    t = tarfile.open(tar_file)
    t.extractall(base_path)
    if use_calculated_workspaces:
        workspace_bytes = extract_main_workspace_sizebytes(base_path)
    else:
        workspace_bytes = 16384 * 1024

    include_path = os.path.join(base_path, "include")
    os.mkdir(include_path)
    crt_root = tvm.micro.get_standalone_crt_dir()
    shutil.copy2(
        os.path.join(crt_root, "template", "crt_config-template.h"),
        os.path.join(include_path, "crt_config.h"),
    )

    for key in inputs:
        create_header_file(
            f'{mangle_name(mod_name, "input_data")}_{key}',
            inputs[key],
            os.path.join(base_path, "include"),
        )

    for i in range(len(output_list)):
        create_header_file(
            f'{mangle_name(mod_name,"output_data")}{i}',
            np.zeros(output_list[i].shape, output_list[i].dtype),
            os.path.join(base_path, "include"),
        )
        create_header_file(
            f'{mangle_name(mod_name, "expected_output_data")}{i}',
            output_list[i],
            os.path.join(base_path, "include"),
        )

    create_main(
        "test.c",
        {mod_name: inputs},
        {mod_name: output_list},
        build_path,
        interface_api,
        workspace_bytes,
    )

    # Verify that compiles fine
    file_dir = os.path.dirname(os.path.abspath(__file__))
    codegen_path = os.path.join(base_path, "codegen")
    makefile = os.path.join(file_dir, "aot_test.mk")
    make_cmd = (f"make CFLAGS='{cflags}' -f {makefile} build_dir=" +
                build_path + f" TVM_ROOT={file_dir}/../../../.." +
                f" CODEGEN_ROOT={codegen_path}" +
                f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}")

    compile_log_path = os.path.join(build_path, "test_compile.log")
    ret = subprocess_log_output(make_cmd, ".", compile_log_path)
    assert ret == 0

    # Verify that runs fine
    run_log_path = os.path.join(build_path, "test_run.log")
    ret = subprocess_log_output("./aot_test_runner", build_path, run_log_path)
    assert ret == 0
Example 23
def compile_and_run_multiple_models(
    mod_map,
    input_list_map,
    output_list_map,
    interface_api,
    use_unpacked_api,
    use_calculated_workspaces,
    param_map,
    workspace_byte_alignment=8,
):
    """
    This method verifies the generated source
    """
    base_target = "c -runtime=c --link-params --executor=aot"
    extra_target = f"--workspace-byte-alignment={workspace_byte_alignment} --interface-api={interface_api} --unpacked-api={int(use_unpacked_api)}"
    target = f"{base_target} {extra_target}"
    tmp_path = utils.tempdir()
    tmp_dir = tmp_path.temp_dir

    base_path = os.path.join(tmp_dir, "test")
    build_path = os.path.join(base_path, "build")
    os.makedirs(build_path, exist_ok=True)

    include_path = os.path.join(base_path, "include")
    os.mkdir(include_path)
    crt_root = tvm.micro.get_standalone_crt_dir()
    shutil.copy2(
        os.path.join(crt_root, "template", "crt_config-template.h"),
        os.path.join(include_path, "crt_config.h"),
    )

    for mod_name, mod in mod_map.items():

        with tvm.transform.PassContext(opt_level=3,
                                       config={"tir.disable_vectorize": True}):
            lib = tvm.relay.build(mod,
                                  target,
                                  target_host=target,
                                  params=param_map[mod_name],
                                  mod_name=mod_name)

        tar_file = os.path.join(base_path, "test.tar")
        export_model_library_format(lib, tar_file)
        t = tarfile.open(tar_file)
        t.extractall(base_path)

        input_list = input_list_map[mod_name]
        output_list = output_list_map[mod_name]

        for key in input_list:
            create_header_file((f'{mangle_name(mod_name,"input_data")}_{key}'),
                               input_list[key], build_path)

        for i in range(len(output_list_map[mod_name])):
            create_header_file(
                (f'{mangle_name(mod_name,"output_data")}{i}'),
                np.zeros(output_list[i].shape, output_list[i].dtype),
                build_path,
            )
            create_header_file(
                (f'{mangle_name(mod_name,"expected_output_data")}{i}'),
                output_list[i], build_path)

    create_main(
        "test.c",
        input_list_map,
        output_list_map,
        build_path,
        interface_api,
        workspace_bytes=16384 * 1024,
    )

    # Verify that compiles fine
    file_dir = os.path.dirname(os.path.abspath(__file__))
    codegen_path = os.path.join(base_path, "codegen")
    makefile = os.path.join(file_dir, "aot_test.mk")
    make_cmd = (f"make -f {makefile} build_dir=" + build_path +
                f" TVM_ROOT={file_dir}/../../../.." +
                f" CODEGEN_ROOT={codegen_path}" +
                f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}")

    compile_log_path = os.path.join(build_path, "test_compile.log")
    ret = subprocess_log_output(make_cmd, ".", compile_log_path)
    assert ret == 0

    # Verify that runs fine
    run_log_path = os.path.join(build_path, "test_run.log")
    ret = subprocess_log_output("./aot_test_runner", build_path, run_log_path)
    assert ret == 0
Example 24
def test_export_model_library_format_c(target):
    executor, _target = target
    with utils.TempDirectory.set_keep_for_debug(True):
        with tvm.transform.PassContext(opt_level=3,
                                       config={"tir.disable_vectorize": True}):
            relay_mod = tvm.parser.fromtext("""
            #[version = "0.0.5"]
            def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), float32], %c : Tensor[(1, 2), float32]) {
            %0 = cast(%a, dtype="float32") + %b * %c;
            %0
            }""")
            factory = tvm.relay.build(
                relay_mod,
                _target,
                target_host=_target,
                mod_name="add",
                params={"c": numpy.array([[2.0, 4.0]], dtype="float32")},
            )

        temp_dir = utils.tempdir()
        mlf_tar_path = temp_dir.relpath("lib.tar")
        import tvm.micro as micro

        micro.export_model_library_format(factory, mlf_tar_path)
        tf = tarfile.open(mlf_tar_path)

        extract_dir = temp_dir.relpath("extract")
        os.mkdir(extract_dir)
        tf.extractall(extract_dir)

        with open(os.path.join(extract_dir, "metadata.json")) as json_f:
            metadata = json.load(json_f)
            assert metadata["version"] == 3
            assert metadata["model_name"] == "add"
            export_datetime = datetime.datetime.strptime(
                metadata["export_datetime"], "%Y-%m-%d %H:%M:%SZ")
            assert (datetime.datetime.now() -
                    export_datetime) < datetime.timedelta(seconds=60 * 5)
            assert metadata["target"] == {"1": str(_target)}
            if executor == "graph":
                assert metadata["memory"]["sids"] == [
                    {
                        "storage_id": 0,
                        "size_bytes": 2,
                        "input_binding": "a"
                    },
                    {
                        "storage_id": 1,
                        "size_bytes": 8,
                        "input_binding": "b"
                    },
                    {
                        "storage_id": 2,
                        "size_bytes": 8,
                        "input_binding": "p0"
                    },
                    {
                        "storage_id": 3,
                        "size_bytes": 8
                    },
                ]
            assert metadata["memory"]["functions"]["main"] == [{
                "constants_size_bytes":
                8,
                "device":
                1,
                "io_size_bytes":
                18,
                "workspace_size_bytes":
                0,
            }]
            assert metadata["memory"]["functions"]["operator_functions"][0][
                "workspace"] == [{
                    "device": 1,
                    "workspace_size_bytes": 0
                }]
            assert ("fused_cast_multiply_add" in metadata["memory"]
                    ["functions"]["operator_functions"][0]["function_name"])

        assert os.path.exists(
            os.path.join(extract_dir, "codegen", "host", "src", "add_lib0.c"))
        assert os.path.exists(
            os.path.join(extract_dir, "codegen", "host", "src", "add_lib1.c"))

        if executor == "graph":
            validate_graph_json(extract_dir, factory)

        with open(os.path.join(extract_dir, "relay.txt")) as relay_f:
            assert relay_f.read() == str(relay_mod)

        with open(os.path.join(extract_dir, "parameters", "add.params"),
                  "rb") as params_f:
            params = tvm.relay.load_param_dict(params_f.read())
            assert "p0" in params