Example 1
def _compile(ext_func):
    """
    This is the main wrapper that accepts an external
    relay function and runs all the passes to lower it down
    to command stream
    Parameters
    ----------
    ext_func : tvm.relay.function.Function
        The partitioned relay function
    Returns
    -------
    cs : str
        An hex string of the bytes of command stream
    encoded_constants : str
        An hex string of the bytes that includes concat'd
        encoded weights, encoded biases and scales.
    scratch_size : int
        The size of the scratch buffer needed.
    """
    mod = tvm.IRModule()
    mod["main"] = ext_func
    mod = LegalizeEthosU()(mod)
    mod = relay.transform.InferType()(mod)
    # We are currently using the copy_constants scheduler. In the long run,
    # this should be a single, intelligent, composite scheduler that can
    # perform scheduling based on user inputs such as scratch memory size.
    tir_mod, params = lower_to_tir(mod["main"], copy_constants())
    cmms, encoded_constants, scratch_size = tir_to_cs_translator.translate(
        tir_mod, params)
    return cmms, encoded_constants, scratch_size
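A minimal usage sketch for the wrapper above. It assumes a variable named
partitioned_func that holds a Relay function already partitioned for the NPU;
that name is illustrative and not taken from the source.

# partitioned_func is a hypothetical, already-partitioned Relay function.
cs_hex, consts_hex, scratch_size = _compile(partitioned_func)

# Both returned strings are hex encoded, so they can be decoded back into raw
# bytes with the standard library for inspection or serialization.
command_stream = bytes.fromhex(cs_hex)
constants = bytes.fromhex(consts_hex)
print(len(command_stream), len(constants), scratch_size)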
Example 2
def relay_to_tir(mod: tvm.ir.IRModule) -> tvm.ir.IRModule:
    """
    This is the hook for Python-based lowering of a Relay module, which lowers
    its NPU external functions to TIR.

    Parameters
    ----------
    mod : tvm.ir.IRModule
        The Relay module.

    Returns
    -------
    mod : tvm.ir.IRModule
        The Relay module with scheduled NPU external functions.
    """
    mod = OutlineCompilerFunctions("ethos-u")(mod)
    mod = LegalizeEthosU()(mod)
    mod = LUTsOptimizer()(mod)
    mod = relay.transform.InferType()(mod)
    mod = IdentityOptimizer()(mod)
    mod = LayoutOptimizer()(mod)
    mod = relay.transform.InferType()(mod)

    # Annotate each outlined NPU function with the device context it runs under.
    device_contexts = {
        gv: "ethos-u"
        for gv, _ in filter(lambda x: util.is_npu_func(x[1]),
                            mod.functions.items())
    }
    mod = mod.with_attr("device_contexts", device_contexts)

    # Use the cascader if it is enabled for the U55 accelerator; otherwise fall
    # back to the copy_constants scheduler.
    if util.is_cascader_enabled():
        if util.get_accelerator_config() == "ethos-u65-256":
            raise ValueError(
                "Cascading is not supported for the U65 accelerator")

        workspace_memory_pools = mod.attrs["workspace_memory_pools"]

        if not workspace_memory_pools:
            raise ValueError(
                "Workspace memory pool needs to be provided for the U55 cascader"
            )
        if len(workspace_memory_pools.pools) != 1:
            raise ValueError(
                "Exactly one workspace pool needs to be provided for the U55 cascader"
            )

        memory_pressure = _calculate_memory_pressure(mod)
        sram = extract_memory_info(workspace_memory_pools.pools[0],
                                   memory_pressure)
        tir_mod = LowerToTIR(
            _ethos_u55_cascader(sram, util.is_striping_enabled()))(mod)
    else:
        tir_mod = LowerToTIR(copy_constants())(mod)

    return tir_mod
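A hedged usage sketch for the hook above. partitioned_mod is assumed to be a
Relay IRModule whose NPU regions are annotated with the "ethos-u" compiler
attribute; the variable name is illustrative. In practice this function runs as
the backend's lowering hook rather than being called by hand.

# partitioned_mod is a hypothetical IRModule with "ethos-u"-annotated functions.
tir_mod = relay_to_tir(partitioned_mod)

# The NPU external functions are now TIR PrimFuncs; everything else in the
# module is left for the default compilation flow.
print(tir_mod)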
Example 3
def relay_to_tir_func(ext_func: relay.Function) -> tvm.tir.PrimFunc:
    """
    This is the hook for python-based lowering of relay function
    that gets offloaded to the microNPU.

    Parameters
    ----------
    ext_func : relay.Function
        This is the partitioned relay function

    Returns
    -------
    primfunc : tir.PrimFunc
        This returns the scheduled PrimFunc
    """
    assert len(ext_func.params) == 1
    input_size = util.calculate_size_bytes(ext_func.params[0])
    output_size = util.calculate_size_bytes(ext_func.body)
    mod = tvm.IRModule()
    mod["main"] = ext_func
    mod = LegalizeEthosU()(mod)
    mod = LUTsOptimizer()(mod)
    mod = LayoutOptimizer()(mod)
    mod = relay.transform.InferType()(mod)
    # We are currently using the copy_constants scheduler. In the long run,
    # this should be a single, intelligent, composite scheduler that can
    # perform scheduling based on user inputs such as scratch memory size.
    tir_mod, const_dict = lower_to_tir(mod["main"], copy_constants())

    # Wrap the raw constant data as NDArrays so they can be attached to the
    # PrimFunc as an attribute.
    for idx in const_dict.keys():
        const_dict[idx] = tvm.nd.array(const_dict[idx])

    primfunc = tir_mod["main"]
    primfunc = primfunc.with_attr("global_symbol",
                                  ext_func.attrs["global_symbol"])
    primfunc = primfunc.with_attr("ethos-u.constants", const_dict)
    primfunc = primfunc.with_attr("ethos-u.input_size", input_size)
    primfunc = primfunc.with_attr("ethos-u.output_size", output_size)
    return primfunc
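A short usage sketch for the hook above. npu_func stands for a single-parameter
Relay function that has already been partitioned for the microNPU; the name is
illustrative and not from the source.

# npu_func is a hypothetical partitioned Relay function with one parameter.
primfunc = relay_to_tir_func(npu_func)

# The scheduled PrimFunc carries the metadata attached above, which the later
# stages of the NPU flow consume.
print(primfunc.attrs["global_symbol"])
print(primfunc.attrs["ethos-u.input_size"], primfunc.attrs["ethos-u.output_size"])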
Example 4
def relay_to_tir(mod: tvm.ir.IRModule) -> tvm.ir.IRModule:
    """
    This is the hook for Python-based lowering of a Relay module, which lowers
    its NPU external functions to TIR.

    Parameters
    ----------
    mod : tvm.ir.IRModule
        The Relay module.

    Returns
    -------
    mod : tvm.ir.IRModule
        The Relay module with scheduled NPU external functions.
    """
    mod = OutlineCompilerFunctions("ethos-u")(mod)
    mod = LegalizeEthosU()(mod)
    mod = LUTsOptimizer()(mod)
    mod = relay.transform.InferType()(mod)
    mod = IdentityOptimizer()(mod)
    mod = LayoutOptimizer()(mod)
    mod = relay.transform.InferType()(mod)

    device_contexts = {
        gv: "ethos-u"
        for gv, _ in filter(lambda x: util.is_npu_func(x[1]),
                            mod.functions.items())
    }
    mod = mod.with_attr("device_contexts", device_contexts)

    # We are currently using the copy_constants scheduler. In the long run,
    # this should be a single, intelligent, composite scheduler that can
    # perform scheduling based on user inputs such as scratch memory size.
    mod = LowerToTIR(copy_constants())(mod)

    return mod
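For completeness, a hedged sketch of how a lowering hook of this shape could be
registered with the compiler. The registration name below is an assumption made
for illustration only; actual integrations wire the hook up through the
backend's own registration machinery.

import tvm

# "relay.ext.ethos-u.relay_to_tir" is a hypothetical registration name.
@tvm._ffi.register_func("relay.ext.ethos-u.relay_to_tir", override=True)
def _relay_to_tir_hook(mod: tvm.ir.IRModule) -> tvm.ir.IRModule:
    return relay_to_tir(mod)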