Code example #1
# Imports assumed from the enclosing codegen.py module in TVM's Ethos-U
# backend; the excerpt below does not stand alone without them.
import tvm

from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator, util


def primfunc_to_artifact(
        primfunc: tvm.tir.PrimFunc) -> util.CompilationArtifact:
    """
    This is the hook for Python-based lowering of a TIR PrimFunc
    that has undergone unified optimization into a compilation
    artifact destined for the microNPU.

    Parameters
    ----------
    primfunc : tir.PrimFunc
        TIR PrimFunc that has undergone unified optimizations

    Returns
    -------
    CompilationArtifact
        This is a structure that holds the binary artifacts
        for the microNPU
    """
    symbol = str(primfunc.attrs["global_symbol"])
    const_dict = primfunc.attrs["ethos-u.constants"]
    tir_mod = tvm.IRModule()
    tir_mod[symbol] = primfunc

    const_dict_np = dict()
    for buffer_var in const_dict.keys():
        const_dict_np[buffer_var] = const_dict[buffer_var].numpy()

    cmms, encoded_constants, base_addresses = tir_to_cs_translator.translate(
        tir_mod, const_dict_np)
    return util.CompilationArtifact(symbol, cmms, encoded_constants,
                                    base_addresses)
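
The hook above assumes its input already carries the attributes it reads. Below is a minimal sketch of attaching them to a PrimFunc first; the helper name and the placeholder constant are illustrative assumptions, not part of the original source:

import numpy as np
import tvm

def attach_ethosu_attrs(primfunc: tvm.tir.PrimFunc,
                        symbol: str) -> tvm.tir.PrimFunc:
    # Hypothetical helper: "global_symbol" names the generated function,
    # while "ethos-u.constants" maps buffer variables to NDArray constants
    # that primfunc_to_artifact converts to numpy before translation.
    # A string key stands in here for a real buffer variable.
    constants = {"placeholder_1": tvm.nd.array(np.zeros((16,), "uint8"))}
    primfunc = primfunc.with_attr("global_symbol", symbol)
    return primfunc.with_attr("ethos-u.constants", constants)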
Code example #2
# Imports assumed from the enclosing module in TVM's Ethos-U backend;
# the excerpt below does not stand alone without them.
import tvm

from tvm import relay
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator
from tvm.relay.backend.contrib.ethosu.legalize import LegalizeEthosU
from tvm.relay.backend.contrib.ethosu.tir.compiler import lower_to_tir
from tvm.relay.backend.contrib.ethosu.tir.scheduler import copy_constants


def _compile(ext_func):
    """
    This is the main wrapper that accepts an external
    Relay function and runs all the passes to lower it down
    to a command stream.

    Parameters
    ----------
    ext_func : tvm.relay.function.Function
        The partitioned Relay function

    Returns
    -------
    cs : str
        A hex string of the bytes of the command stream
    encoded_constants : str
        A hex string of the bytes of the concatenated
        encoded weights, encoded biases and scales
    scratch_size : int
        The size of the scratch buffer needed
    """
    mod = tvm.IRModule()
    mod["main"] = ext_func
    mod = LegalizeEthosU()(mod)
    mod = relay.transform.InferType()(mod)
    # We are currently using the copy_constants scheduler. In the long
    # run, this should be a single intelligent, composite scheduler
    # that can perform scheduling based on user inputs such as the
    # scratch memory size.
    tir_mod, params = lower_to_tir(mod["main"], copy_constants())
    cmms, encoded_constants, scratch_size = tir_to_cs_translator.translate(
        tir_mod, params)
    return cmms, encoded_constants, scratch_size
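
A hypothetical driver (the helper name is an assumption, not from the original source) showing how _compile's outputs might be consumed; since the first two results are hex strings, two hex digits correspond to one byte:

def summarize_artifacts(ext_func):
    # Each pair of hex digits encodes one byte of the binary artifact.
    cmms, encoded_constants, scratch_size = _compile(ext_func)
    print(f"command stream : {len(cmms) // 2} bytes")
    print(f"constants      : {len(encoded_constants) // 2} bytes")
    print(f"scratch buffer : {scratch_size} bytes")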
Code example #3
File: codegen.py  Project: zotanika/incubator-tvm
# Imports assumed from the enclosing codegen.py module in TVM's Ethos-U
# backend; the excerpt below does not stand alone without them.
import tvm

from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator, util


def primfunc_to_artifact(
        primfunc: tvm.tir.PrimFunc) -> util.CompilationArtifact:
    """
    This is the hook for Python-based lowering of a TIR PrimFunc
    that has undergone unified optimization into a compilation
    artifact destined for the microNPU.

    Parameters
    ----------
    primfunc : tir.PrimFunc
        TIR PrimFunc that has undergone unified optimizations

    Returns
    -------
    CompilationArtifact
        This is a structure that holds the binary artifacts
        for the microNPU
    """
    symbol = str(primfunc.attrs["global_symbol"])
    const_dict = primfunc.attrs["ethos-u.constants"]
    input_size = primfunc.attrs["ethos-u.input_size"]
    output_size = primfunc.attrs["ethos-u.output_size"]
    tir_mod = tvm.IRModule()
    tir_mod[symbol] = primfunc

    const_dict_with_int_keys = dict()
    for idx in const_dict.keys():
        const_dict_with_int_keys[int(idx)] = const_dict[idx].numpy()

    cmms, encoded_constants, scratch_size = tir_to_cs_translator.translate(
        tir_mod, const_dict_with_int_keys)
    return util.CompilationArtifact(cmms, encoded_constants, scratch_size,
                                    input_size, output_size, symbol)
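
A small illustration (values invented) of the key conversion done above: in this version the "ethos-u.constants" attribute is keyed by argument index rather than by buffer variable, and the translator expects plain Python ints:

import numpy as np

# Keys arrive as objects that stringify to argument indices; int() makes
# them usable as dictionary keys on the translator side.
const_dict = {"0": np.ones((8,), dtype="uint8")}
const_dict_with_int_keys = {int(k): v for k, v in const_dict.items()}
assert list(const_dict_with_int_keys) == [0]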
Code example #4
# Imports assumed from the enclosing test module; the helpers
# make_ethosu_conv2d, make_ethosu_binary_elementwise, run_opt_pass and
# _lower_to_tir come from TVM's Ethos-U test infrastructure and are
# assumed to be in scope.
import numpy as np
import tvm

from tvm import relay
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator
from tvm.relay.backend.contrib.ethosu.tir.scheduler import copy_constants


def test_constant_as_input():
    """Test to check that constants specified as inputs are not
    interpreted as encoded constants."""
    def get_graph():
        dtype = "uint8"
        ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype=dtype)
        conv1 = make_ethosu_conv2d(
            ifm,
            32,
            16,
            (1, 1),
            (0, 0),
            (1, 1),
            (1, 1),
        )
        scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
        add1 = make_ethosu_binary_elementwise(conv1,
                                              scalar,
                                              ifm_channels=32,
                                              ifm2_channels=1,
                                              operator_type="ADD",
                                              ofm_dtype=dtype)
        func = relay.Function(relay.analysis.free_vars(add1), add1)
        func = run_opt_pass(func, relay.transform.InferType())
        return func

    tir_mod, params = _lower_to_tir(get_graph(), copy_constants())

    # Check tile address for the scalar constant input hasn't been
    # overwritten.
    extern_calls = tir_mod["main"].body.body.body.body.body
    binary_elementwise = extern_calls[-1].value
    args = binary_elementwise.args

    reason = "Tile address overwritten"
    assert args[26] == 0, reason
    assert args[27] == 0, reason
    assert args[28] == 0, reason

    # More generally, check that the module still compiles successfully,
    # to make sure nothing else was overwritten.
    # With target hooks, the TIR module needs a target attached and must
    # be lowered via the MakeUnpackedAPI pass.
    tir_mod["main"] = tir_mod["main"].with_attr("target",
                                                tvm.target.Target("ethos-u"))
    tir_mod = tvm.tir.transform.MakeUnpackedAPI()(tir_mod)
    tir_to_cs_translator.translate(tir_mod, params)
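
The chain of ".body" accesses in the test depends on the exact nesting the lowering produces. A hedged alternative sketch (helper name assumed, not from the original source) that walks the whole statement tree instead:

from tvm import tir

def collect_call_nodes(prim_func):
    # post_order_visit visits every statement and expression node, so
    # collecting tir.Call nodes does not depend on the nesting depth.
    calls = []

    def visit(node):
        if isinstance(node, tir.Call):
            calls.append(node)

    tir.stmt_functor.post_order_visit(prim_func.body, visit)
    return calls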