Example #1
def test_workspace_add():
    """Test a module which uses a workspace to compute an intermediate value."""
    if not tvm.runtime.enabled("micro_dev"):
        return
    shape = (1024,)
    dtype = "float32"

    reset_gdbinit()

    # Construct TVM expression.
    tvm_shape = tvm.runtime.convert(shape)
    A = te.placeholder(tvm_shape, name="A", dtype=dtype)
    B = te.compute(A.shape, lambda *i: A(*i) + 1, name="B")
    C = te.compute(A.shape, lambda *i: B(*i) + 1, name="C")
    s = te.create_schedule(C.op)

    func_name = "fadd_two_workspace"
    c_mod = tvm.build(s, [A, C], target="c", name=func_name)

    with micro.Session(DEV_CONFIG_A) as sess:
        micro_mod = micro.create_micro_mod(c_mod, DEV_CONFIG_A)
        micro_func = micro_mod[func_name]
        ctx = tvm.micro_dev(0)
        a_np = np.random.uniform(size=shape).astype(dtype)
        a = tvm.nd.array(a_np, ctx)
        c = tvm.nd.array(np.zeros(shape, dtype=dtype), ctx)
        micro_func(a, c)

        # ensure input wasn't corrupted
        tvm.testing.assert_allclose(a.asnumpy(), a_np)
        # ensure output is correct
        tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 2.0)
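All of these snippets are excerpts from µTVM-era test and benchmark files and assume the same surrounding setup. A minimal sketch of that context follows; the host-emulated device config helper and the DEV_CONFIG_A name are assumptions matching the usage above, not code from the original projects.

# Sketch of the shared setup these snippets assume; the host device config
# is an assumption -- the original projects used their own board configs.
import numpy as np
import tvm
from tvm import te, micro, relay
from tvm.contrib import graph_runtime

# Host-emulated micro device config (old tvm.micro.device API).
DEV_CONFIG_A = micro.device.host.generate_config()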
Example #2
def relay_micro_build(func, dev_config, params=None):
    """Create a graph runtime module with a micro device context from a Relay function.

    Parameters
    ----------
    func : relay.Function
        function to compile

    dev_config : Dict[str, Any]
        MicroTVM config dict for the target device

    params : dict
        input parameters that do not change during inference

    Returns
    -------
    mod : tvm.runtime.Module
        graph runtime module for the target device
    """
    with tvm.transform.PassContext(disabled_pass={'FuseOps'}, config={
        "tir.disable_vectorize": True
    }):
        graph, c_mod, params = relay.build(func, target=TARGET, params=params)
    micro_mod = micro.create_micro_mod(c_mod, dev_config)
    ctx = tvm.micro_dev(0)
    mod = graph_runtime.create(graph, micro_mod, ctx)
    mod.set_input(**params)
    return mod
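A hypothetical call site for relay_micro_build; the one-op Relay function is illustrative, and DEV_CONFIG_A and the TARGET global used inside the helper are assumed to be defined as above.

# Hypothetical usage of relay_micro_build (illustrative Relay function).
x = relay.var("x", shape=(10,), dtype="float32")
func = relay.Function([x], x + relay.const(1.0, "float32"))

with micro.Session(DEV_CONFIG_A):
    mod = relay_micro_build(func, DEV_CONFIG_A)
    mod.set_input("x", tvm.nd.array(np.ones(10, dtype="float32")))
    mod.run()
    print(mod.get_output(0).asnumpy())  # expect all 2.0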
Example #3
def test_add():
    """Test a module which performs addition."""
    if not tvm.runtime.enabled("micro_dev"):
        return
    shape = (1024,)
    dtype = "float32"

    # Construct TVM expression.
    tvm_shape = tvm.runtime.convert(shape)
    A = te.placeholder(tvm_shape, name="A", dtype=dtype)
    B = te.placeholder(tvm_shape, name="B", dtype=dtype)
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
    s = te.create_schedule(C.op)

    func_name = "fadd"
    c_mod = tvm.build(s, [A, B, C], target="c", name=func_name)

    with micro.Session(DEV_CONFIG):
        micro_mod = micro.create_micro_mod(c_mod, DEV_CONFIG)
        micro_func = micro_mod[func_name]
        ctx = tvm.micro_dev(0)
        a = tvm.nd.array(np.random.uniform(size=shape).astype(dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=shape).astype(dtype), ctx)
        c = tvm.nd.array(np.zeros(shape, dtype=dtype), ctx)
        micro_func(a, b, c)
        tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
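        # Aside (not in the original test): since the module was built with
        # target="c", get_source -- a standard runtime module method -- shows
        # the C code that will be cross-compiled for the device.
        print(c_mod.get_source())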
Example #4
def test_conv2d():
    """Test a conv2d module."""
    if not tvm.runtime.enabled("micro_dev"):
        return

    from tvm.relay import create_executor
    from tvm.relay import transform

    dshape = (1, 4, 16, 16)
    dtype = 'int8'
    func_name = 'fused_nn_conv2d'

    reset_gdbinit()

    # Construct Relay program.
    x = relay.var("x", shape=dshape, dtype=dtype)
    conv_expr = relay.nn.conv2d(x,
                                relay.var("w"),
                                kernel_size=(3, 3),
                                padding=(1, 1),
                                channels=4)
    func = relay.Function(relay.analysis.free_vars(conv_expr), conv_expr)
    mod = tvm.IRModule.from_expr(func)
    mod = transform.InferType()(mod)

    x_shape = list(
        map(lambda x: x.value, mod['main'].params[0].checked_type.shape))
    w_shape = list(
        map(lambda x: x.value, mod['main'].params[1].checked_type.shape))
    out_shape = list(map(lambda x: x.value, mod['main'].ret_type.shape))

    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        graph, c_mod, params = relay.build(mod, target="c")

    with micro.Session(DEV_CONFIG_A):
        micro_mod = micro.create_micro_mod(c_mod, DEV_CONFIG_A)
        candidate_func_name = func_name
        for i in range(100):
            try:
                micro_func = micro_mod[candidate_func_name]
                break
            except tvm.TVMError:
                candidate_func_name = f'{func_name}_{i}'
        else:
            assert False, f'no function matching {func_name} found in module'
        ctx = tvm.micro_dev(0)

        x_data = tvm.nd.array(
            np.random.uniform(size=x_shape).astype(dtype), ctx)
        w_data = tvm.nd.array(
            np.random.uniform(size=w_shape).astype(dtype), ctx)
        result = tvm.nd.array(np.zeros(shape=out_shape, dtype=dtype), ctx)
        micro_func(x_data, w_data, result)

        intrp = create_executor('debug')
        expected_result = intrp.evaluate(mod['main'])(x_data, w_data)

        tvm.testing.assert_allclose(result.asnumpy(),
                                    expected_result.asnumpy())
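The probing loop above exists because relay.build may emit the fused conv2d under a suffixed name (fused_nn_conv2d, fused_nn_conv2d_0, ...). The same idea expressed as a small helper; hypothetical, not part of the original test.

def find_micro_func(micro_mod, base_name, max_suffix=100):
    """Probe micro_mod for base_name or base_name_<i> (hypothetical helper)."""
    for name in [base_name] + [f'{base_name}_{i}' for i in range(max_suffix)]:
        try:
            return micro_mod[name]
        except tvm.TVMError:
            continue
    raise RuntimeError(f'no function matching {base_name} in module')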
Example #5
def spike_model(model_path, input_x, input_name):
    """Test a program which uses the graph runtime."""
    if not tvm.runtime.enabled("micro_dev"):
        print("not enable micro_dev")
        return

    input_x = input_x.astype("float32")
    shape_dict = {input_name: input_x.shape}
    mod, params = load_model(model_path, shape_dict)

    with micro.Session(DEV_RISCV):
        ctx = tvm.micro_dev(0)

        disable_vectorize = tvm.target.build_config(disable_vectorize=True)
        disable_fusion = relay.build_config(disabled_pass={'FuseOps'})
        with disable_vectorize, disable_fusion:
            graph, c_mod, params = relay.build(mod,
                                               target=TARGET,
                                               params=params)
        print("Part Success")
        micro_mod = micro.create_micro_mod(c_mod, DEV_RISCV)
        mod = graph_runtime.create(graph, micro_mod, ctx)
        mod.set_input(**params)
        mod.set_input(input_name, tvm.nd.array(input_x))
        mod.run()
        tvm_output = mod.get_output(0).asnumpy()
        return tvm_output
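A hypothetical driver for spike_model; the model path, input name, and shape are placeholders, since the original project's model is not shown.

# Hypothetical driver for spike_model (placeholder model and input name).
x = np.random.uniform(size=(1, 3, 224, 224)).astype("float32")
out = spike_model("model.onnx", x, "input")
if out is not None:
    print(out.shape)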
Example #6
def relay_micro_build(func,
                      dev_config,
                      target,
                      params=None,
                      lib_headers=None,
                      lib_include_paths=None):
    """Create a graph runtime module with a micro device context from a Relay function.

    Parameters
    ----------
    func : relay.Function
        function to compile

    dev_config : Dict[str, Any]
        MicroTVM config dict for the target device

    target : tvm.target.Target
        target for which to compile the function

    params : dict
        input parameters that do not change during inference

    lib_headers : Optional[List[str]]
        headers to include in the compiled device library

    lib_include_paths : Optional[List[str]]
        include paths to use when compiling the device library

    Returns
    -------
    mod : tvm.runtime.Module
        graph runtime module for the target device

    """
    with tvm.target.build_config(opt_level=3, disable_vectorize=True):
        graph, c_mod, params = relay.build(func, target=target, params=params)
    micro_mod = micro.create_micro_mod(c_mod,
                                       dev_config,
                                       lib_headers=lib_headers,
                                       lib_include_paths=lib_include_paths)
    ctx = tvm.micro_dev(0)
    if DEBUG_MODE:
        dump_root = f'{get_repo_root()}/debug/micro'
        mod = debug_runtime.create(graph, micro_mod, ctx, dump_root=dump_root)
    else:
        mod = graph_runtime.create(graph, micro_mod, ctx)
    mod.set_input(**params)
    return mod
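A hypothetical call showing the extra library arguments; the CMSIS header and include path are illustrative assumptions, not taken from the original project.

# Hypothetical call with device-library arguments (CMSIS names are examples).
graph_mod = relay_micro_build(
    func, dev_config, target,
    params=params,
    lib_headers=['cmsis_gcc.h'],
    lib_include_paths=['/path/to/CMSIS/Core/Include'],
)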
Example #7
def test_model():
    """Test a program which uses the graph runtime."""
    if not tvm.runtime.enabled("micro_dev"):
        print("not enable micro_dev")
        return

    model_url = 'https://people.linaro.org/~tom.gall/sine_model.tflite'
    model_file = 'sine_model.tflite'
    model_path = download_testdata(model_url, model_file, module='data')

    with open(model_path, "rb") as f:
        tflite_model_buf = f.read()

    # Using the buffer, transform into a tflite model python object
    try:
        import tflite
        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite.Model
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
   
    # Print out the version of the model
    version = tflite_model.Version()
    print ("Model Version: " + str(version))
    
    input_tensor = "dense_4_input"
    input_shape = (1,)
    input_dtype = "float32"
    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={input_tensor: input_shape},
        dtype_dict={input_tensor: input_dtype})
    with micro.Session(DEV_RISCV):
        ctx = tvm.micro_dev(0)

        disable_vectorize = tvm.target.build_config(disable_vectorize=True)
        disable_fusion = relay.build_config(disabled_pass={'FuseOps'})
        with disable_vectorize, disable_fusion:
            graph, c_mod, params = relay.build(mod, target=TARGET, params=params)
        
        micro_mod = micro.create_micro_mod(c_mod, DEV_RISCV)
        mod = graph_runtime.create(graph, micro_mod, ctx)
        mod.set_input(**params)
        mod.set_input(input_tensor, tvm.nd.array(np.array([0.5], dtype="float32")))
        mod.run()

        tvm_output = mod.get_output(0).asnumpy()
        print("result is: "+str(tvm_output))
Example #8
def get_comm_overhead(dev_config, num_trials=1):
    """Get communication overhead by executing an empty kernel."""
    class EmptyCMod:
        def __init__(self):
            pass

        def export_library(self, out_obj_path, fcompile=None):
            assert fcompile is not None
            fcompile(out_obj_path, f'{os.path.dirname(__file__)}/empty.c')

    # do multiple trials, then calc the average comm overhead
    results = []
    with micro.Session(dev_config) as sess:
        micro_mod = create_micro_mod(EmptyCMod(), dev_config)
        micro_func = micro_mod['empty']
        for _ in range(num_trials):
            results.append(benchmark_micro_func(sess, micro_func, [], 1, 0.0))
    return sum(results) / len(results)
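get_comm_overhead is meant to be subtracted from raw device timings; a hypothetical use follows, assuming the surrounding project's benchmark_micro_func helper and an open session.

# Hypothetical use: subtract session overhead from a kernel measurement.
overhead = get_comm_overhead(DEV_CONFIG_A, num_trials=5)
# elapsed = benchmark_micro_func(sess, micro_func, args, num_trials, overhead)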
Example #9
def test_model():
    """Test a program which uses the graph runtime."""
    if not tvm.runtime.enabled("micro_dev"):
        print("not enable micro_dev")
        return

    import onnx
    model_path = "mobilenet_v2.onnx"
    # now you have super_resolution.onnx on disk mobilenet_v2.onnx
    onnx_model = onnx.load(model_path)

    from PIL import Image

    img_path = "cat.png"
    img = Image.open(img_path).resize((224, 224))
    x = np.array(img)[np.newaxis, :, :, :]
    print(x.shape)
    # NHWC -> NCHW
    x = x.transpose([0, 3, 1, 2])
    # the ONNX model expects float32 input
    x = x.astype("float32")
    print(x.shape)

    input_name = "input"
    shape_dict = {input_name: x.shape}
    mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)

    with micro.Session(DEV_RISCV):
        ctx = tvm.micro_dev(0)

        disable_vectorize = tvm.target.build_config(disable_vectorize=True)
        disable_fusion = relay.build_config(disabled_pass={'FuseOps'})
        with disable_vectorize, disable_fusion:
            graph, c_mod, params = relay.build(mod, target=TARGET, params=params)
        print("I Find the wrong")
        micro_mod = micro.create_micro_mod(c_mod, DEV_RISCV)
        mod = graph_runtime.create(graph, micro_mod, ctx)
        mod.set_input(**params)
        mod.set_input(input_name, tvm.nd.array(x))
        mod.run()

        tvm_output = mod.get_output(0).asnumpy()
        print("result is: "+str(tvm_output))