Code example #1
File: test_pass_annotate_target.py Project: zkzt/tvm
    def after():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))

        # add_relu function
        in_1 = relay.var("in_1", shape=(10, 10))
        in_2 = relay.var("in_2", shape=(10, 10))
        add_node = relay.add(in_1, in_2)
        relu_node = relay.nn.relu(add_node)
        add_relu = relay.Function([in_1, in_2], relu_node)
        add_relu = add_relu.with_attr("Composite", "test.add_relu")

        # merged function
        cb_1 = relay.annotation.compiler_begin(a, "test")
        cb_2 = relay.annotation.compiler_begin(b, "test")
        r = relay.Call(add_relu, [cb_1, cb_2])
        ce_1 = relay.annotation.compiler_end(r, "test")
        f = relay.Function([a, b], ce_1)
        mod = tvm.IRModule.from_expr(f)
        return mod
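
The compiler_begin / compiler_end annotations mark the boundary of the region handed to the "test" external compiler, while the inner function's Composite attribute preserves the fused add + relu pattern through partitioning.
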
Code example #2
    def expected():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))

        # add_add_add function
        in_1 = relay.var('in_1', shape=(10, 10))
        in_2 = relay.var('in_2', shape=(10, 10))
        add_node = relay.add(in_1, in_2)
        add_node_1 = relay.add(in_1, add_node)
        add_node_2 = relay.add(add_node_1, add_node)
        add_add_add = relay.Function([in_1, in_2], add_node_2)
        add_add_add = add_add_add.with_attr("Primitive",
                                            tir.IntImm("int32", 1))
        add_add_add = add_add_add.with_attr("Composite", "add_add_add")

        # merged function
        sub_node = relay.subtract(a, b)
        call = relay.Call(add_add_add, [sub_node, b])
        return relay.Function([a, b], call)
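
This is evidently the expected output of a composite-merging pass: the chain of three adds is collapsed into one function tagged Primitive and Composite ("add_add_add"), and the merged function is called on sub_node and b while the subtract stays outside the composite.
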
Code example #3
    def after_B():
        inputs = [
            relay.var('input_' + str(i), shape=(10, 10)) for i in range(8)
        ]
        add_relu_calls = []
        for i in range(4):
            x = relay.var('x' + str(i))
            y = relay.var('y' + str(i))
            add_relu = relay.add(x, y)
            add_relu = relay.nn.relu(add_relu)
            add_relu = relay.Function([x, y], add_relu)
            add_relu = add_relu.with_attr('Composite', 'add_relu')
            add_relu_call = relay.Call(add_relu,
                                       [inputs[i * 2], inputs[i * 2 + 1]])
            add_relu_calls.append(add_relu_call)

        add = relay.add(add_relu_calls[0], add_relu_calls[1])
        sub = relay.subtract(add_relu_calls[2], add_relu_calls[3])
        out = relay.multiply(add, sub)
        return relay.Function(inputs, out)
Code example #4
def expected_same_output_region():
    mod = tvm.IRModule()
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    z = relay.var("z", shape=(8, 8))
    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    log = relay.log(x0)
    sub = x0 - y0
    mul = log * sub
    # The partitioned graph contains log, subtract, and multiply
    func = relay.Function([x0, y0], mul)
    func = set_func_attr(func, "ccompiler", "ccompiler_0")
    glb_0 = relay.GlobalVar("ccompiler_0")
    mod[glb_0] = func
    add = x + y
    call = relay.Call(glb_0, [add, z])
    main = relay.Function([x, y, z], call)
    mod["main"] = main
    return mod
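
The set_func_attr helper is not shown in this listing. Based on the attribute pattern spelled out in code example #19 below (and the global_symbol spelling used in examples #13 and #30), a plausible sketch, not the verbatim helper, is:

import tvm

def set_func_attr(func, compiler, ext_symbol):
    # Mark the function as a primitive, inlinable unit owned by an
    # external codegen; attribute names follow the pattern in example #19.
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", compiler)
    func = func.with_attr("global_symbol", ext_symbol)
    return func
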
Code example #5
File: expr.py Project: ojotoxy/incubator-tvm
    def __call__(self, *args):
        """Call the global variable.

        Parameters
        ----------
        args: List[RelayExpr]
            The arguments to the call.

        Returns
        -------
        call: BaseExpr
            A call taking the variable as a function.
        """
        # pylint: disable=import-outside-toplevel
        if all(isinstance(x, RelayExpr) for x in args):
            from tvm import relay
            return relay.Call(self, args)
        arg_types = [type(x) for x in args]
        raise RuntimeError(
            "Do not know how to handle GlobalVar.__call__ for types {}".format(arg_types))
Code example #6
    def create_graph():
        def create_external_func1(mod_, compiler_name, symbol_name):
            x_int = relay.var("x_int", shape=(10, 10))
            z0 = relay.nn.relu(x_int)
            f1 = relay.Function([x_int], z0)
            f1 = set_func_attr(f1, compiler_name, symbol_name)
            glb_f1 = relay.GlobalVar(symbol_name)
            mod_[glb_f1] = f1
            mod_ = relay.transform.InferType()(mod_)
            return glb_f1, mod_

        mod = tvm.IRModule()
        x = relay.var("x", shape=(10, 10))

        glb_symbol_f1, mod = create_external_func1(mod, "ethosu", "ethosu_0")
        r = relay.Call(glb_symbol_f1, [x])
        main = relay.Function([x], r)
        mod["main"] = main
        mod = relay.transform.InferType()(mod)
        return mod
Code example #7
def test_count_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype='int32'))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i],
                          sb.get(),
                          ret_type=relay.TensorType([], 'int32'))
    mod[sum_up] = func
    i_data = np.array(0, dtype='int32')
    iarg = relay.var('i', shape=[], dtype='int32')
    mod[mod.entry_func] = relay.Function([iarg], sum_up(iarg))
    result = veval(mod, i_data)
    tvm.testing.assert_allclose(result.asnumpy(), i_data)
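
For reference, the recursion this ScopeBuilder graph encodes matches the following plain-Python sketch; with i_data = 0 the if-branch fires immediately, so the VM result equals the input:

def sum_up(i):
    # if i == 0 return i, else recurse on i - 1 and add i
    return i if i == 0 else sum_up(i - 1) + i
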
Code example #8
def test_count_loop():
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype="int32"))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
    mod[sum_up] = func
    i_data = np.array(0, dtype="int32")
    iarg = relay.var("i", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg], sum_up(iarg))
    for tgt, ctx in tvm.testing.enabled_targets():
        result = veval(mod, i_data, ctx=ctx, target=tgt)
        tvm.testing.assert_allclose(result.asnumpy(), i_data)
    check_result([i_data], i_data, mod=mod)
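
Compared with example #7, this variant uses the current API: tvm.IRModule in place of relay.module.Module, a "main" entry instead of mod.entry_func, and one VM evaluation per target from tvm.testing.enabled_targets().
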
Code example #9
    def expected():
        mod = tvm.IRModule({})

        sum_up = relay.GlobalVar('sum_up')
        i = relay.var('i', shape=[], dtype='int32')
        sb = relay.ScopeBuilder()
        with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
            sb.ret(i)
        with sb.else_scope():
            one_less = relay.subtract(i, relay.const(1, dtype='int32'))
            rec_call = relay.Call(sum_up, [one_less])
            sb.ret(relay.add(rec_call, i))
        func = relay.Function([i],
                              sb.get(),
                              ret_type=relay.TensorType([], 'int32'))
        func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        mod[sum_up] = func
        iarg = relay.var('i', shape=[], dtype='int32')
        mod["main"] = relay.Function([iarg], sum_up(iarg))
        return mod
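
The Inline attribute (IntImm value 1) flags sum_up as a candidate for the relay.transform.Inline pass that this expected() function accompanies.
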
Code example #10
File: test_target_hooks.py Project: were/tvm
def test_runtime_module_generation(check_result):
    shape = (8,)
    x_data = np.random.randint(255, size=shape).astype("float32")
    y_data = np.random.randint(255, size=shape).astype("float32")
    inputs = {"x": x_data, "y": y_data}

    x0 = relay.var("x0", shape=shape, dtype="float32")
    y0 = relay.var("y0", shape=shape, dtype="float32")
    z = x0 + y0
    func = relay.Function([x0, y0], z)
    func = set_external_func_attr(func, "example_target_hook", "replace_add_with_subtract")
    # Test hook to trigger TIRToRuntime code generation
    func = func.with_attr("tir_to_runtime", True)

    x = relay.var("x", shape=(8,), dtype="float32")
    y = relay.var("y", shape=(8,), dtype="float32")
    call = relay.Call(func, [x, y])
    mod = IRModule.from_expr(call)

    check_result(mod, inputs, (8,), x_data * y_data)
Code example #11
def test_extern_dnnl(check_result):
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)

    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
    depthwise_conv2d_1 = relay.nn.conv2d(data1,
                                         weight1,
                                         kernel_size=(3, 3),
                                         padding=(1, 1),
                                         groups=32)
    depthwise_conv2d_2 = relay.nn.conv2d(depthwise_conv2d_1,
                                         weight2,
                                         kernel_size=(3, 3),
                                         padding=(1, 1),
                                         groups=32)
    out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)

    f = relay.Function([data1, weight1, weight2], out)
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f

    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data0, weight0, weight0])
    mod = tvm.IRModule.from_expr(call)

    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)

    ref_ex = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu())
    ref_res = ref_ex.evaluate()(i_data, w_data, w_data)
    check_result(mod, {
        "data0": i_data,
        "weight0": w_data
    }, (1, 32, 14, 14),
                 ref_res.numpy(),
                 tol=1e-5)
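
ref_mod runs the same graph through the regular TVM pipeline to produce a reference result, while mod wraps the function with "dnnl" external-codegen attributes so check_result exercises the DNNL path. Note that the inner function reuses the name hints data0/weight0 and that weight0 is bound to both weight parameters of the external call.
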
Code example #12
def test_extern_dnnl_const():
    if not tvm.get_global_func("relay.ext.dnnl", True):
        print("skip because DNNL codegen is not available")
        return

    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)

    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight1 = relay.const(w_data, dtype=dtype)
    weight2 = relay.const(w_data, dtype=dtype)
    depthwise_conv2d_1 = relay.nn.conv2d(data1,
                                         weight1,
                                         kernel_size=(3, 3),
                                         padding=(1, 1),
                                         groups=32)
    depthwise_conv2d_2 = relay.nn.conv2d(depthwise_conv2d_1,
                                         weight2,
                                         kernel_size=(3, 3),
                                         padding=(1, 1),
                                         groups=32)
    out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)

    f = relay.Function([data1], out)
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f

    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data0])
    mod = tvm.IRModule.from_expr(call)

    i_data = np.random.uniform(0, 1, ishape).astype(dtype)

    ref_ex = relay.create_executor("graph", mod=ref_mod, ctx=tvm.cpu())
    ref_res = ref_ex.evaluate()(i_data)
    check_result(mod, {"data0": i_data}, (1, 32, 14, 14),
                 ref_res.asnumpy(),
                 tol=1e-5)
Code example #13
File: test_codegen.py Project: elliothe/tvm
    def create_model():
        ifm_count = int(np.prod(ifm_shape))
        ifm2_count = int(np.prod(ifm2_shape))

        # Create a "partitioned" Relay function
        ifms = relay.var("ifms", shape=[ifm_count + ifm2_count], dtype=dtype)
        split = relay.split(ifms, [ifm_count])
        ifm = relay.reshape(split[0], newshape=ifm_shape)
        ifm2 = relay.reshape(split[1], newshape=ifm2_shape)
        shr_op = infra.make_ethosu_binary_elementwise(ifm, ifm2, ifm_shape[3],
                                                      ifm2_shape[3], "SHR",
                                                      ofm_dtype,
                                                      reversed_operands)

        glb_ethosu = relay.GlobalVar("tvmgen_default_ethos_u_main_0")
        func = (
            relay.Function([ifms], shr_op)
            .with_attr("Inline", 1)
            .with_attr("Compiler", "ethos-u")
            .with_attr("global_symbol", "tvmgen_default_ethos_u_main_0")
            .with_attr("Primitive", 1)
        )
        mod = tvm.IRModule()
        mod[glb_ethosu] = func
        mod = relay.transform.InferType()(mod)

        # Main
        ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
        ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
        call = relay.Call(
            glb_ethosu,
            [
                relay.concatenate(
                    data=(
                        relay.reshape(ifm, newshape=ifm_count),
                        relay.reshape(ifm2, newshape=ifm2_count),
                    ),
                    axis=0,
                )
            ],
        )
        mod["main"] = relay.Function([ifm, ifm2], call)
        mod = relay.transform.InferType()(mod)
        return mod
Code example #14
def test_cps_pe():
    def destroy_ref(x):
        x = run_infer_type(x)
        x = to_cps(x)
        x = run_infer_type(x)
        y = un_cps(x)
        y = run_infer_type(y)
        x = run_opt_pass(x, transform.Sequential(
            [transform.PartialEvaluate(),
             transform.DeadCodeElimination(inline_once=True)]))
        assert Feature.fRefCreate not in detect_feature(x)
    unit = relay.Function([], relay.const(0., dtype='float32'))
    f_ref = relay.Var("f_ref")

    one = relay.const(1., dtype='float32')
    two = relay.const(2., dtype='float32')
    cond = relay.var(shape=(), dtype='uint1', name_hint='cond')
    true_branch = relay.RefWrite(f_ref, relay.Function([], one))
    false_branch = relay.RefWrite(f_ref, relay.Function([], two))
    if_expr = relay.If(cond, true_branch, false_branch)

    stmt = relay.Let(f_ref, relay.RefCreate(unit),
                     relay.Let(relay.Var("x"), if_expr,
                               relay.Call(relay.RefRead(f_ref), [])))

    F = relay.Function([cond], stmt)
    destroy_ref(F)

    G = relay.Function([cond], relay.If(cond, one, two))
    G = run_infer_type(G)
    G = relay.transform.gradient(G)
    destroy_ref(G)

    x = relay.var("x", shape=(1, 16))
    y = relay.var("y", shape=(1, 16))
    z = relay.var("z", shape=(1, 16))
    cond = relay.var("cond", shape=(), dtype='uint1')
    H = relay.If(cond, x, y)
    H = relay.add(H, z)
    H = relay.Function([cond, x, y, z], H)
    H = run_infer_type(H)
    H = relay.transform.gradient(H)
    destroy_ref(H)
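
destroy_ref round-trips each function through continuation-passing style (to_cps / un_cps), then applies PartialEvaluate and DeadCodeElimination; the assertion verifies that no RefCreate feature remains, i.e. the mutable-reference plumbing has been optimized away.
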
Code example #15
def test_sum_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    accum = relay.var('accum', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, 'int32'))
        new_accum = relay.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    loop_bound = 0
    i_data = np.array(loop_bound, dtype='int32')
    accum_data = np.array(0, dtype='int32')
    iarg = relay.var('i', shape=[], dtype='int32')
    aarg = relay.var('accum', shape=[], dtype='int32')
    mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
    check_result([i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod)
Code example #16
def test_mixed_data_types():
    """
    Test a graph with a primitive function that has mixed datatypes.
    """
    def get_inner_func():
        x = relay.var("x", shape=(1, 2, 2, 2), dtype="int16")
        x = relay.cast(x, dtype="uint32")
        x = _create_primitive_function(x)
        return x

    ifm = relay.var("input", shape=(1, 2, 2, 2), dtype="int16")
    x = get_inner_func()
    x = relay.Call(x, [ifm])
    mod = tvm.IRModule.from_expr(x)

    expected_annotations = [
        # inner function: 8 int16 elements (2 bytes each) for the input
        # plus 8 uint32 elements (4 bytes each) for the cast output
        [(1 * 2 * 2 * 2) * 2 + (1 * 2 * 2 * 2) * 4],
    ]
    expected_io_annotation = (1 * 2 * 2 * 2) * 2 + (1 * 2 * 2 * 2) * 4
    _check_used_memory_annotations(mod, expected_annotations,
                                   expected_io_annotation)
Code example #17
def test_external_function():
    y0_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    x0 = relay.var("x0", shape=(8, 8))
    y0_const = relay.const(y0_data, "float32")
    z0 = x0 + y0_const
    ef = relay.Function([x0], z0, relay.TensorType((8, 8), "float32"))
    ev = relay.GlobalVar("external_function")
    ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)

    x = relay.var("x", shape=(8, 8))
    c = relay.Call(ev, [x])
    mf = relay.Function([x], c, relay.TensorType((8, 8), "float32"))
    mv = relay.GlobalVar("main")

    mod = tvm.IRModule()
    mod[ev] = ef
    mod[mv] = mf

    mod = ExtractConstantsFromPartitionedFunction()(mod)
    CheckFunctionsForConstants().check_num_constants(mod[ev])
    relay.transform.InferType()(mod)
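
Judging from the pass and helper names, ExtractConstantsFromPartitionedFunction should hoist y0_const out of the cmsis-nn external function, and CheckFunctionsForConstants then checks the constant count left in mod[ev].
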
Code example #18
def test_pow():
    mod = relay.Module()
    p = Prelude(mod)
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    double = relay.Function([x], x + x)
    i = relay.var("i", t)
    func = relay.Function([i],
                          relay.Call(p.iterate(double, p.s(p.s(p.s(p.z())))),
                                     [i]))
    back_func = relay.ir_pass.infer_type(gradient(func, mod=mod), mod=mod)
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    i_nd = rand(dtype, *shape)
    ex = create_executor(mod=mod)
    forward, (grad_i, ) = ex.evaluate(back_func)(i_nd)
    np.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
    np.testing.assert_allclose(grad_i.asnumpy(),
                               8 * np.ones_like(grad_i.asnumpy()))
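
Note the arithmetic behind the assertions: p.s(p.s(p.s(p.z()))) is the Peano numeral 3, so p.iterate(double, 3) composes double three times and the forward value is 2**3 * i = 8 * i; the gradient of 8 * i with respect to i is the constant 8, matching 8 * np.ones_like(...).
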
Code example #19
def expected():
    mod = tvm.IRModule()
    x = relay.const(ones)
    y = relay.var("y", shape=(8, 8))
    x0 = relay.const(ones)
    y0 = relay.var("y0", shape=(8, 8))
    add = x0 + y0
    # Function that uses C compiler
    func = relay.Function([y0], add)
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", tvm.tir.StringImm("ccompiler"))
    func = func.with_attr("ExternalSymbol",
                          tvm.tir.StringImm("ccompiler_0"))
    glb_0 = relay.GlobalVar("ccompiler_0")
    mod[glb_0] = func
    add_call = relay.Call(glb_0, [y])
    log = relay.log(add_call)
    main = relay.Function([y], log)
    mod["main"] = main
    return mod
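
This older snapshot attaches the symbol name under ExternalSymbol with tir.StringImm values; newer snippets in this listing (examples #13 and #30) attach plain strings under global_symbol instead.
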
Code example #20
def get_net(iterations, num_hidden, batch_size=1, dtype="float32"):
    """Constructs an unrolled RNN with LSTM cells"""
    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4 * num_hidden,), dtype)

    state_type = relay.TupleType([input_type, input_type])
    cell_type = relay.TupleType([input_type, state_type])

    builder = relay.ScopeBuilder()

    zeros = builder.let(("zeros", input_type), relay.zeros((batch_size, num_hidden), dtype))
    init_states = builder.let(("init_states", state_type), relay.Tuple([zeros, zeros]))

    states = init_states
    out = None

    for i in range(iterations):
        inputs = relay.Var("data", input_type)
        i2h_weight = relay.Var("i2h_%s_weight" % i, weight_type)
        i2h_bias = relay.Var("i2h_%s_bias" % i, bias_type)
        h2h_weight = relay.Var("h2h_%s_weight" % i, weight_type)
        h2h_bias = relay.Var("h2h_%s_bias" % i, bias_type)

        cell_fn = lstm_cell(num_hidden, batch_size, dtype, "lstm_%s" % i)

        call = builder.let(
            ("call_%s" % i, cell_type),
            relay.Call(cell_fn, [inputs, states, i2h_weight, i2h_bias, h2h_weight, h2h_bias]),
        )
        new_out = builder.let(("out_%s" % i, input_type), relay.TupleGetItem(call, 0))
        new_states = builder.let(("states_%s" % i, state_type), relay.TupleGetItem(call, 1))
        states = new_states
        out = new_out

    builder.ret(out)
    body = builder.get()
    args = relay.analysis.free_vars(body)
    return relay.Function(args, body, input_type)
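
Each builder.let call returns a fresh relay.Var bound in an accumulating let-chain, and builder.get() materializes the nested Let expressions. A minimal sketch of the same pattern (names hypothetical):

from tvm import relay

sb = relay.ScopeBuilder()
a = relay.var("a", shape=(2, 2))
t = sb.let("t", relay.add(a, a))  # t is a relay.Var bound to a + a
sb.ret(relay.multiply(t, t))
func = relay.Function([a], sb.get())  # body is Let(t, a + a, t * t)
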
Code example #21
def test_extern_gcc_consts(check_result):
    shape = (8, 8)
    dtype = "float32"
    x = relay.var("x", shape=shape)
    y0_data = np.random.uniform(0, 1, shape).astype(dtype)

    x0 = relay.var("x0", shape=shape)
    y0_const = relay.const(y0_data, dtype)
    z = x0 + y0_const
    f = relay.Function([x0], z)
    f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
    call = relay.Call(f, [x])
    mod = tvm.IRModule.from_expr(call)

    # Note that while the VMCompiler get_params() will return all 'parameters' from both
    # TVM and external codegen compiled code, the GraphExecutor.get_params() will return only
    # those from non-external modules. So in the following we'll test by execution rather than
    # test by inspection.
    x_data = np.random.rand(*shape).astype(dtype)
    inputs = {"x": x_data}
    expected_result = x_data + y0_data
    check_result(mod, inputs, shape, expected_result, target="llvm")
Code example #22
    def expected():
        x = relay.var("x", shape=(1, 8))
        beta = relay.var("beta", shape=(8, ))
        gamma = relay.var("gamma", shape=(8, ))
        moving_mean = relay.var("moving_mean", shape=(8, ))
        moving_var = relay.var("moving_var", shape=(8, ))

        # bn_relu function
        in_1 = relay.var("x1", shape=(1, 8))
        in_2 = relay.var("gamma1", shape=(8, ))
        in_3 = relay.var("beta1", shape=(8, ))
        in_4 = relay.var("moving_mean1", shape=(8, ))
        in_5 = relay.var("moving_var1", shape=(8, ))
        bn_node = relay.nn.batch_norm(in_1, in_2, in_3, in_4, in_5)
        tuple_get_item_node = bn_node[0]
        relu_node = relay.nn.relu(tuple_get_item_node)
        bn_relu = relay.Function([in_1, in_2, in_3, in_4, in_5], relu_node)
        bn_relu = bn_relu.with_attr("Composite", "bn_relu")

        # merged function
        r = relay.Call(bn_relu, [x, gamma, beta, moving_mean, moving_var])
        return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
Code example #23
    def expected():
        def create_external_func1(mod_, compiler_name, symbol_name):
            x_int = relay.var("x_int", shape=(10, 10))

            p0 = relay.nn.relu(x_int)
            q0 = relay.tanh(x_int)

            # reshapes
            p0_reshaped = relay.reshape(p0, newshape=100)
            q0_reshaped = relay.reshape(q0, newshape=100)
            ofms = relay.concatenate((p0_reshaped, q0_reshaped), 0)

            f1 = relay.Function([x_int], ofms)
            f1 = set_func_attr(f1, compiler_name, symbol_name)
            glb_f1 = relay.GlobalVar(symbol_name)
            mod_[glb_f1] = f1
            mod_ = relay.transform.InferType()(mod_)
            return glb_f1, mod_

        mod = tvm.IRModule()
        x = relay.var("x", shape=(10, 10))
        glb_symbol_f1, mod = create_external_func1(mod, "ethosu", "ethosu_0")
        ofms = relay.Call(glb_symbol_f1, [x])

        # splits
        (p0_flat, q0_flat) = relay.split(ofms, [100])
        # reshapes
        p0_flat_reshaped = relay.reshape(p0_flat, newshape=(10, 10))
        q0_flat_reshaped = relay.reshape(q0_flat, newshape=(10, 10))
        # original output
        tuple_out = relay.Tuple([p0_flat_reshaped, q0_flat_reshaped])

        p0 = relay.TupleGetItem(tuple_out, 0)
        q0 = relay.TupleGetItem(tuple_out, 1)
        r = relay.concatenate((p0, q0), axis=0)
        main = relay.Function([x], r)
        mod["main"] = main
        mod = relay.transform.InferType()(mod)
        return mod
Code example #24
def _full_offload():
    mod = tvm.IRModule({})

    # NPU function
    x = relay.var("x", shape=(1, 4, 4, 16), dtype="int8")
    max_pool = relay.nn.max_pool2d(x)
    composite_func = relay.Function([x], max_pool)
    composite_func = composite_func.with_attr("Composite", "ethos-u.pooling")
    inp = relay.var("input", shape=(1, 4, 4, 16), dtype="int8")
    compiler_func = relay.Function([inp], relay.Call(composite_func, [inp]))
    # 1 * 4 * 4 * 16 int8 elements = 256 bytes for the input, plus 256 for the output
    compiler_func = compiler_func.with_attr("used_memory", [256 + 256])
    npu_compiler_func = compiler_func.with_attr("Compiler", "ethos-u")
    g1 = relay.GlobalVar("g1")
    mod[g1] = npu_compiler_func

    # Main
    inp = relay.var("main_input", shape=(1, 4, 4, 16), dtype="int8")
    call = relay.Call(g1, [inp])
    main_func = relay.Function([inp], call)
    # io_used_memory: 256-byte input tensor + 256-byte output tensor
    main_func = main_func.with_attr("io_used_memory", 256 + 256)
    mod["main"] = main_func
    return mod
Code example #25
def test_sum_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    accum = relay.var('accum', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, 'int32'))
        new_accum = relay.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    loop_bound = 0
    i_data = np.array(loop_bound, dtype='int32')
    accum_data = np.array(0, dtype='int32')
    iarg = relay.var('i', shape=[], dtype='int32')
    aarg = relay.var('accum', shape=[], dtype='int32')
    mod[mod.entry_func] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
    result = veval(mod, i_data, accum_data)
    tvm.testing.assert_allclose(result.asnumpy(), sum(range(1, loop_bound + 1)))
Code example #26
def test_sum_loop():
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    accum = relay.var("accum", shape=[], dtype="int32")
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, "int32"))
        new_accum = relay.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    mod = relay.transform.InferType()(mod)
    loop_bound = 0
    i_data = np.array(loop_bound, dtype="int32")
    accum_data = np.array(0, dtype="int32")
    iarg = relay.var("i", shape=[], dtype="int32")
    aarg = relay.var("accum", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
    check_result([i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod)
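
The accumulator recursion built here matches this plain-Python sketch; with loop_bound = 0 the if-branch returns accum immediately, so the expected value sum(range(1, 1)) is 0:

def sum_up(i, accum):
    return accum if i == 0 else sum_up(i - 1, accum + i)
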
Code example #27
    def after_B():
        inputs = [
            relay.var("input_" + str(i), shape=(10, 10)) for i in range(8)
        ]
        add_relu_calls = []
        for i in range(4):
            x = relay.var("x" + str(i))
            y = relay.var("x" + str(i))
            add_relu = relay.add(x, y)
            add_relu = relay.nn.relu(add_relu)
            add_relu = relay.Function([x, y], add_relu)
            add_relu = add_relu.with_attr("Composite", "add_relu")
            add_relu = add_relu.with_attr("PartitionedFromPattern",
                                          "add_nn.relu_")
            add_relu_call = relay.Call(add_relu,
                                       [inputs[i * 2], inputs[i * 2 + 1]])
            add_relu_calls.append(add_relu_call)

        add = relay.add(add_relu_calls[0], add_relu_calls[1])
        sub = relay.subtract(add_relu_calls[2], add_relu_calls[3])
        out = relay.multiply(add, sub)
        return relay.Function(inputs, out)
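
Unlike example #3, this variant also records PartitionedFromPattern = "add_nn.relu_", the operator sequence covered by the matched pattern.
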
Code example #28
    def expected(dshape):
        p0 = relay.var("p0", shape=dshape)
        c = conv(p0)
        f0 = relay.Function(relay.analysis.free_vars(c), c)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        p01 = relay.var("p01", shape=dshape)
        c = conv(p01)
        f1 = relay.Function(relay.analysis.free_vars(c), c)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        p02 = relay.var("p02", shape=dshape)
        p12 = relay.var("p12", shape=dshape)
        concat1 = relay.concatenate((p02, p12), axis=1)
        f_concat1 = relay.Function([p02, p12], concat1)
        f_concat1 = f_concat1.with_attr("Primitive",
                                        tvm.tir.IntImm("int32", 1))

        dshape2 = (dshape[0], dshape[1] * 2, dshape[2], dshape[3])

        p03 = relay.var("p03", shape=dshape2)
        c = conv(p03)
        f2 = relay.Function(relay.analysis.free_vars(c), c)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        p04 = relay.var("p04", shape=dshape2)
        c = conv(p04)
        f3 = relay.Function(relay.analysis.free_vars(c), c)
        f3 = f3.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        p05 = relay.var("p05", shape=dshape)
        p15 = relay.var("p15", shape=dshape)
        concat2 = relay.concatenate((p05, p15), axis=1)
        f_concat2 = relay.Function([p05, p15], concat2)
        f_concat2 = f_concat2.with_attr("Primitive",
                                        tvm.tir.IntImm("int32", 1))

        x = relay.var("x", shape=dshape)
        c1 = relay.Call(f0, [x, relay.var("w1")])
        c2 = relay.Call(f1, [x, relay.var("w2")])
        concat = relay.Call(f_concat1, [c1, c2])
        c3 = relay.Call(f2, [concat, relay.var("w3")])
        c4 = relay.Call(f3, [concat, relay.var("w4")])
        out = relay.Call(f_concat2, [c3, c4])

        return relay.Function(relay.analysis.free_vars(out), out)
Code example #29
    def visit_call(self, call):
        # TODO(weberlo) use graph pattern matching?
        if not hasattr(call.op,
                       "name") or call.op.name not in ALLOWED_CONVERSION_OPS:
            new_args = []
            for arg in call.args:
                new_arg = self.visit(arg)
                if len(self.subtree_params) == 0:
                    new_args.append(new_arg)
                else:
                    assert len(self.subtree_params) == 1
                    param = next(iter(self.subtree_params))
                    pre_param = self.prefix_sb.let(param.name_hint, new_arg)
                    self.subtree_params.clear()
                    mid_param = relay.Var(param.name_hint, arg.checked_type)
                    self.prefix_binding_map[mid_param] = pre_param
                    # return new parameter, then we can use
                    # relay.analysis.free_vars at the end of the pass to generate
                    # new `mid_func` type signature
                    new_args.append(mid_param)
            return relay.Call(call.op, new_args, call.attrs)

        return super().visit_call(call)
Code example #30
def make_partitioned_function(relay_op):

    ifm0 = relay.analysis.free_vars(relay_op)
    ifm_shape = ifm0[0].type_annotation.shape

    ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")

    glb_ethosu = relay.GlobalVar("tvmgen_default_ethosu_main_0")

    func = (
        relay.Function(ifm0, relay_op)
        .with_attr("Inline", 1)
        .with_attr("Compiler", "ethos-u")
        .with_attr("global_symbol", "tvmgen_default_ethosu_main_0")
        .with_attr("Primitive", 1)
    )
    mod = tvm.IRModule()
    mod[glb_ethosu] = func
    mod = relay.transform.InferType()(mod)

    call = relay.Call(glb_ethosu, [ifm])
    mod["main"] = relay.Function([ifm], call)
    mod = relay.transform.InferType()(mod)

    return mod