Example #1
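Note: these snippets are excerpted from TVM's test suite, so shared helpers such as check_result, skip_codegen_test, and set_external_func_attr are defined in the original test files and omitted here. As a rough sketch, most examples assume imports along these lines (the exact set varies per example):

import numpy as np
import tvm
import tvm.testing
from tvm import relay, te
from tvm.relay import transform
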
def test_tensorrt_dynamic_batch_conv():
    if skip_codegen_test():
        return
    batches_to_test = [1, 1, 0, 2, 3, 0, 1, 3, 2]
    x_shape = (relay.Any(), 32, 8, 8)
    x_data = np.ones([max(batches_to_test)] +
                     list(x_shape)[1:]).astype("float32")
    k_shape = (16, 32, 3, 3)
    params = {"kernel": np.random.uniform(-1, 1, k_shape).astype("float32")}
    result_arr = [{} for _ in range(len(batches_to_test))]
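    # Build and run the same graph with and without TensorRT offload; the results are compared per batch size below.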
    for use_trt in [True, False]:
        x = relay.var("x", shape=x_shape, dtype="float32")
        kernel = relay.var("kernel", shape=k_shape, dtype="float32")
        out = relay.nn.conv2d(x,
                              kernel,
                              channels=16,
                              kernel_size=(3, 3),
                              groups=1)
        f = relay.Function([x, kernel], out)
        mod = tvm.IRModule()
        mod["main"] = f
        if use_trt:
            mod, _ = tensorrt.partition_for_tensorrt(mod, params)

        if not skip_runtime_test():
            with relay.build_config(opt_level=3):
                relay_exec = relay.create_executor("vm",
                                                   mod=mod,
                                                   ctx=tvm.cpu(0),
                                                   target="llvm")

            for i, batch_size in enumerate(batches_to_test):
                result_arr[i][use_trt] = relay_exec.evaluate()(
                    x_data[:batch_size, ...], **params)

    if not skip_runtime_test():
        for i in range(len(batches_to_test)):
            assert_result_dict_holds(result_arr[i])
Example #2
def test_relay_strided_slice_legalize(ifm_shape, begin, end):

    ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
    strided_slice = relay.op.strided_slice(ifm, begin, end)
    func = relay.Function([ifm], strided_slice)
    mod = tvm.IRModule()
    mod["main"] = func
    mod = relay.transform.InferType()(mod)

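    # Each pattern-table entry is (composite name, dataflow pattern, predicate).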
    strided_slice_pattern_table = [
        (
            ethosu.StridedSliceParams.composite_name,
            ethosu.strided_slice_pattern(),
            lambda pat: ethosu.StridedSliceParams(pat).is_valid(),
        ),
    ]

    mod = partition_ethosu_by_table(mod, strided_slice_pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.StridedSliceRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod = relay.transform.InferType()(mod)

    ext_func = mod["tvmgen_default_ethos_u_main_0"]

    identity = ext_func.body
    assert identity.op.name == "contrib.ethosu.identity"

    # check that the strided_slice is still there
    strided_slice = identity.args[0]
    assert strided_slice.op.name == "strided_slice"

    # check that identity's output shape matches strided slice's output shape
    slice_shape = [a - b for a, b in zip(end, begin)]
    assert list(identity.checked_type.shape) == slice_shape
Example #3
def test_function_taking_adt_ref_tuple():
    mod = tvm.IRModule()
    prelude = relay.prelude.Prelude(mod)
    intrp = create_executor("debug", mod)
    _, cons, nil = prelude.mod.get_type("List")

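    # Build the list cons(tensor, nil) directly from ADT constructor values.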
    nil_value = ConstructorValue(nil.tag, [], nil)
    cons_value = ConstructorValue(
        cons.tag,
        [nd.array(np.random.rand(1, 10).astype("float32")), nil_value],
        cons,
    )

    ref_value = RefValue(nd.array(np.random.rand(1, 10).astype("float32")))
    tuple_value = container.tuple_object(
        [nd.array(np.random.rand(1, 10).astype("float32")) for _ in range(10)]
    )

    id_func = intrp.evaluate(prelude.id)

    res_nil = id_func(nil_value)
    assert res_nil.tag == nil_value.tag
    assert len(res_nil.fields) == 0

    res_cons = id_func(cons_value)
    assert res_cons.tag == cons_value.tag
    assert len(res_cons.fields) == len(cons_value.fields)
    tvm.testing.assert_allclose(res_cons.fields[0].numpy(), cons_value.fields[0].numpy())
    assert isinstance(res_cons.fields[1], ConstructorValue)
    assert res_cons.fields[1].tag == nil.tag
    assert len(res_cons.fields[1].fields) == 0

    res_ref = id_func(ref_value)
    tvm.testing.assert_allclose(res_ref.value.numpy(), ref_value.value.numpy())

    res_tuple = id_func(tuple_value)
    for i in range(10):
        tvm.testing.assert_allclose(res_tuple[i].numpy(), tuple_value[i].numpy())
Example #4
def test_extern_dnnl():
    if not tvm.get_global_func("relay.ext.dnnl", True):
        print("skip because DNNL codegen is not available")
        return

    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)

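    # These vars intentionally reuse the names "data0"/"weight0" so the same inputs bind to both the reference module and the external call below.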
    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
    depthwise_conv2d_1 = relay.nn.conv2d(
        data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    depthwise_conv2d_2 = relay.nn.conv2d(
        depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)

    f = relay.Function([data1, weight1, weight2], out)
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f

    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data0, weight0, weight0])
    mod = tvm.IRModule.from_expr(call)

    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)

    ref_ex = relay.create_executor("graph", mod=ref_mod, ctx=tvm.cpu())
    ref_res = ref_ex.evaluate()(i_data, w_data, w_data)
    check_result(
        mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5
    )
Example #5
def test_tensorrt_simple():
    if skip_codegen_test():
        return
    dtype = "float32"
    xshape = (1, 3, 2, 2)
    yshape = (1, 3, 1, 1)
    zshape = (1, 1, 1, 1)
    x = relay.var("x", shape=(xshape), dtype=dtype)
    y = relay.var("y", shape=(yshape), dtype=dtype)
    z = relay.var("z", shape=(zshape), dtype=dtype)
    w = z * (x + y)
    out = relay.nn.relu(w)
    f = relay.Function([x, y, z], out)

    x_data = np.random.uniform(-1, 1, xshape).astype(dtype)
    y_data = np.random.uniform(-1, 1, yshape).astype(dtype)
    z_data = np.random.uniform(-1, 1, zshape).astype(dtype)

    result_dict = dict()
    for mode in ["vm", "graph"]:
        for use_trt in [True, False]:
            mod = tvm.IRModule()
            mod["main"] = f
            result_key = mode + ("_trt" if use_trt else "")
            if use_trt:
                mod, config = tensorrt.partition_for_tensorrt(mod)
                with tvm.transform.PassContext(
                    opt_level=3, config={"relay.ext.tensorrt.options": config}
                ):
                    relay_exec = relay.create_executor(mode, mod=mod, ctx=tvm.gpu(0), target="cuda")
            else:
                with tvm.transform.PassContext(opt_level=3):
                    relay_exec = relay.create_executor(mode, mod=mod, ctx=tvm.gpu(0), target="cuda")
            if not skip_runtime_test():
                result_dict[result_key] = relay_exec.evaluate()(x_data, y_data, z_data)

    if not skip_runtime_test():
        assert_result_dict_holds(result_dict)
Example #6
def test_too_specific_match():
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")

    v = relay.Var("v")
    match = relay.Match(
        v,
        [
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(
                            cons, [relay.PatternWildcard(), relay.PatternWildcard()]
                        ),
                    ],
                ),
                v,
            )
        ],
    )

    unmatched = unmatched_cases(match, mod)

    # will not match nil or a list of length 1
    nil_found = False
    single_length_found = False
    assert len(unmatched) == 2
    for case in unmatched:
        assert isinstance(case, relay.PatternConstructor)
        if case.constructor == nil:
            nil_found = True
        if case.constructor == cons:
            assert isinstance(case.patterns[1], relay.PatternConstructor)
            assert case.patterns[1].constructor == nil
            single_length_found = True
Example #7
    def partition(dpu_target):
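        # Note: "annotation" is a helper from the enclosing test module; "dpu_target" names the DPU target to annotate for.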
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3),
                                                      "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16, ), "float32"))
        bn_mmean = relay.var("bn_mean", relay.TensorType((16, ), "float32"))
        bn_mvar = relay.var("bn_var", relay.TensorType((16, ), "float32"))

        conv = relay.nn.conv2d(data=data,
                               weight=weight,
                               kernel_size=(3, 3),
                               channels=16,
                               padding=(1, 1))
        bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean,
                                        bn_mvar)

        func = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar],
            bn_output.astuple())
        mod = tvm.IRModule()
        mod["main"] = func
        params = {}
        params["weight"] = np.random.rand(16, 3, 3, 3).astype("float32")
        params["bn_gamma"] = np.random.rand(16).astype("float32")
        params["bn_beta"] = np.random.rand(16).astype("float32")
        params["bn_mean"] = np.random.rand(16).astype("float32")
        params["bn_var"] = np.random.rand(16).astype("float32")
        mod = annotation(mod, params, dpu_target)

        opt_pass = tvm.transform.Sequential([
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ])

        with tvm.transform.PassContext(opt_level=3):
            mod = opt_pass(mod)
        return mod
Example #8
def test_all_primary_operands_tensor_constants():
    dtype = "int8"
    shape = (1, 3, 3, 32)
    x0 = generate_variable("x0", shape, dtype)
    x1 = generate_variable("x1", shape, dtype)
    z1 = make_binary_op(
        relay.qnn.op.add,
        x0,
        x1,
        input_0_scale=0.0128,
        input_0_zero_point=32,
        input_1_scale=0.256,
        input_1_zero_point=-64,
    )

    lf = relay.Function([x0, x1], z1, relay.TensorType(shape, dtype))
    lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")

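    # Both primary operands are tensor constants, so ScalarToTensorConstants should leave the body unchanged (verified below).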
    rng = np.random.default_rng(12345)
    y0 = relay.const(rng.integers(-128, high=127, size=shape, dtype=dtype))
    y1 = relay.const(rng.integers(-128, high=127, size=shape, dtype=dtype))
    c0 = relay.Call(lf, [y0, y1])
    ef = relay.Function([], c0, relay.TensorType(shape, dtype))

    ev = relay.GlobalVar("external_function")
    ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)
    c = relay.Call(ev, [])
    mf = relay.Function([], c, relay.TensorType(shape, dtype))
    mv = relay.GlobalVar("main")

    mod = tvm.IRModule()
    mod[ev] = ef
    mod[mv] = mf

    mod = relay.transform.InferType()(mod)
    mod = ScalarToTensorConstants()(mod)
    new_mod = relay.transform.InferType()(mod)
    assert tvm.ir.structural_equal(mod[ev].body, new_mod[ev].body)
Example #9
    def run(dtype):
        mod = tvm.IRModule()
        p = Prelude(mod)

        # tensor array
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        v3 = relay.var("v2")
        tensor_array = p.get_var("tensor_array", dtype)
        tensor_array1 = tensor_array(relay.const(3))
        write_func = p.get_var("tensor_array_write", dtype)
        split_func = p.get_var("tensor_array_split", dtype)
        tensor2 = p.get_var("tensor2", dtype)
        tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))

        # value tensor
        value = relay.var("value")

        # lengths tensor
        ta_len = relay.var("length")

        # create the scatter function
        tensor_array_split = split_func(tensor_array1, tensor2(value), ta_len)
        mod["main"] = relay.Function([v1, v2, v3, value, ta_len], tensor_array_split)

        # initialize and check
        v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        value_data = np.random.uniform(low=0.0, high=8.0, size=(4, 3)).astype(dtype)
        length_data = np.array([2, 2], dtype="int32")
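        # split writes the two length-2 pieces of value into slots 0 and 1; slot 2 keeps v3_data.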
        expected = np.concatenate([value_data, v3_data])
        expected = np.split(expected, indices_or_sections=[2, 4])
        check_tensor_array(
            mod, expected, *(v1_data, v2_data, v3_data, value_data, length_data), dtype=dtype
        )
Example #10
def test_before_partial_eval():
    """Test transformation before PartialEval"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], x * y)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    seq = tvm.transform.Sequential([
        transform.LazyGradientInit(),
        transform.PartialEvaluate(),
        transform.DeadCodeElimination()
    ])
    mod = seq(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])]))

    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    forward, (grad_x, grad_y) = ex.evaluate(back_func)(x, y)
    assert_allclose(forward.asnumpy(), x.asnumpy() * y.asnumpy())
    assert_allclose(grad_x.asnumpy(), y.asnumpy())
    assert_allclose(grad_y.asnumpy(), x.asnumpy())
Example #11
def test_constant_propagation():
    ones = np.ones(shape=(8, 8), dtype="float32")

    def expected():
        mod = tvm.IRModule()
        x = relay.const(ones)
        y = relay.var("y", shape=(8, 8))
        x0 = relay.const(ones)
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([y0], add)
        func = set_func_attr(func, "ccompiler", "ccompiler_0")
        glb_0 = relay.GlobalVar("ccompiler_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [y])
        log = relay.log(add_call)
        main = relay.Function([y], log)
        mod["main"] = main
        return mod

    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    add = x + y
    log = relay.log(add)
    f = relay.Function([x, y], log)
    f = bind_params_by_name(f, {"x": tvm.nd.array(ones)})
    mod = tvm.IRModule()
    mod["main"] = f
    mod = WhiteListAnnotator(["add"], "ccompiler")(mod)
    mod = transform.PartitionGraph()(mod)

    expected_mod = expected()
    assert tvm.ir.structural_equal(mod, expected_mod, map_free_vars=True)

    y_data = np.random.rand(8, 8).astype('float32')
    np_add = ones + y_data
    check_result(mod, {"y": y_data}, (8, 8), np.log(np_add))
Example #12
    def expected():
        mod = tvm.IRModule()

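        # Note: "target" is supplied by the enclosing test; it names the external compiler used for partitioning.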
        # function 0
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10))
        f0_i1 = relay.var(target + "_0_i1")
        f0_i2 = relay.var(target + "_0_i2")
        f0_i3 = relay.var(target + "_0_i3")
        f0_i4 = relay.var(target + "_0_i4")
        f0_n0 = relay.nn.batch_norm(f0_i0, f0_i1, f0_i2, f0_i3, f0_i4)
        f0_n1 = f0_n0[1]
        f0_n2 = relay.nn.relu(f0_n0[0])
        f0_o0 = relay.Tuple([f0_n2, f0_n1])
        func0 = relay.Function([f0_i0, f0_i1, f0_i2, f0_i3, f0_i4], f0_o0)

        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", target + "_0")
        gv0 = relay.GlobalVar(target + "_0")
        mod[gv0] = func0

        # body
        data = relay.var('data', shape=(10, 10))
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")
        function_out = gv0(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        get_out0 = relay.TupleGetItem(function_out, 0)
        get_out1 = relay.TupleGetItem(function_out, 1)
        out_2 = relay.tanh(get_out1)
        out_3 = relay.log(get_out1)
        out = relay.Tuple([get_out0, out_2, out_3])
        func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar],
                              out)
        mod["main"] = func
        return mod
Example #13
def test_recursive_concat():
    """
    fn @concat_loop(%i: int32, %st: (any, 1)) -> (any, 1) {
        if (%i < 10) {
            let %i_vec = reshape(%i, newshape=(1, 1))
            let %new_st = concatenate((%st, %i_vec), axis=0)
            concat_loop(%i + 1, %new_st)
        } else {
            st
        }
    }
    """
    # Initial Values.
    i = relay.var('i', shape=(), dtype='int32')
    st = relay.var('st', shape=(relay.Any(), 1), dtype='int32')

    def _cond(i, st):
        return relay.op.min(relay.op.less(i, int32(10)))

    def _body(i, st):
        i_vec = relay.op.reshape(i, (1,1))
        ret = relay.op.concatenate([st, i_vec], axis=0)
        return i + int32(1), ret

    loop = while_loop(_cond, [i, st], _body)
    start = relay.var('start', shape=(), dtype='int32')
    body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
    func = relay.Function([start], relay.TupleGetItem(body, 1))
    mod = tvm.IRModule()
    mod["main"] = func
    data = np.array(0, dtype='int32')
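    # Expected result: the initial [[0]] with 0..9 appended, giving shape (11, 1).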
    ref = np.array([0] + list(range(10))).reshape((11, 1)).astype("int32")
    # TODO(@jroesch): After LambdaLift pass, TypeInfer pass will fail
    # so currently we cannot run this test case on VM
    for kind in ["debug"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()(data)
        np.testing.assert_allclose(result.asnumpy(), ref)
Example #14
def test_sequential_with_scoping():
    shape = (1, 2, 3)
    c_data = np.array(shape).astype("float32")
    tp = relay.TensorType(shape, "float32")
    def before():
        c = relay.const(c_data)
        x = relay.var("x", tp)
        y = relay.add(c, c)
        y = relay.multiply(y, relay.const(2, "float32"))
        y = relay.add(x, y)
        z = relay.add(y, c)
        z1 = relay.add(y, c)
        z2 = relay.add(z, z1)
        return relay.Function([x], z2)

    def expected():
        x = relay.var("x", tp)
        c_folded = (c_data + c_data) * 2
        y = relay.add(x, relay.const(c_folded))
        z = relay.add(y, relay.const(c_data))
        z1 = relay.add(z, z)
        return relay.Function([x], z1)

    seq = _transform.Sequential([
        relay.transform.InferType(),
        relay.transform.FoldConstant(),
        relay.transform.EliminateCommonSubexpr(),
        relay.transform.AlterOpLayout()
    ])

    mod = tvm.IRModule({"main": before()})
    with relay.build_config(opt_level=3):
        with tvm.target.create("llvm"):
            mod = seq(mod)

    zz = mod["main"]
    zexpected = run_infer_type(expected())
    assert analysis.alpha_equal(zz, zexpected)
Example #15
def test_constant_shape_with_external_codegen():
    mod = tvm.IRModule()
    shape = (relay.Any(), 25)
    dtype = "float32"

    # external function
    x = relay.var("x", shape=shape, dtype=dtype)
    weight = relay.const(np.random.rand(5, 25).astype("float32"), dtype="float32")
    out = relay.nn.dense(x, weight)
    f1 = relay.Function([x], out)
    f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    f1 = f1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    f1 = f1.with_attr("Compiler", "a")
    glb_f1 = relay.GlobalVar("f1")
    mod[glb_f1] = f1
    mod = relay.transform.InferType()(mod)

    # Main function
    x = relay.var("x", shape=shape, dtype=dtype)
    mod["main"] = relay.Function([x], glb_f1(x))
    comp = relay.vm.VMCompiler()
    opt_mod, _ = comp.optimize(mod, target="llvm")
    assert "shape_func" in opt_mod.astext(False)
Example #16
def verify_any_conv2d(
    data_shape,
    kernel_shape,
    strides,
    padding,
    dilation,
    static_data_shape,
    ref_out_shape,
):
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
    y = relay.nn.conv2d(data,
                        kernel,
                        strides,
                        padding,
                        dilation,
                        kernel_size=kernel_shape[2:4])
    mod["main"] = relay.Function([data, kernel], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
    check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
Example #17
def verify_any_reshape(x_shape,
                       newshape,
                       x_np_shape,
                       out_shape,
                       variable_newshape=False):
    x = relay.var("x", shape=x_shape, dtype="float32")
    relu_x = relay.nn.relu(x)
    data = np.random.uniform(size=x_np_shape).astype("float32")
    params = [x]
    args = [data]

    if variable_newshape:
        newshape_var = relay.var("newshape",
                                 shape=(len(newshape), ),
                                 dtype="int64")
        params.append(newshape_var)
        args.append(np.array(newshape, dtype="int64"))
        newshape = newshape_var

    y = relay.reshape(relu_x, newshape=newshape)
    mod = tvm.IRModule()
    mod["main"] = relay.Function(params, y)
    check_result(args, mod, data, flatten=True)
Example #18
def test_multiple_type_param_defn():
    glob_typ_var = relay.GlobalTypeVar("Either")
    typ_var_a = relay.TypeVar("A")
    typ_var_b = relay.TypeVar("B")
    prog = relay.TypeData(
        glob_typ_var,
        [typ_var_a, typ_var_b],
        [
            relay.Constructor("Left", [typ_var_a], glob_typ_var),
            relay.Constructor("Right", [typ_var_b], glob_typ_var),
        ],
    )
    mod = tvm.IRModule()
    mod[glob_typ_var] = prog
    assert_parse_module_as(
        """
        type Either[A, B] {
          Left(A),
          Right(B),
        }
        """,
        mod,
    )
Example #19
def test_list_update():
    expected = list(range(10))

    mod = tvm.IRModule()
    p = Prelude(mod)

    nil = p.nil
    cons = p.cons
    update = p.update

    l = nil()
    # create zero initialized list
    for i in range(len(expected)):
        l = cons(relay.const(0), l)

    # set value
    for i, v in enumerate(expected):
        l = update(l, relay.const(i), relay.const(v))

    f = relay.Function([], l)
    mod["main"] = f
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
Example #20
def test_sum_loop(target, dev):
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    accum = relay.var("accum", shape=[], dtype="int32")
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, "int32"))
        new_accum = relay.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    mod = relay.transform.InferType()(mod)
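    # With loop_bound = 0 the recursion returns the accumulator immediately; the expected sum over range(1, 1) is 0.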
    loop_bound = 0
    i_data = np.array(loop_bound, dtype="int32")
    accum_data = np.array(0, dtype="int32")
    iarg = relay.var("i", shape=[], dtype="int32")
    aarg = relay.var("accum", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
    check_result(target, dev, [i_data, accum_data],
                 sum(range(1, loop_bound + 1)), mod)
Example #21
def test_list_update():
    expected = list(range(10))

    mod = tvm.IRModule()
    p = Prelude(mod)

    _, cons, nil = mod.get_type("List")
    update = mod.get_global_var("update")

    l = nil()
    # create zero initialized list
    for i in range(len(expected)):
        l = cons(relay.const(0), l)

    # set value
    for i, v in enumerate(expected):
        l = update(l, relay.const(i), relay.const(v))

    f = relay.Function([], l)
    mod["main"] = f
    for tgt, dev in tvm.testing.enabled_targets():
        result = veval(mod, device=dev, target=tgt)
        tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
Example #22
def test_nested_match_pattern():
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    v = relay.Var("v")
    w = relay.Var("w")
    match = relay.Let(
        v,
        box_ctor(box_ctor(relay.const(2))),
        relay.Match(
            v,
            [
                relay.Clause(
                    relay.PatternConstructor(box_ctor, [
                        relay.PatternConstructor(box_ctor,
                                                 [relay.PatternVar(w)])
                    ]),
                    w,
                )
            ],
        ),
    )
    match_val = run_as_python(match, mod)
    assert_tensor_value(match_val, 2)
Example #23
def test_print_ir(capfd):
    shape = (1, 2, 3)
    tp = relay.TensorType(shape, "float32")
    x = relay.var("x", tp)
    y = relay.add(x, x)
    y = relay.multiply(y, relay.const(2, "float32"))
    func = relay.Function([x], y)

    seq = tvm.transform.Sequential([
        relay.transform.InferType(),
        relay.transform.FoldConstant(),
        tvm.transform.PrintIR(),
        relay.transform.DeadCodeElimination()
    ])

    mod = tvm.IRModule({"main": func})
    with relay.build_config(opt_level=3):
        mod = seq(mod)

    out = capfd.readouterr().err

    assert "PrintIR" in out
    assert "multiply" in out
Example #24
    def expected_same_output_region():
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        z = relay.var("z", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        log = relay.log(x0)
        sub = x0 - y0
        mul = log * sub
        # The partitioned graph contains log, subtract, and multiply
        func = relay.Function([x0, y0], mul)
        func = set_func_attr(func, "ccompiler", "ccompiler_0")
        glb_0 = relay.GlobalVar("ccompiler_0")
        mod[glb_0] = func
        mod = transform.InferType()(mod)

        add = x + y
        call = relay.Call(glb_0, [add, z])
        main = relay.Function([x, y, z], call)
        mod["main"] = main
        mod = transform.InferType()(mod)
        return mod
Example #25
def test_loop_free_var(target, dev):
    x = relay.var("x", shape=(), dtype="int32")
    i = relay.var("i", shape=(), dtype="int32")
    s = relay.var("s", shape=(), dtype="int32")

    def cond(i, _):
        return i < relay.const(10, dtype="int32")

    def body_no_free_var(i, acc):
        incr = relay.const(1, "int32")
        return i + incr, acc + i

    def body_with_free_var(i, acc):
        incr = relay.const(1, "int32")
        return i + incr, acc + x

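    # body_no_free_var sums 0..9 (45); body_with_free_var adds the free input x (= 1) on each of the ten iterations (10).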
    for args, body, expected in zip([[], [1]], [body_no_free_var, body_with_free_var], [45, 10]):
        loop = while_loop(cond, [i, s], body)
        tup = loop(relay.const(0, dtype="int32"), relay.zeros(shape=(), dtype="int32"))
        ret = relay.TupleGetItem(tup, 1)
        mod = tvm.IRModule()
        mod["main"] = relay.Function(relay.analysis.free_vars(ret), ret)
        check_result(target, dev, args, expected, mod)
Example #26
    def get_mod():
        x1 = relay.var("x1", shape=None)
        x2 = relay.var("x2", shape=None)
        z1 = x1 + x2
        lf = relay.Function([x1, x2], z1, relay.TensorType((), "float32"))
        lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")

        y0 = relay.expr.const(5, "float32")
        y1 = relay.expr.const(3, "float32")
        c0 = relay.Call(lf, [y0, y1])
        ef = relay.Function([], c0, relay.TensorType((), "float32"))

        ev = relay.GlobalVar("external_function")
        ef = set_external_func_attr(ef, "foo", ev.name_hint)
        c = relay.Call(ev, [])
        mf = relay.Function([], c, relay.TensorType((), "float32"))
        mv = relay.GlobalVar("main")

        mod = tvm.IRModule()
        mod[ev] = ef
        mod[mv] = mf
        mod = relay.transform.InferType()(mod)
        return mod
Example #27
def test_prim_func_pass():
    @tvm.tir.transform.prim_func_pass(opt_level=1)
    class TestReplaceFunc:
        """Simple test function to replace one argument to another."""
        def __init__(self, new_func):
            self.new_func = new_func

        def transform_function(self, func, mod, ctx):
            return self.new_func

    x = te.var('x')
    y = te.var('y')
    b = tvm.tir.decl_buffer((x, ), "float32")
    stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))

    func = tvm.tir.PrimFunc([x, y, b], stmt)

    new_func = tvm.tir.PrimFunc([x, y, b], tvm.tir.Evaluate(0))

    mod = tvm.IRModule({"main": func})
    mod = TestReplaceFunc(new_func)(mod)

    assert tvm.ir.structural_equal(mod["main"].body, new_func.body)
Example #28
def test_global_function():
    m = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.Var("x", t)
    d = GlobalVar("double")
    m[d] = relay.Function([x], x + x)
    y = relay.Var("y", t)
    q = GlobalVar("q")
    m[q] = relay.Function([y], d(d(y)))
    g = GlobalVar("grad")
    m = tvm.relay.transform.InferType()(m)
    m[g] = tvm.relay.transform.gradient(q, m)
    m = tvm.relay.transform.InferType()(m)
    back_func = m[g]
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor(mod=m)
    x = rand(dtype, *shape)
    forward, (grad, ) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
Example #29
def test_mixed_input_type():
    mod = tvm.IRModule()
    dtype = "float32"
    static_data_shape = (9, 4)
    data_shape = (relay.Any(), 4)
    tensor_type = relay.TensorType(data_shape, dtype)
    tuple_type = relay.TupleType([tensor_type, tensor_type])
    data0 = relay.var("d0",
                      type_annotation=relay.TupleType(
                          [tuple_type, tensor_type]))
    data1 = relay.var("d1", shape=(relay.Any(), 4), dtype=dtype)
    data_tuple = relay.expr.TupleWrapper(data0, 2)
    nested_data_tuple = relay.expr.TupleWrapper(data_tuple[0], 2)
    y = nested_data_tuple[1] * data_tuple[1] + data1
    mod["main"] = relay.Function([data0, data1], y)
    data_np0 = np.random.uniform(size=static_data_shape).astype(dtype)
    data_np1 = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out_shape = (9, 4)
    for kind in ["vm"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()([[data_np0, data_np0], data_np0], data_np1)
        assert result.asnumpy().shape == ref_out_shape, \
            "Shape mismatch: expect %s but got %s." % (str(ref_out_shape), str(result.asnumpy().shape))
Example #30
def make_partitioned_function(relay_op):

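    # Recover the input variable and its shape from the free variables of the given Relay op.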
    ifm0 = relay.analysis.free_vars(relay_op)
    ifm_shape = ifm0[0].type_annotation.shape

    ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")

    glb_ethosu = relay.GlobalVar("tvmgen_default_ethosu_main_0")

    func = (
        relay.Function(ifm0, relay_op)
        .with_attr("Inline", 1)
        .with_attr("Compiler", "ethos-u")
        .with_attr("global_symbol", "tvmgen_default_ethosu_main_0")
        .with_attr("Primitive", 1)
    )
    mod = tvm.IRModule()
    mod[glb_ethosu] = func
    mod = relay.transform.InferType()(mod)

    call = relay.Call(glb_ethosu, [ifm])
    mod["main"] = relay.Function([ifm], call)
    mod = relay.transform.InferType()(mod)

    return mod