Example #1
def test_function_alpha_equal():
    tt1 = relay.TensorType((1, 2, 3), "float32")
    tt2 = relay.TensorType((4, 5, 6), "int8")
    tt3 = relay.TupleType([tt1, tt2])

    v1 = relay.Var("v1", tt1)
    v2 = relay.Var("v2", tt2)
    v3 = relay.Var("v3", tt3)
    v4 = relay.Var("v4", tt2)
    vret = relay.Constant(tvm.nd.array(np.ones(1)))

    tp1 = relay.TypeVar("tp1", relay.Kind.Type)
    tp2 = relay.TypeVar("tp2", relay.Kind.Type)
    tp3 = relay.TypeVar("tp3", relay.Kind.Shape)
    tp4 = relay.TypeVar("tp4", relay.Kind.Shape)

    basic_args = [relay.Var("v3", tt1), relay.Var("v4", tt2)]
    basic_tps = [tp1, tp2]

    func = relay.Function([v1, v2], v1,
                          tt2, basic_tps)
    mapped = relay.Function(basic_args, basic_args[0], tt2, basic_tps)
    assert alpha_equal(func, mapped)

    fewer_params = relay.Function([relay.Var("v4", tt2)], v4, tt2, basic_tps)
    assert not alpha_equal(func, fewer_params)

    more_params = relay.Function([relay.Var("v3", tt1),
                                  relay.Var("v4", tt2),
                                  relay.Var("v2", tt2)], v4, tt2, basic_tps)
    assert not alpha_equal(func, more_params)

    params_unordered = relay.Function([v2, v1], v1,
                                      tt2, basic_tps)
    assert not alpha_equal(func, params_unordered)

    params_mismatch = relay.Function([v1, v3], v1,
                                     tt2, basic_tps)
    assert not alpha_equal(func, params_mismatch)

    # also would not typecheck
    ret_type_mismatch = relay.Function(basic_args, v4, tt1, basic_tps)
    assert not alpha_equal(func, ret_type_mismatch)

    # also mis-typed
    different_body = relay.Function(basic_args, v3, tt2, basic_tps)
    assert not alpha_equal(func, different_body)

    fewer_type_params = relay.Function(basic_args, v4, tt2, [tp1])
    assert not alpha_equal(func, fewer_type_params)

    more_type_params = relay.Function(basic_args, v4, tt2, [tp1, tp2, tp3])
    assert not alpha_equal(func, more_type_params)

    type_params_unordered = relay.Function(basic_args, v4, tt2, [tp2, tp1])
    assert not alpha_equal(func, type_params_unordered)

    different_type_params = relay.Function(basic_args, v4, tt2, [tp3, tp4])
    assert not alpha_equal(func, different_type_params)

    # a well-typed example that also differs in body, ret type, and type params
    tupled_example = relay.Function(basic_args, relay.Tuple([v3, v4]), tt3)
    assert not alpha_equal(func, tupled_example)

    # nullable
    no_ret_type = relay.Function(basic_args, v4, None, [tp1, tp2])
    # both null
    assert alpha_equal(no_ret_type, no_ret_type)
    # one null
    assert not alpha_equal(func, no_ret_type)
    assert not alpha_equal(no_ret_type, func)
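alpha_equal here is the older Relay structural comparator; in current TVM the same checks are written with tvm.ir.structural_equal (a minimal sketch, assuming it maps bound function parameters the way alpha_equal did):

    assert tvm.ir.structural_equal(func, mapped)
    assert not tvm.ir.structural_equal(func, params_unordered)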
Example #2
def test_cast():
    x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
    y = x.astype("int32")
    yy = relay.ir_pass.infer_type(y)
    assert "dtype=" in yy.astext()
    assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
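The relay.ir_pass namespace used here was removed in later TVM releases; the same check can be written with the module-level pass API (a sketch, assuming the tvm.IRModule / relay.transform.InferType combination also used in Example #4):

    x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
    mod = tvm.IRModule.from_expr(relay.Function([x], x.astype("int32")))
    mod = relay.transform.InferType()(mod)
    assert mod["main"].body.checked_type == relay.TensorType((8, 9, 4), "int32")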
Example #3
def test_const_inline():
    t = relay.TensorType([], "float32")
    d = Var("d", t)
    double = Function([d], d + d)
    orig = double(const(4.0))
    assert tvm.ir.structural_equal(dcpe(orig), const(8.0))
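Here const and dcpe are helpers from the partial-evaluation test file this snippet comes from; dcpe (as defined there) runs PartialEvaluate followed by DeadCodeElimination, which is why applying double to const(4.0) folds the whole call down to const(8.0).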
Example #4
    def expected():
        # function variables for conv2d
        data0 = relay.var("data0", relay.TensorType((1, 3, 224, 224),
                                                    "float32"))
        weight0 = relay.var("weight0",
                            relay.TensorType((16, 3, 3, 3), "float32"))
        conv = relay.nn.conv2d(data=data0,
                               weight=weight0,
                               kernel_size=(3, 3),
                               channels=16,
                               padding=(1, 1))

        # function variables for batch_norm
        bn_gamma0 = relay.var("bn_gamma0", relay.TensorType((16, ), "float32"))
        bn_beta0 = relay.var("bn_beta0", relay.TensorType((16, ), "float32"))
        bn_mmean0 = relay.var("bn_mean0", relay.TensorType((16, ), "float32"))
        bn_mvar0 = relay.var("bn_var0", relay.TensorType((16, ), "float32"))
        bn = relay.nn.batch_norm(conv, bn_gamma0, bn_beta0, bn_mmean0,
                                 bn_mvar0)
        func0 = relay.Function(
            [data0, weight0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0],
            bn.astuple())
        func0 = set_func_attr(func0, "vitis_ai", "vitis_ai_0")
        gv0 = relay.GlobalVar("vitis_ai_0")
        mod = tvm.IRModule()
        mod[gv0] = func0
        mod = relay.transform.InferType()(mod)

        # main function
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3),
                                                      "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16, ), "float32"))
        bn_mmean = relay.var("bn_mean", relay.TensorType((16, ), "float32"))
        bn_mvar = relay.var("bn_var", relay.TensorType((16, ), "float32"))
        call0 = gv0(data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        mod["main"] = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar], call0)
        mod = relay.transform.InferType()(mod)
        return mod
Example #5
def verify_bias_add(d_shape, b_shape, axis=1):
    data = relay.var("data", relay.TensorType(d_shape, "float32"))
    bias = relay.var("bias", relay.TensorType(b_shape, "float32"))
    fwd_func = relay.Function([data, bias],
                              relay.nn.bias_add(data, bias, axis=axis))
    check_grad(fwd_func)
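Typical invocations (hypothetical shapes, assuming the usual rule that b_shape must equal the extent of d_shape at the given axis):

    verify_bias_add((1, 16, 5, 5), (16,))           # bias along channel axis 1
    verify_bias_add((1, 5, 5, 16), (16,), axis=-1)  # bias along the last axis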
Example #6
def test_tuple():
    tp = relay.TensorType((10, ))
    x = relay.var("x", tp)
    res = relay.Tuple([x, x])
    assert (relay.ir_pass.infer_type(res).checked_type == relay.TupleType(
        [tp, tp]))
Example #7
    B = T.match_buffer(b, [128, 128], scope="scopeA")
    C = T.match_buffer(c, [128, 128], scope="scopeB")
    D = T.match_buffer(d, [128, 128], scope="scopeC")

    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                D[vi, vj] = C[vi, vj]
            D[vi, vj] = D[vi, vj] + A[vi, vk] * B[vj, vk]


gem_ty = relay.FuncType(
    [
        relay.TupleType([
            relay.TensorType((128, 128), "float32"),
            relay.TensorType((128, 128), "float32"),
        ]),
        relay.TensorType((128, 128), "float32"),
    ],
    relay.TensorType((128, 128), "float32"),
)


def test_get_prim_func_arg_and_result_constraints():
    scopes = tir.analysis.get_prim_func_arg_and_result_memory_constraints(
        gem, gem_ty)
    assert [x for x in scopes] == ["scopeA", "scopeB", "scopeC"]


def test_apply_prim_func_arg_and_result_memory_constraints():
Example #8
def test_single_op():
    "Program: fn (%x : float32) { let %t1 = f(%x); %t1 }"
    x = relay.var("x", shape=[])
    func = relay.Function([x], op.log(x))
    ttype = relay.TensorType([], dtype="float32")
    assert_has_type(func, relay.FuncType([ttype], ttype))
Example #9
def verify_yolo_reorg(shape, stride, out_shape):
    x = relay.var("x", relay.TensorType(shape, "float32"))
    z = relay.vision.yolo_reorg(x, stride=stride)
    zz = relay.ir_pass.infer_type(z)
    assert "stride=" in z.astext()
    assert zz.checked_type == relay.ty.TensorType(out_shape, "float32")
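A sample call (hypothetical shapes, assuming yolo_reorg folds each stride x stride spatial block into channels, so (n, c, h, w) becomes (n, c*stride**2, h//stride, w//stride)):

    verify_yolo_reorg((1, 4, 6, 6), 2, (1, 16, 3, 3))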
Example #10
def get_ref_abs():
    shape = (5, 10)
    tp = relay.TensorType(shape, "float32")
    a = relay.var("a", tp)
    ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
    return ref_abs
Example #11
def custom_log1_rel(arg_types, attrs):
    assert len(arg_types) == 1, "type relation arg number mismatch!"
    if attrs:
        assert isinstance(attrs, DictAttrs)
    inputa_type = arg_types[0]
    return relay.TensorType(inputa_type.shape, inputa_type.dtype)
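The relation simply echoes the input's shape and dtype. A sketch of how such a relation is attached to a fresh operator (names hypothetical, and the registration calls are an assumption based on the relay.op.op API of the same era):

    from tvm.relay.op import op as _op

    op_name = "custom_log1"  # hypothetical operator name
    _op.register(op_name)
    _op.get(op_name).set_num_inputs(1)
    _op.get(op_name).add_argument("data", "Tensor", "The input tensor.")
    _op.get(op_name).add_type_rel("CustomLog1", custom_log1_rel)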
Example #12
def test_sequential_pass():
    shape = (10,)
    dtype = "float32"
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    y = relay.var("y", tp)
    v_sub = relay.GlobalVar("mySub")
    sub = relay.Function([x, y], relay.subtract(x, y))

    z = relay.var("z", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([z], relay.log(z))

    mod = tvm.IRModule({v_sub: sub, v_log: log})

    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log

    def get_ref_sub():
        ref_sub = relay.Function([x, y], relay.subtract(relay.add(x, x), relay.add(y, y)))
        return ref_sub

    def get_ref_abs():
        shape = (5, 10)
        tp = relay.TensorType(shape, "float32")
        a = relay.var("a", tp)
        ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
        return ref_abs

    # Register a module pass.
    opt_tester = OptTester(mod)
    pass_ctx = None

    @tvm.transform.module_pass(opt_level=1)
    def mod_transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    module_pass = mod_transform

    # Register a function pass.
    @_transform.function_pass(opt_level=1)
    def func_transform(expr, mod, ctx):
        return opt_tester.transform(expr, ctx)

    function_pass = func_transform

    def test_pass_registration():
        passes = [module_pass, function_pass]
        opt_level = 2
        pass_name = "sequential"
        sequential = tvm.transform.Sequential(passes=passes, opt_level=opt_level)
        pass_info = sequential.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_no_pass():
        passes = []
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        ret_mod = sequential(mod)
        mod_func = ret_mod[v_sub]
        check_func(sub, mod_func)

    def test_only_module_pass():
        passes = [module_pass]
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        with tvm.transform.PassContext(required_pass=["mod_transform"]):
            ret_mod = sequential(mod)
        # Check the subtract function.
        sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, sub)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, abs_func)

    def test_only_function_pass():
        # Check the subtract function.
        passes = [function_pass]
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        with tvm.transform.PassContext(required_pass=["func_transform"]):
            ret_mod = sequential(mod)
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function.
        log_var, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

    def test_multiple_passes():
        # Reset the current module since mod has been polluted by the previous
        # function pass.
        mod = tvm.IRModule({v_sub: sub, v_log: log})
        passes = [module_pass, function_pass]
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        required = ["mod_transform", "func_transform"]
        with tvm.transform.PassContext(required_pass=required):
            ret_mod = sequential(mod)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, get_ref_abs())

        # Check the subtract function is modified correctly.
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function is modified correctly.
        _, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

        # Execute the updated subtract function.
        x_nd = get_rand(shape, dtype)
        y_nd = get_rand(shape, dtype)
        ref_res = np.subtract(x_nd.asnumpy() * 2, y_nd.asnumpy() * 2)
        for target, dev in tvm.testing.enabled_targets():
            exe1 = relay.create_executor("graph", device=dev, target=target)
            exe2 = relay.create_executor("debug", device=dev, target=target)
            res1 = exe1.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

        # Execute the updated abs function.
        x_nd = get_rand((5, 10), dtype)
        ref_res = np.abs(x_nd.asnumpy() * 2)
        for target, dev in tvm.testing.enabled_targets():
            exe1 = relay.create_executor("graph", device=dev, target=target)
            exe2 = relay.create_executor("debug", device=dev, target=target)
            res1 = exe1.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    test_no_pass()
    test_only_module_pass()
    test_only_function_pass()
    test_multiple_passes()
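The contract the two decorators enforce is small: a module pass maps (IRModule, PassContext) to an IRModule, and a function pass maps (Function, IRModule, PassContext) to a Function. A minimal self-contained sketch using only the public tvm.transform API:

    @tvm.transform.module_pass(opt_level=1)
    def identity_pass(mod, ctx):
        # a module pass receives and must return a tvm.IRModule
        return mod

    seq = tvm.transform.Sequential([identity_pass])
    assert isinstance(seq(tvm.IRModule()), tvm.IRModule)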
Example #13
def test_function_pass():
    shape = (10,)
    dtype = "float32"
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([x], relay.log(x))
    mod = tvm.IRModule({v_log: log})

    pass_name = "function_pass_test"
    opt_level = 1
    opt_tester = OptTester(mod)
    pass_ctx = None

    @_transform.function_pass(opt_level=opt_level, name=pass_name)
    def transform(expr, mod, ctx):
        return opt_tester.transform(expr, ctx)

    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log

    def test_pass_registration():
        function_pass = transform
        assert isinstance(function_pass, _transform.FunctionPass)
        pass_info = function_pass.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_pass_registration_no_decorator():
        def direct_transform(expr, ctx):
            return opt_tester.transform(expr, ctx)

        mod_pass = _transform.function_pass(direct_transform, opt_level=0)
        assert isinstance(mod_pass, _transform.FunctionPass)
        pass_info = mod_pass.info
        assert pass_info.name == "direct_transform"
        assert pass_info.opt_level == 0

    def test_pass_run():
        function_pass = transform
        assert pass_name in str(function_pass)

        updated_mod = function_pass(mod)
        assert isinstance(updated_mod, tvm.IRModule)

        # Check the log function in the updated module.
        new_v_log = updated_mod.get_global_var(v_log.name_hint)
        new_log = updated_mod[new_v_log]
        check_func(new_log, get_ref_log())

        # Check the log function in the python transformed function.
        ret = opt_tester.transform(log, pass_ctx)
        check_func(new_log, ret)

        # Execute the add function.
        x_nd = get_rand(shape, dtype)
        ref_res = np.log(x_nd.asnumpy() * 2)
        for target, dev in tvm.testing.enabled_targets():
            exe1 = relay.create_executor("graph", device=dev, target=target)
            exe2 = relay.create_executor("debug", device=dev, target=target)
            res1 = exe1.evaluate(new_log)(x_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_log)(x_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    test_pass_registration_no_decorator()
    test_pass_run()
Example #14
def test_module_pass():
    shape = (5, 10)
    dtype = "float32"
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    y = relay.var("y", tp)
    v_add = relay.GlobalVar("myAdd")
    func = relay.Function([x, y], x + y)
    mod = tvm.IRModule({v_add: func})

    pass_name = "module_pass_test"
    opt_level = 0
    opt_tester = OptTester(mod)
    pass_ctx = None

    @tvm.transform.module_pass(opt_level=opt_level, name=pass_name)
    def transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    def test_pass_registration():
        mod_pass = transform
        assert isinstance(mod_pass, tvm.transform.ModulePass)
        pass_info = mod_pass.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_pass_registration_no_decorator():
        def direct_transform(expr, ctx):
            return opt_tester.transform(expr, ctx)

        mod_pass = tvm.transform.module_pass(direct_transform, opt_level=3)
        assert isinstance(mod_pass, tvm.transform.ModulePass)
        pass_info = mod_pass.info
        assert pass_info.name == "direct_transform"
        assert pass_info.opt_level == 3

    def test_pass_run():
        module_pass = transform
        assert pass_name in str(module_pass)

        updated_mod = module_pass(mod)
        assert isinstance(updated_mod, tvm.IRModule)

        # Check the abs function in the updated module.
        v_abs, myabs = get_var_func()
        new_v_abs = updated_mod.get_global_var(v_abs.name_hint)
        new_abs = updated_mod[new_v_abs]
        check_func(new_abs, myabs)

        # Check the add function in the updated module.
        new_v_add = updated_mod.get_global_var(v_add.name_hint)
        new_add = updated_mod[new_v_add]
        check_func(new_add, func)

        # Check the add function in the python transformed module.
        ret = opt_tester.transform(mod, pass_ctx)
        transformed_v_add = ret.get_global_var(v_add.name_hint)
        transformed_add = mod[transformed_v_add]
        check_func(new_add, transformed_add)

        # Execute the add function.
        x_nd = get_rand(shape, dtype)
        y_nd = get_rand(shape, dtype)
        ref_res = x_nd.asnumpy() + y_nd.asnumpy()
        for target, dev in tvm.testing.enabled_targets():
            exe1 = relay.create_executor("graph", device=dev, target=target)
            exe2 = relay.create_executor("debug", device=dev, target=target)
            res1 = exe1.evaluate(new_add)(x_nd, y_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_add)(x_nd, y_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    test_pass_registration_no_decorator()
    test_pass_run()
Example #15
def test_batch_norm():
    for dtype in ['float16', 'float32']:
        # beta and gamma ignored
        data = relay.var("data", relay.TensorType((3, 2, 1), dtype))
        beta = relay.var("beta", relay.TensorType((2,), dtype))
        gamma = relay.var("gamma", relay.TensorType((2,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((2,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((2,), dtype))
        y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                                center=False, scale=False)
        yy = run_infer_type(y.astuple())
        assert "center=" in yy.astext()
        assert yy.checked_type == relay.ty.TupleType(tvm.runtime.convert([
            relay.TensorType((3, 2, 1), dtype),
            relay.TensorType((2,), dtype),
            relay.TensorType((2,), dtype)
        ]))

        beta = relay.var("beta", relay.TensorType((3,), dtype))
        gamma = relay.var("gamma", relay.TensorType((3,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))

        y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                                axis=0, center=False, scale=False)
        yy = run_infer_type(y.astuple())
        assert yy.checked_type == relay.ty.TupleType(tvm.runtime.convert([
            relay.ty.TensorType((3, 2, 1), dtype),
            relay.ty.TensorType((3,), dtype),
            relay.ty.TensorType((3,), dtype)
        ]))

        # axis=-1
        data = relay.var("data", relay.TensorType((1, 2, 3), dtype))
        beta = relay.var("beta", relay.TensorType((3,), dtype))
        gamma = relay.var("gamma", relay.TensorType((3,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
        y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                                axis=-1, center=False, scale=False)
        yy = run_infer_type(y.astuple())
        assert yy.checked_type == relay.ty.TupleType(tvm.runtime.convert([
            relay.ty.TensorType((1, 2, 3), dtype),
            relay.ty.TensorType((3,), dtype),
            relay.ty.TensorType((3,), dtype)
        ]))
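Two details the asserts rely on: relay.nn.batch_norm returns a 3-tuple (normalized output, running mean, running variance), which is why each result is converted with .astuple() before type inference, and the mean/variance outputs take the extent of the chosen axis, which is why changing axis moves their shapes from (2,) to (3,). With center=False and scale=False the op reduces to (data - moving_mean) / sqrt(moving_var + epsilon) along that axis.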
Example #16
def test_multibox_prior():
    def get_ref_result(dshape,
                       sizes=(1.0, ),
                       ratios=(1.0, ),
                       steps=(-1.0, -1.0),
                       offsets=(0.5, 0.5),
                       clip=True):
        in_height = dshape[2]
        in_width = dshape[3]
        num_sizes = len(sizes)
        num_ratios = len(ratios)
        size_ratio_concat = sizes + ratios
        steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
        steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
        offset_h = offsets[0]
        offset_w = offsets[1]

        oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)
        dtype = "float32"
        np_out = np.zeros(oshape).astype(dtype)

        for i in range(in_height):
            center_h = (i + offset_h) * steps_h
            for j in range(in_width):
                center_w = (j + offset_w) * steps_w
                for k in range(num_sizes + num_ratios - 1):
                    w = size_ratio_concat[k] * in_height / in_width / 2.0 if k < num_sizes else \
                        size_ratio_concat[0] * in_height / in_width * math.sqrt(size_ratio_concat[k + 1]) / 2.0
                    h = size_ratio_concat[k] / 2.0 if k < num_sizes else \
                        size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0
                    count = i * in_width * (num_sizes + num_ratios - 1) + j * (
                        num_sizes + num_ratios - 1) + k
                    np_out[0][count][0] = center_w - w
                    np_out[0][count][1] = center_h - h
                    np_out[0][count][2] = center_w + w
                    np_out[0][count][3] = center_h + h
        if clip:
            np_out = np.clip(np_out, 0, 1)

        return np_out

    def verify_multibox_prior(x,
                              dshape,
                              ref_res,
                              sizes=(1.0, ),
                              ratios=(1.0, ),
                              steps=(-1.0, -1.0),
                              offsets=(0.5, 0.5),
                              clip=True,
                              check_size=False,
                              check_type_only=False):

        z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)
        zz = relay.ir_pass.infer_type(z)
        if check_size:
            assert "sizes=" in z.astext()
        assert zz.checked_type == relay.TensorType(
            (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4),
            "float32")

        if check_type_only:
            return

        data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        func = relay.Function([x], z)
        func = relay.ir_pass.infer_type(func)
        for target, ctx in ctx_list():
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
            intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
            op_res2 = intrp2.evaluate(func)(data)
            tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)

    sizes = (0.3, 1.5, 0.7)
    ratios = (1.3, 2.4)
    steps = (2.0, 1.5)
    offsets = (0.2, 0.3)
    dshape = (1, 3, 56, 56)
    ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets)
    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x,
                          dshape,
                          ref_res,
                          sizes,
                          ratios,
                          steps,
                          offsets,
                          check_size=True)
    y = relay.var("y", relay.TensorType((tvm.var("n"), 3, 56, 56), "float32"))
    verify_multibox_prior(y,
                          dshape,
                          ref_res,
                          sizes,
                          ratios,
                          steps,
                          offsets,
                          check_size=True,
                          check_type_only=True)

    dshape = (1, 24, 32, 32)
    ref_res = get_ref_result(dshape, clip=False)
    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x, dshape, ref_res, clip=False)
    y = relay.var("y", relay.TensorType((tvm.var("n"), 24, 32, 32), "float32"))
    verify_multibox_prior(y, dshape, ref_res, clip=False, check_type_only=True)
Example #17
def test_equal():
    i = relay.var('i', shape=[], dtype='int32')
    eq = op.equal(i, relay.const(0, dtype='int32'))
    # This should fail to typecheck: equal() produces a bool tensor, but the
    # declared return type is int32.
    func = relay.Function([i], eq, ret_type=relay.TensorType([], 'int32'))
Example #18
def test_no_match_type():
    x = relay.var('x', shape=(10, 10), dtype="int32")
    ty_pat = has_type(relay.TensorType((10, 10), "float32"))
    assert not ty_pat.match(x)
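For contrast, the same pattern matches once the dtypes agree (a one-line sketch reusing the has_type pattern built above):

    assert ty_pat.match(relay.var('y', shape=(10, 10), dtype="float32"))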
Example #19
def test_single_op():
    "Program: fn (x : float32) { let t1 = f(x); t1 }"
    x = relay.var('x', shape=[])
    func = relay.Function([x], op.log(x))
    ttype = relay.TensorType([], dtype='float32')
    assert_has_type(func, relay.FuncType([ttype], ttype))
Example #20
def test_TypePattern():
    ttype = relay.TensorType((10, 10), "float32")
    ty_pat = has_type(ttype)
    assert isinstance(ty_pat, TypePattern)
    assert ty_pat.type == ttype
Example #21
def test_subtract():
    i = relay.var("i", shape=[], dtype="int32")
    sub = relay.subtract(i, relay.const(1, dtype="int32"))
    func = relay.Function([i], sub, ret_type=relay.TensorType([], "int32"))
    i_data = np.array(1, dtype="int32")
    check_eval(func, [i_data], 0)
Example #22
def test_conv2d_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 224, 224), "float32")
    assert yy.args[1].checked_type == relay.TensorType((2, 10, 3, 3),
                                                       "float32")

    # infer by shape of w, mixed precision
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")

    # infer shape in case of different dtypes for input and weight.
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")

    # Infer with a different layout
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n // 4, c // 4, h, w, 4, 4), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(x,
                        wt,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        channels=16,
                        data_layout="NCHW4n4c",
                        kernel_layout="OIHW4o4i",
                        out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((1, 4, 224, 224, 4, 4), "int32")
    assert yy.args[1].checked_type == relay.TensorType((4, 8, 3, 3, 4, 4),
                                                       "int8")

    # Infer with NHWC
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(x,
                        wt,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        channels=16,
                        data_layout="NHWC",
                        out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, h, w, 16), "int32")
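The expected shapes follow the standard convolution arithmetic out = (in + 2*pad - kernel) // stride + 1: with a 3x3 kernel and padding (1, 1), 224 + 2 - 3 + 1 = 224, so the spatial extent is preserved, while the unpadded calls shrink it to 224 - 3 + 1 = 222.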
Example #23
def test_log_softmax_grad():
    data = relay.var("data", relay.TensorType((2, 16), "float64"))
    fwd_func = relay.Function([data], relay.nn.log_softmax(data))
    check_grad(fwd_func, scale=1)
Example #24
    def expected():
        # function for batch_norm
        data0 = relay.var("data0", relay.TensorType((1, 16, 224, 224),
                                                    "float32"))
        mod = tvm.IRModule()
        bn_gamma = relay.var("bn_gamma1", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("bn_beta1", relay.TensorType((16, ), "float32"))
        bn_mmean = relay.var("bn_mean1", relay.TensorType((16, ), "float32"))
        bn_mvar = relay.var("bn_var1", relay.TensorType((16, ), "float32"))

        bn = relay.nn.batch_norm(data0, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        func0 = relay.Function([data0, bn_gamma, bn_beta, bn_mmean, bn_mvar],
                               bn.astuple())
        func0 = set_func_attr(func0, "test_compiler", "test_compiler_2")
        gv0 = relay.GlobalVar("test_compiler_2")
        mod[gv0] = func0

        # function for conv2d
        data1 = relay.var("data1", relay.TensorType((1, 3, 224, 224), "float32"))
        weight1 = relay.var("weight1", relay.TensorType((16, 3, 3, 3), "float32"))
        conv = relay.nn.conv2d(
            data=data1,
            weight=weight1,
            kernel_size=(3, 3),
            channels=16,
            padding=(1, 1))
        func1 = relay.Function([data1, weight1], conv)
        func1 = set_func_attr(func1, "test_compiler", "test_compiler_0")
        gv1 = relay.GlobalVar("test_compiler_0")
        mod[gv1] = func1

        # main function
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma0 = relay.var("bn_gamma", relay.TensorType((16, ), "float32"))
        bn_beta0 = relay.var("bn_beta", relay.TensorType((16, ), "float32"))
        bn_mmean0 = relay.var("bn_mean", relay.TensorType((16, ), "float32"))
        bn_mvar0 = relay.var("bn_var", relay.TensorType((16, ), "float32"))

        call1 = gv1(data, weight)
        call0 = gv0(call1, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0)
        mod["main"] = relay.Function([data, weight, bn_gamma0, bn_beta0, bn_mmean0,
                                      bn_mvar0], call0)
        mod = transform.InferType()(mod)
        return mod
Example #25
def test_squeeze_bad_axes_infer_type():
    n, t, d = 1, 4, 1
    x = relay.var("x", relay.TensorType((n, t, d), "float32"))
    y = relay.squeeze(x, axis=(1,))
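    # axis 1 has extent 4, not 1, so type inference is expected to reject it
    # (the original suite marks this test as an expected failure)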
    yy = relay.ir_pass.infer_type(y)
Example #26
    def expected():
        mod = tvm.IRModule()

        # function 0
        data = relay.var("test_target_0_i0", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("test_target_0_i1", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("test_target_0_i2", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("test_target_0_i3", relay.TensorType((16, ), "float32"))
        bn_mean = relay.var("test_target_0_i4", relay.TensorType((16, ), "float32"))
        bn_var = relay.var("test_target_0_i5", relay.TensorType((16, ), "float32"))

        conv_o = relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=(3, 3),
            channels=16,
            padding=(1, 1))

        bn_o = relay.nn.batch_norm(conv_o, bn_gamma, bn_beta, bn_mean,
                                   bn_var)

        relu_o = relay.nn.relu(bn_o[0])
        tuple_o = relay.Tuple((relu_o, bn_o[1], bn_o[2]))

        func0 = relay.Function([data, weight, bn_gamma, bn_beta,
                                bn_mean, bn_var], tuple_o)
        func0 = set_func_attr(func0, "test_target", "test_target_0")
        gv0 = relay.GlobalVar("test_target_0")
        mod[gv0] = func0

        # body
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16, ), "float32"))
        bn_mean = relay.var("bn_mean", relay.TensorType((16, ), "float32"))
        bn_var = relay.var("bn_var", relay.TensorType((16, ), "float32"))

        f0_o = gv0(data, weight, bn_gamma, bn_beta, bn_mean, bn_var)
        f0_relu_o = relay.TupleGetItem(f0_o, 0)
        f0_mean_o = relay.TupleGetItem(f0_o, 1)
        f0_var_o = relay.TupleGetItem(f0_o, 2)

        f0_mean_abs = relay.abs(f0_mean_o)
        f0_var_abs = relay.abs(f0_var_o)
        main_tuple = relay.Tuple((f0_relu_o, f0_mean_abs, f0_var_abs))

        func = relay.Function([data, weight, bn_gamma,
                               bn_beta, bn_mean, bn_var], main_tuple)
        mod["main"] = func
        return mod
Example #27
def test_concat():
    t = relay.TensorType([10], "float32")
    x = Var("x", t)
    y = Var("x", t)
    orig = run_infer_type(Function([x, y], op.concatenate([x, y], axis=0)))
    tvm.ir.assert_structural_equal(dcpe(orig), orig)
Example #28
def get_net(include_bn=True, include_sigmoid=False):
    data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
    block1 = get_blocks("block1_", data, 3, 8, include_bn, include_sigmoid)
    # The second block is always conv + relu, to make it more interesting
    block2 = get_blocks("block2_", block1, 8, 8, False, include_sigmoid)
    return relay.Function(relay.analysis.free_vars(block2), block2)
Example #29
def verify_reduction_grad(red_fn, d_shape, axis=None, keepdims=False, exclude=False):
    data = relay.var("data", relay.TensorType(d_shape, "float32"))
    fwd_func = relay.Function([data], red_fn(data, axis=axis, keepdims=keepdims, exclude=exclude))
    check_grad(fwd_func)
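A typical call (hypothetical arguments, assuming the Relay reductions such as relay.sum share the axis/keepdims/exclude signature):

    verify_reduction_grad(relay.sum, (4, 2), axis=0)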
Example #30
    def verify_sparse_fill_empty_rows(
        sparse_indices_np: np.ndarray,
        sparse_values_np: np.ndarray,
        dense_shape_np: np.ndarray,
        default_value_np: np.ndarray,
    ) -> None:
        """
        This function verifies the relay output of sparse_fill_empty_rows with its expected output.
        """
        if use_dyn:
            sparse_indices = relay.var(
                "sparse_indices",
                shape=[relay.Any(), relay.Any()],
                dtype=str(sparse_indices_np.dtype),
            )
            sparse_values = relay.var(
                "sparse_values",
                shape=[relay.Any()],
                dtype=str(sparse_values_np.dtype),
            )
            dense_shape = relay.var(
                "dense_shape",
                shape=[relay.Any()],
                dtype=str(dense_shape_np.dtype),
            )
            default_value = relay.var(
                "default_value",
                shape=[relay.Any()],
                dtype=str(default_value_np.dtype),
            )
        else:
            sparse_indices = relay.var(
                "sparse_indices",
                relay.TensorType(sparse_indices_np.shape,
                                 str(sparse_indices_np.dtype)),
            )
            sparse_values = relay.var(
                "sparse_values",
                relay.TensorType(sparse_values_np.shape,
                                 str(sparse_values_np.dtype)),
            )
            dense_shape = relay.var(
                "dense_shape",
                relay.TensorType(dense_shape_np.shape,
                                 str(dense_shape_np.dtype)),
            )
            default_value = relay.var(
                "default_value",
                relay.TensorType(default_value_np.shape,
                                 str(default_value_np.dtype)),
            )
        z = relay.sparse_fill_empty_rows(sparse_indices, sparse_values,
                                         dense_shape, default_value)
        func = relay.Function(
            [sparse_indices, sparse_values, dense_shape, default_value], z)
        ref_res = ref_sparse_fill_empty_rows(
            sparse_indices_np,
            sparse_values_np,
            dense_shape_np,
            default_value_np,
        )
        (
            new_sparse_indices_infer_type,
            new_sparse_values_infer_type,
            empty_row_indicator_infer_type,
        ) = run_infer_type(z)

        assert new_sparse_indices_infer_type.checked_type.dtype == sparse_indices_np.dtype
        assert new_sparse_values_infer_type.checked_type.dtype == sparse_values_np.dtype
        assert empty_row_indicator_infer_type.checked_type.dtype == "bool"

        verify_func(
            func,
            [
                sparse_indices_np, sparse_values_np, dense_shape_np,
                default_value_np
            ],
            ref_res,
            [("llvm", tvm.cpu())],
        )