def test_compose():
    mod = Module()
    p = Prelude(mod)
    x = relay.Var('x')
    inc = GlobalVar('inc')
    mod[inc] = Function([x], p.s(x))
    x = relay.Var('x')
    func = GlobalVar('func')
    f = Function([x], relay.Call(p.compose(inc, p.double), [x]))
    mod[func] = f
    cfunc = compile(func, mod)
    assert nat_to_int(cfunc(p.s(p.s(p.z())))) == 5

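# Illustrative Python mirror of the composition above (not part of the test;
# `_py_compose` is a hypothetical helper): p.compose(inc, p.double) builds
# inc . double, so applied to the nat 2 it computes inc(double(2)) == 5.
def _py_compose(f, g):
    return lambda n: f(g(n))

assert _py_compose(lambda n: n + 1, lambda n: 2 * n)(2) == 5
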
def get_expected():
    def set_func_attr(func, compile_name, symbol_name):
        func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func = func.with_attr("Compiler", compile_name)
        func = func.with_attr("global_symbol", symbol_name)
        return func

    # Create a nested TRT function that matches the expected output.
    mod = tvm.IRModule()
    var1 = relay.var("tensorrt_0_i0", shape=data_shape, dtype="float32")
    kernel_trt = relay.var("tensorrt_0_i1", shape=k_shape, dtype="float32")
    out1 = relay.nn.conv2d(var1, kernel_trt, channels=k_shape[0], kernel_size=k_shape[2:4])
    f1 = GlobalVar("tensorrt_0")
    func = relay.Function([var1, kernel_trt], out1)
    func = set_func_attr(func, "tensorrt", "tensorrt_0")
    mod[f1] = func
    mod = relay.transform.InferType()(mod)

    # Create the main function.
    out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4])
    out = relay.add(out1, f1(y, kernel))
    f = relay.Function([x, y, kernel], out)
    mod["main"] = f
    mod = relay.transform.InferType()(mod)
    return mod

def test_empty_subgraph():
    if skip_codegen_test():
        return
    x_shape = (1, 3, 5)
    mod = tvm.IRModule()

    # Empty tensorrt subgraph; `set_func_attr` is assumed to be the
    # module-level helper shown nested inside get_expected above.
    var1 = relay.var("tensorrt_0_i0", shape=x_shape, dtype="float32")
    f1 = GlobalVar("tensorrt_0")
    func = relay.Function([var1], var1)
    func = set_func_attr(func, "tensorrt", "tensorrt_0")
    mod[f1] = func
    mod = relay.transform.InferType()(mod)

    # Create the main function.
    x = relay.var("x", shape=x_shape, dtype="float32")
    out = f1(relay.nn.relu(x))
    f = relay.Function([x], out)
    mod["main"] = f

    x_data = np.random.uniform(-1, 1, x_shape).astype("float32")
    for mode in ["graph", "vm"]:
        with tvm.transform.PassContext(opt_level=3):
            exec = relay.create_executor(mode, mod=mod, ctx=tvm.gpu(0), target="cuda")
            if not skip_runtime_test():
                results = exec.evaluate()(x_data)

def test_loop():
    mod = Module()
    t = TypeVar("t")
    x = Var("x", t)
    loop = GlobalVar("loop")
    mod[loop] = Function([x], loop(x), t, [t])
    res = dcpe(loop(const(1)), mod=mod)
    expected = Call(loop, [const(1)], None, [None])
    assert alpha_equal(res, expected)

def test_double():
    mod = Module()
    x = var('x', shape=())
    double = GlobalVar('double')
    mod[double] = Function([x], x + x)
    x = var('x', shape=())
    cfunc = compile(Function([x], double(double(x))), mod)
    a = tvm.nd.array(np.array(1.5, dtype='float32'))
    output = cfunc(a)
    # double(double(1.5)) == 4 * 1.5 == 6.0
    np.testing.assert_allclose(output.asnumpy(), np.array(6.0, dtype='float32'))

def test_loop():
    mod = Module()
    t = TypeVar("t")
    x = Var("x", t)
    loop = GlobalVar("loop")
    # `loop` recurses forever, so partial evaluation must leave the call as-is.
    mod[loop] = Function([x], loop(x), t, [t])
    expected = Call(loop, [const(1)])
    mod[mod.entry_func] = Function([], expected)
    expected = mod[mod.entry_func].body
    call = Function([], loop(const(1)))
    res = dcpe(call, mod=mod)
    assert alpha_equal(res.body, expected)

def test_global_function():
    m = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.Var("x", t)
    d = GlobalVar("double")
    m[d] = relay.Function([x], x + x)
    y = relay.Var("y", t)
    q = GlobalVar("q")
    # q(y) = double(double(y)) = 4y elementwise, so dq/dy == 4 everywhere.
    m[q] = relay.Function([y], d(d(y)))
    g = GlobalVar("grad")
    m[g] = tvm.relay.transform.gradient(q, m)
    back_func = m[g]
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor(mod=m)
    x = rand(dtype, *shape)
    forward, (grad,) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
    tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))

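# Illustrative finite-difference sanity check of the expected gradient (not
# part of the test; `_fd_grad` is a hypothetical helper): since q(x) = 4x,
# the central difference recovers dq/dx == 4 up to float error.
def _fd_grad(f, x, eps=1e-3):
    return (f(x + eps) - f(x - eps)) / (2 * eps)

assert abs(_fd_grad(lambda v: 4 * v, 1.5) - 4.0) < 1e-6
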
def test_nat_id():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    nat_id = GlobalVar("nat_id")
    mod[nat_id] = Function([x], x)
    orig = nat_id(make_nat_expr(p, 3))
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res, make_nat_expr(p, 3))

def test_loop():
    mod = tvm.IRModule()
    t = TypeVar("t")
    x = Var("x", t)
    loop = GlobalVar("loop")
    mod[loop] = Function([x], loop(x), t, [t])
    expected = Call(loop, [const(1)])
    mod["main"] = Function([], expected)
    expected = mod["main"].body
    call = Function([], loop(const(1)))
    res = dcpe(call, mod=mod)
    assert tvm.ir.structural_equal(res.body, expected)

def test_swap_loop():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    loop = GlobalVar("loop")
    # `loop` swaps its arguments forever, so dcpe must leave the program alone.
    mod[loop] = Function([x, y], loop(y, x), nat)
    prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
    res = dcpe(prog, mod=mod)
    assert alpha_equal(prog, res)

def test_recur_sum_global():
    mod = Module()
    x = var('x', dtype='int32', shape=())
    sum = GlobalVar('sum')
    c = relay.const(0)
    # sum(x) = 0 if x < 0 else x + sum(x - 1)
    mod[sum] = Function([x],
                        relay.If(op.less(x, c), c, x + sum(x - relay.const(1))),
                        relay.TensorType(dtype='int32', shape=()))
    cfunc = compile(Function([], sum(relay.const(10))), mod)
    output = cfunc()
    # 10 + 9 + ... + 1 + 0 == 55
    np.testing.assert_allclose(output.asnumpy(), np.array(55, dtype='int32'))

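# Illustrative Python mirror of the Relay 'sum' above (not part of the test;
# `_py_sum` is a hypothetical helper), confirming the expected value:
def _py_sum(n):
    return 0 if n < 0 else n + _py_sum(n - 1)

assert _py_sum(10) == 55
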
def test_nat_id():
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, _, _ = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    nat_id = GlobalVar("nat_id")
    mod[nat_id] = Function([x], x)
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))

def test_swap_loop():
    mod = tvm.IRModule()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    loop = GlobalVar("loop")
    mod[loop] = Function([x, y], loop(y, x), nat)
    prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
    res = Function([], prog)
    res = dcpe(res, mod=mod)
    assert tvm.ir.structural_equal(prog, res.body)

def test_swap_loop():
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, _, _ = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    loop = GlobalVar("loop")
    mod[loop] = Function([x, y], loop(y, x), nat())
    prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
    res = Function([], prog)
    res = dcpe(res, mod=mod)
    assert tvm.ir.structural_equal(prog, res.body)

def test_nat_id():
    mod = tvm.IRModule()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    nat_id = GlobalVar("nat_id")
    mod[nat_id] = Function([x], x)
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))

def test_match_nat_id():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    nat_id = GlobalVar("nat_id")
    # Identity on nats written as a pattern match: z -> z, s(y) -> s(y).
    z_case = Clause(PatternConstructor(p.z, []), p.z())
    s_case = Clause(PatternConstructor(p.s, [PatternVar(y)]), p.s(y))
    mod[nat_id] = Function([x], Match(x, [z_case, s_case]))
    orig = nat_id(make_nat_expr(p, 3))
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res, make_nat_expr(p, 3))

def test_match_nat_id():
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    nat_id = GlobalVar("nat_id")
    z_case = Clause(PatternConstructor(z, []), z())
    s_case = Clause(PatternConstructor(s, [PatternVar(y)]), s(y))
    mod[nat_id] = Function([x], Match(x, [z_case, s_case]))
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))

def test_map():
    mod = Module()
    p = Prelude(mod)
    f = GlobalVar("f")
    t = TypeVar("t")
    a = Var("a", t)
    mod[f] = Function([a], a, t, [t])
    orig = p.map(f, p.cons(const(1), p.cons(const(2), p.cons(const(3), p.nil()))))
    expected = p.cons(const(1), p.cons(const(2), p.cons(const(3), p.nil())))
    expected = Function([], expected)
    mod["main"] = expected
    expected = mod["main"]
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, expected.body)

def double_example():
    # Declare a Relay module.
    mod = Module()

    # Implement the double function.
    x = var('x', shape=())
    double = GlobalVar('double')
    mod[double] = Function([x], x + x)

    # Generate a function which calls double twice.
    x = var('x', shape=())
    f = Function([x], double(double(x)))

    # Compile the function.
    cfunc = compile(f, mod)
    a = tvm.nd.array(np.array(1.5, dtype='float32'))
    print(cfunc(a).asnumpy())

def test_map():
    mod = tvm.IRModule()
    p = Prelude(mod)
    rlist, cons, nil = p.mod.get_type("List")
    rmap = p.mod.get_global_var("map")
    f = GlobalVar("f")
    t = TypeVar("t")
    a = Var("a", t)
    mod[f] = Function([a], a, t, [t])
    orig = rmap(f, cons(const(1), cons(const(2), cons(const(3), nil()))))
    expected = cons(const(1), cons(const(2), cons(const(3), nil())))
    expected = Function([], expected)
    mod["main"] = expected
    mod = transform.InferType()(mod)
    expected = mod["main"]
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, expected.body)

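# Illustrative Python mirror of the Prelude list and map used above (not part
# of the test; `_py_map` and the tuple encoding are hypothetical): a list is
# None (nil) or a (head, tail) pair (cons). Mapping the identity over a list
# is the identity on the list, which is what dcpe is expected to prove here.
def _py_map(f, xs):
    return None if xs is None else (f(xs[0]), _py_map(f, xs[1]))

lst = (1, (2, (3, None)))
assert _py_map(lambda v: v, lst) == lst
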
def get_expected():
    # Create a nested TRT function that matches the expected output.
    mod = tvm.IRModule()
    var1 = relay.var("tensorrt_0_i0", shape=data_shape, dtype="float32")
    kernel_trt = relay.var("tensorrt_0_i1", shape=k_shape, dtype="float32")
    out1 = relay.nn.conv2d(var1, kernel_trt, channels=k_shape[0], kernel_size=k_shape[2:4])
    f1 = GlobalVar("tensorrt_0")
    func = relay.Function([var1, kernel_trt], out1)
    func = set_func_attr(func, "tensorrt", "tensorrt_0")
    mod[f1] = func
    mod = relay.transform.InferType()(mod)

    # Create the main function.
    out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4])
    out = relay.add(out1, f1(y, kernel))
    f = relay.Function([x, y, kernel], out)
    mod["main"] = f
    mod = relay.transform.InferType()(mod)
    return mod

def test_abs_diff():
    # TODO(@M.K.): refactor using tuple pattern (not yet implemented)
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    xp = Var("x'", nat)
    yp = Var("y'", nat)
    diff = GlobalVar("diff")
    y_z_case = Clause(PatternConstructor(p.z, []), x)
    y_s_case = Clause(PatternConstructor(p.s, [PatternVar(yp)]), diff(yp, xp))
    x_z_case = Clause(PatternConstructor(p.z, []), y)
    x_s_case = Clause(PatternConstructor(p.s, [PatternVar(xp)]), Match(y, [y_z_case, y_s_case]))
    mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
    orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 4))

def test_abs_diff():
    # TODO(@M.K.): refactor using tuple pattern (not yet implemented)
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    xp = Var("x'", nat())
    yp = Var("y'", nat())
    diff = GlobalVar("diff")
    y_z_case = Clause(PatternConstructor(z, []), x)
    y_s_case = Clause(PatternConstructor(s, [PatternVar(yp)]), diff(yp, xp))
    x_z_case = Clause(PatternConstructor(z, []), y)
    x_s_case = Clause(PatternConstructor(s, [PatternVar(xp)]), Match(y, [y_z_case, y_s_case]))
    mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
    orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 4))

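# Illustrative Python mirror of the nested match in 'diff' above (not part of
# the test; `_py_diff` is a hypothetical helper). Peeling one successor off
# each argument per step computes |x - y|:
def _py_diff(x, y):
    if x == 0:          # x_z_case: diff(z, y) = y
        return y
    if y == 0:          # y_z_case: diff(x, z) = x
        return x
    return _py_diff(y - 1, x - 1)  # y_s_case: diff(s(xp), s(yp)) = diff(yp, xp)

assert _py_diff(7, 3) == 4
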
def test_flatten_tir():
    orig_mod = tvm.IRModule({GlobalVar("main"): tir_func})
    mod = tvm.tir.transform.StorageFlatten(64)(orig_mod)
    # StorageFlatten should do nothing to TIR functions.
    tvm.ir.assert_structural_equal(orig_mod, mod)

def make(name):
    return GlobalVar(name + str(CHECK_GRAD_COUNTER))