def test_explicit_bound():
    x = relay.const(1)
    y = op.add(x, x)
    z = op.add(y, y)
    f = relay.Function([], op.add(z, z))
    assert "let" not in f.astext()  # assert the values are implicitly bound
    anf = to_a_normal_form(f)
    assert "let" in anf.astext()  # assert the values are explicitly bound
    check_eval(f(), 8.0)
    check_eval(anf(), 8.0)
def test_explicit_bound():
    x = relay.const(1)
    y = op.add(x, x)
    z = op.add(y, y)
    f = relay.Function([], op.add(z, z))
    assert Feature.fLet not in detect_feature(f)
    anf = run_opt_pass(f, transform.ToANormalForm())
    assert Feature.fLet in detect_feature(anf)
    check_eval(f(), 8.0)
    check_eval(anf(), 8.0)
def test_explicit_bound():
    x = relay.const(1)
    y = op.add(x, x)
    z = op.add(y, y)
    f = relay.Function([], op.add(z, z))
    assert Feature.fLet not in detect_feature(f)
    anf = to_a_normal_form(f)
    assert Feature.fLet in detect_feature(anf)
    check_eval(f(), 8.0)
    check_eval(anf(), 8.0)
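# The variants above (and several tests below) rely on helpers defined
# elsewhere in the test module. A minimal sketch of what `run_opt_pass` and
# the two-argument `check_eval` typically look like in TVM's Relay test
# suite; the exact definitions vary across files and TVM versions, so treat
# this as an assumption rather than the canonical code.
import numpy as np
import tvm
from tvm import relay


def run_opt_pass(expr, opt_pass):
    # Wrap the bare expression in a module so the pass can run on it,
    # then unwrap the transformed "main" function (or its body).
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body


def check_eval(expr, expected_result, mod=None, rtol=1e-07):
    # Evaluate with the Relay interpreter and compare against numpy.
    # (The three-argument check_eval(f, args, expected) variant used by
    # other tests in this section additionally applies f to args.)
    result = relay.create_executor(mod=mod).evaluate(expr)
    np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)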
def test_implicit_share():
    x = relay.Var('x')
    y = relay.Var('y')
    z = relay.Var('z')
    body = relay.Let(z, op.add(y, y), op.add(z, z))
    body = relay.Let(y, op.add(x, x), body)
    f = relay.Function([], relay.Let(x, relay.const(1), body))
    g = to_graph_normal_form(f)
    assert "let" in f.astext()
    assert "let" not in g.astext()
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
def test_no_explicit_bind():
    x = relay.const(1)
    y = op.add(x, x)
    z = op.add(y, y)
    f = relay.Function([], op.add(z, z))
    """
    fn () {
      %0 = add(1, 1);
      %1 = add(%0, %0);
      add(%1, %1)
    }
    """
    assert Feature.fLet not in detect_feature(f)
    bblock = run_opt_pass(f, transform.ToBasicBlockNormalForm())
    assert Feature.fLet not in detect_feature(bblock)
    check_eval(f(), 8.0)
    check_eval(bblock(), 8.0)
    check_basic_block_normal_form(bblock)
def test_implicit_share():
    x = relay.Var('x')
    y = relay.Var('y')
    z = relay.Var('z')
    body = relay.Let(z, op.add(y, y), op.add(z, z))
    body = relay.Let(y, op.add(x, x), body)
    f = relay.Function([], relay.Let(x, relay.const(1), body))
    g = run_opt_pass(f, transform.ToGraphNormalForm())
    assert Feature.fLet in detect_feature(f)
    assert Feature.fLet not in detect_feature(g)
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
def test_round_trip():
    x = relay.Var('x')
    y = relay.Var('y')
    z = relay.Var('z')
    body = relay.Let(z, op.add(y, y), op.add(z, z))
    body = relay.Let(y, op.add(x, x), body)
    f = relay.Function([], relay.Let(x, relay.const(1), body))
    g = transform.OptimizeOnExpr(f, transform.ToGraphNormalForm())
    h = transform.OptimizeOnExpr(g, transform.ToANormalForm())
    assert Feature.fLet in detect_feature(f)
    assert Feature.fLet not in detect_feature(g)
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
    check_eval(h, [], 8.0)
def test_round_trip():
    x = relay.Var("x")
    y = relay.Var("y")
    z = relay.Var("z")
    body = relay.Let(z, op.add(y, y), op.add(z, z))
    body = relay.Let(y, op.add(x, x), body)
    f = relay.Function([], relay.Let(x, relay.const(1), body))
    g = run_opt_pass(f, transform.ToGraphNormalForm())
    h = run_opt_pass(g, transform.ToANormalForm())
    assert Feature.fLet in detect_feature(f)
    assert Feature.fLet not in detect_feature(g)
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
    check_eval(h, [], 8.0)
def test_round_trip():
    x = relay.Var('x')
    y = relay.Var('y')
    z = relay.Var('z')
    body = relay.Let(z, op.add(y, y), op.add(z, z))
    body = relay.Let(y, op.add(x, x), body)
    f = relay.Function([], relay.Let(x, relay.const(1), body))
    g = to_graph_normal_form(f)
    h = to_a_normal_form(g)
    assert Feature.fLet in detect_feature(f)
    assert Feature.fLet not in detect_feature(g)
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
    check_eval(h, [], 8.0)
def test_large_graph():
    # Test a large graph to avoid stack overflow in serialize/deserialize.
    size = int(1e5)
    var = [relay.var("var_" + str(i), shape=(2, 3)) for i in range(size)]
    body = var[-1]
    for i in range(size, 1, -1):
        body = relay.Let(var[i - 1], op.add(var[i - 2], var[i - 2]), body)
    func = relay.Function([var[0]], body)
    check_json_roundtrip(func)
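# `check_json_roundtrip` is also defined outside this section. A minimal
# sketch, assuming it mirrors the helper in TVM's IR node tests:
import tvm


def check_json_roundtrip(node):
    # Serialize the IR node to JSON and load it back.
    json_str = tvm.ir.save_json(node)
    back = tvm.ir.load_json(json_str)
    # The round-tripped node must be structurally equal to the original.
    assert tvm.ir.structural_equal(back, node, map_free_vars=True)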
def _add(children, attrs, odtype='float32'):
    if len(children) == 1:
        # One child: the second operand comes from the 'scalar' attribute.
        left = children[0]
        scalar = attrs.get_float('scalar')
        right = relay.const(scalar, dtype=odtype)
    else:
        assert len(children) == 2
        left = children[0]
        right = children[1]
    return op.add(left, right)
def test_add_op_broadcast():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    func = relay.Function([x, y], add(x, y))
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(1, 5).astype('float32')
    check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_scalar():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    func = relay.Function([x, y], add(x, y))
    x_data = np.array(10.0, dtype='float32')
    y_data = np.array(1.0, dtype='float32')
    check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_broadcast():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(1, 5))
    func = relay.Function([x, y], add(x, y))
    x_data = np.random.rand(10, 5).astype("float32")
    y_data = np.random.rand(1, 5).astype("float32")
    check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_tensor():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(10, 5))
    func = relay.Function([x, y], add(x, y))
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(10, 5).astype('float32')
    check_rts(func, [x_data, y_data], x_data + y_data)
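# The `check_rts` helper used by these addition tests compares the reference
# interpreter against the compiled graph executor. A plausible sketch based
# on TVM's test suite (an assumption; the check_rts(env, ...) variants in
# two tests below come from an older API that threaded an Environment
# through explicitly):
import tvm
from tvm import relay


def check_rts(expr, args, expected_result, mod=None):
    # Run on the reference interpreter ("debug") and the graph executor,
    # then check both against each other and the expected numpy result.
    eval_result = relay.create_executor("debug", mod=mod).evaluate(expr)(*args)
    rts_result = relay.create_executor("graph", mod=mod).evaluate(expr)(*args)
    tvm.testing.assert_allclose(eval_result.numpy(), rts_result.numpy())
    tvm.testing.assert_allclose(rts_result.numpy(), expected_result)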
def aten_add(inputs, attributes, scope):
    lfs, rfs, alpha = inputs
    assert alpha == 1
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        # TensorRT path: build a scale/elementwise layer in the network.
        output = _scale_or_elementwise(net, lfs, rfs, "add", scope)
        output.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        # TVM path: promote non-tensor operands to Relay constants first.
        lfs, rfs = _tvm_to_const([lfs, rfs])
        return [_op.add(lfs, rfs)]
    # Neither backend is active: fall back to plain Python addition.
    return [lfs + rfs]
def test_add_op_scalar():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    x = relay.var("x", shape=())
    y = relay.var("y", shape=())
    func = relay.Function([x, y], add(x, y))
    x_data = np.array(10.0, dtype="float32")
    y_data = np.array(1.0, dtype="float32")
    check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_broadcast():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    env = Environment()
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    func = relay.Function([x, y], add(x, y))
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(1, 5).astype('float32')
    check_rts(env, func, [x_data, y_data], x_data + y_data)
def test_add_op_scalar():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    env = Environment()
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    func = relay.Function([x, y], add(x, y))
    x_data = np.array(10.0, dtype='float32')
    y_data = np.array(1.0, dtype='float32')
    check_rts(env, func, [x_data, y_data], x_data + y_data)
def test_simple_loop():
    env = relay.env.Environment({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(op.equal(i, relay.const(0, dtype='int32'))):
        sb.ret(i)
    with sb.else_scope():
        one_less = op.subtract(i, relay.const(1, dtype='int32'))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(op.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
    env[sum_up] = func
    i_data = np.array(10, dtype='int32')
    check_eval(sum_up, [i_data], sum(range(1, 11)), env=env)
def test_with_params():
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    func = relay.Function([x, y], add(x, y))
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(1, 5).astype('float32')
    params = {"y": y_data}
    graph, lib, params = relay.build(func, "llvm", params=params)
    mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
    mod.set_input(**params)
    mod.set_input(x=x_data)
    mod.run()
    res = mod.get_output(0).asnumpy()
    ref_res = y_data + x_data
    tvm.testing.assert_allclose(res, ref_res)
def test_dual_op():
    """Program:
       fn (x : Tensor[f32, (10, 10)]) {
         let t1 = log(x);
         let t2 = add(t1, x);
         return t2;
       }
    """
    b = IRBuilder()
    with b.function(('x', tensor_type(10, 10))) as func:
        x, = func.param_ids()
        t1 = b.let('t1', log(x))
        t2 = b.let('t2', add(t1, x))
        b.ret(t2)
    assert_has_type(func.to_func(), func_type(['float32'], 'float32'))
def test_add_op_scalar_int():
    """
    test_add_op_scalar_int:
       fn (x, y) {
           return x + y;
       }
    """
    x = relay.var("x", shape=(), dtype="int32")
    y = relay.var("y", shape=(), dtype="int32")
    func = relay.Function([x, y], add(x, y))
    x_y_data = [
        (np.array(10, dtype="int32"), np.array(1, dtype="int32")),
        (np.int32(10), np.int32(1)),
        (10, 1),
    ]
    for (x_data, y_data) in x_y_data:
        check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_scalar():
    """
    test_add_op_scalar:
       fn (x, y) {
           return x + y;
       }
    """
    x = relay.var("x", shape=())  # Defaults to float32
    y = relay.var("y", shape=())  # Defaults to float32
    func = relay.Function([x, y], add(x, y))
    x_y_data = [
        (np.array(10.0, dtype="float32"), np.array(1.0, dtype="float32")),
        (np.float32(10.0), np.float32(1.0)),
        (10.0, 1.0),
    ]
    for (x_data, y_data) in x_y_data:
        check_rts(func, [x_data, y_data], x_data + y_data)
def test_loop():
    env = relay.env.Environment({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    accum = relay.var('accum', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(op.equal(i, relay.const(0))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = op.subtract(i, relay.const(1))
        new_accum = op.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    env[sum_up] = func
    i_data = np.array(10, dtype='int32')
    accum_data = np.array(0, dtype='int32')
    check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), env=env)
def test_add_broadcast_op():
    """
    Program:
       fn (x: Tensor[(10, 4), f32], y: Tensor[(5, 10, 1), f32])
           -> Tensor[(5, 10, 4), f32] {
           return x + y;
       }
    """
    b = IRBuilder()
    x = b.param('x', tensor_type(10, 4))
    y = b.param('y', tensor_type(5, 10, 1))
    with b.function(x, y) as func:
        b.ret(add(x.var, y.var))
    b.ret(func)
    prog, env = b.get()
    # Broadcasting (10, 4) with (5, 10, 1) yields (5, 10, 4).
    expected_ty = func_type([tensor_type(10, 4), tensor_type(5, 10, 1)],
                            tensor_type(5, 10, 4))
    assert_has_type(func.to_func(), expected_ty)
def test_pass_profiler():
    x, y, z = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xyz"]
    e1 = op.add(x, y)
    e2 = op.subtract(x, z)
    e3 = op.multiply(e1, e1 / e2)
    mod = tvm.IRModule.from_expr(e3 + e2)
    tvm.transform.enable_pass_profiling()
    mod = tvm.relay.transform.AnnotateSpans()(mod)
    mod = tvm.relay.transform.ToANormalForm()(mod)
    mod = tvm.relay.transform.InferType()(mod)
    profiles = tvm.transform.render_pass_profiles()
    assert "AnnotateSpans" in profiles
    assert "ToANormalForm" in profiles
    assert "InferType" in profiles
    tvm.transform.clear_pass_profiles()
    tvm.transform.disable_pass_profiling()
def get_test_model():
    x, y, z = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xyz"]
    e1 = op.add(x, y)
    e2 = op.subtract(x, z)
    e3 = op.multiply(e1, e1 / e2)
    return tvm.IRModule.from_expr(e3 + e2)
def test_op_let():
    assert alpha_equal(
        dead_code_elimination(add(relay.Let(e.a, e.one, e.three), e.two)),
        add(e.three, e.two),
    )
def test_op_let():
    dced = run_opt_pass(
        add(relay.Let(e.a, e.one, e.three), e.two),
        transform.DeadCodeElimination(),
    )
    assert tvm.ir.structural_equal(dced, add(e.three, e.two))
def linear(self, input_size, output_size, x, name=""):
    # nn.dense computes x @ weight.T, so the weight is stored as
    # (output_size, input_size).
    weight = self.add_param(f'{name}linear_weight', shape=(output_size, input_size))
    bias = self.add_param(f'{name}linear_bias', shape=(output_size,))
    return op.add(op.nn.dense(x, weight), bias)
def build_impl(self, input_size, output_size, dtype="float32"):
    x = self.input(var("linear_input", shape=(1, input_size), dtype=dtype))
    w = self.weight(var("linear_weight", shape=(output_size, input_size), dtype=dtype))
    b = self.weight(var("linear_bias", shape=(output_size,), dtype=dtype))
    return op.add(op.nn.dense(x, w), b)
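# Both linear-layer builders above encode the same affine map: nn.dense
# multiplies by the transposed weight, so the layer computes y = x @ W.T + b.
# A quick numpy sanity check of that semantics, with hypothetical sizes
# (input_size=4, output_size=3, batch=1):
import numpy as np

x = np.random.rand(1, 4).astype("float32")  # (batch, input_size)
W = np.random.rand(3, 4).astype("float32")  # (output_size, input_size)
b = np.random.rand(3).astype("float32")     # (output_size,)

# nn.dense(x, W) computes x @ W.T, matching the (output, input) layout above.
y = x @ W.T + b
assert y.shape == (1, 3)  # (batch, output_size)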