import numpy as np
import tvm
from tvm import relay


def get_synthetic_lib():
    x = relay.var('x', shape=(10, 10))
    w0 = relay.var('w0', shape=(10, 10))
    w1 = relay.var('w1', shape=(10, 10))
    w2 = relay.var('w2', shape=(10, 10))
    w3 = relay.var('w3', shape=(10, 10))
    w4 = relay.var('w4', shape=(10, 10))
    w5 = relay.var('w5', shape=(10, 10))
    w6 = relay.var('w6', shape=(10, 10))
    w7 = relay.var('w7', shape=(10, 10))

    # subgraph0
    gcc_input0 = relay.var('gcc_input0', shape=(10, 10))
    gcc_input1 = relay.var('gcc_input1', shape=(10, 10))
    gcc_input2 = relay.var('gcc_input2', shape=(10, 10))
    gcc_input3 = relay.var('gcc_input3', shape=(10, 10))
    subgraph0 = relay.Function([gcc_input0, gcc_input1, gcc_input2, gcc_input3],
                               relay.copy(gcc_input0))
    subgraph0 = subgraph0.set_attribute("Primitive", tvm.tir.IntImm("int32", 1))

    # Call subgraph0
    subgraph0_ret = relay.Call(subgraph0, [x, w0, w1, w2])

    # subgraph1
    gcc_input4 = relay.var('gcc_input4', shape=(10, 10))
    gcc_input5 = relay.var('gcc_input5', shape=(10, 10))
    gcc_input6 = relay.var('gcc_input6', shape=(10, 10))
    gcc_input7 = relay.var('gcc_input7', shape=(10, 10))
    subgraph1 = relay.Function([gcc_input4, gcc_input5, gcc_input6, gcc_input7],
                               relay.copy(gcc_input4))
    subgraph1 = subgraph1.set_attribute("Primitive", tvm.tir.IntImm("int32", 1))

    # Call subgraph1
    subgraph1_ret = relay.Call(subgraph1, [x, w3, w4, w5])

    # Other ops that will be executed on TVM.
    add2 = relay.add(x, w6)
    sub2 = relay.subtract(add2, w7)
    ret = relay.concatenate((subgraph0_ret, subgraph1_ret, sub2), 0)
    func = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], ret)
    mod = tvm.IRModule.from_expr(func)
    _, lib, _ = relay.build(mod, "llvm")
    return lib
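
# A plausible way to consume the module returned above (an assumption, not
# part of the original test): the value is a tvm.runtime.Module, so it can be
# serialized to a shared library and loaded back via the standard runtime API.
# The helper name and path below are hypothetical.
def _export_synthetic_lib_example(path="synthetic.so"):
    lib = get_synthetic_lib()
    lib.export_library(path)                  # standard tvm.runtime.Module API
    return tvm.runtime.load_module(path)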
def test_copy_infer_type():
    ib = relay.ir_builder.IRBuilder()
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = ib.param("x", relay.ty.TensorType((n, t, d), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.copy(x))
    ib.ret(func)
    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((n, t, 100), "float32")
def test_vm_reshape_and_copy(target, dev):
    """Make sure the compiler notices the reshape result shape is a literal
    and can use the immediate-mode alloc_tensor instruction instead of
    alloc_tensor_reg."""
    x_np = np.random.uniform(size=(1, 1)).astype("float32")
    x = relay.var("x", shape=(1, 1), dtype="float32")
    mod = tvm.IRModule.from_expr(
        relay.Function([x], relay.copy(relay.reshape(x, [0, 1]))))
    with tvm.transform.PassContext(opt_level=3):
        exe = relay.vm.compile(mod, "llvm")
    assert "alloc_tensor" in exe.bytecode
    assert "alloc_tensor_reg" not in exe.bytecode
    check_result(target, dev, [x_np], x_np.reshape([1, 1]), mod)
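
# check_result is a helper defined elsewhere in the original test file. A
# minimal stand-in is sketched here, under the assumption that it simply runs
# the module on the Relay VM and compares against the expected NumPy array.
def check_result(target, dev, args, expected, mod):
    vm_exec = relay.vm.compile(mod, target)
    vm = tvm.runtime.vm.VirtualMachine(vm_exec, dev)
    out = vm.run(*args)
    tvm.testing.assert_allclose(out.numpy(), expected)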
def expected(dshape):
    x = relay.var("x", shape=dshape)
    pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    f0 = relay.Function([x], pooled)

    p0 = relay.var("p0", shape=(dshape[0], dshape[1], dshape[2] // 2, dshape[3] // 2))
    p1 = relay.var("p1", shape=(dshape[0], dshape[1], dshape[2], dshape[3]))
    p1_copy = relay.copy(p1)
    upsampled = relay.nn.upsampling(p0, scale=2, layout="NCHW")
    out = relay.Tuple((upsampled, p1_copy))
    f1 = relay.Function([p0, p1], out)

    x = relay.var("x", shape=dshape)
    y = relay.Call(f0, [x])
    z = relay.Call(f1, [y, x])
    return relay.Function([x], z)
def test_copy_grad():
    data = relay.var("data", relay.TensorType((10, 4), "float64"))
    fwd_func = relay.Function([data], relay.copy(data))
    # check_grad (test helper) numerically verifies the gradient of relay.copy.
    check_grad(fwd_func)
def verify_copy(dshape, dtype="float32"):
    x = relay.var("x", relay.ty.TensorType(dshape, dtype))
    y = relay.copy(x)
    func = relay.Function([x], y)
    x_data = np.random.uniform(size=dshape).astype(dtype)
    # verify_results is a helper from the surrounding test suite.
    verify_results(func, [x_data], "test_copy", rtol=1e-4, atol=1e-4)
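
# Hypothetical driver: exercise verify_copy over a few arbitrary shapes and
# dtypes. verify_copy above, and the verify_results helper it calls, are
# assumed to be in scope.
if __name__ == "__main__":
    verify_copy((1, 3))
    verify_copy((4, 5, 6))
    verify_copy((2, 2), dtype="float64")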