def validate(shape, value, dtype):
    def before_left(x, elem_op, full):
        return elem_op(full, x)

    def after_left(x, elem_op, value):
        return elem_op(relay.const(value, dtype), x)

    def before_right(x, elem_op, full):
        return elem_op(x, full)

    def after_right(x, elem_op, value):
        return elem_op(x, relay.const(value, dtype))

    x = relay.var("x", shape=shape, dtype=dtype)
    elem_ops = [relay.add, relay.multiply, relay.subtract, relay.divide]

    full_ops = []
    if value == 0:
        full_ops.append(relay.zeros(shape, dtype))
        full_ops.append(relay.zeros_like(x))
    elif value == 1:
        full_ops.append(relay.ones(shape, dtype))
        full_ops.append(relay.ones_like(x))
    else:
        full_ops.append(relay.full(relay.const(value, dtype), shape))
        full_ops.append(relay.full_like(x, relay.const(value, dtype)))

    for op in elem_ops:
        for full in full_ops:
            z = before_left(x, op, full)
            zz = run_opt_pass(z, transform.SimplifyExpr())
            after = run_opt_pass(after_left(x, op, value), transform.InferType())
            assert tvm.ir.structural_equal(zz, after)

            z = before_right(x, op, full)
            zz = run_opt_pass(z, transform.SimplifyExpr())
            after = run_opt_pass(after_right(x, op, value), transform.InferType())
            assert tvm.ir.structural_equal(zz, after)

    # Test the case in which x is broadcast to full's shape.
    # `shape * 2` doubles the rank (e.g. [10] -> [10, 10]), so full's shape
    # is strictly larger than x's and the op must NOT be simplified away:
    # the expected expression is the input itself, after type inference.
    full_ops = []
    if value == 0:
        full_ops.append(relay.zeros(shape * 2, dtype))
    elif value == 1:
        full_ops.append(relay.ones(shape * 2, dtype))
    else:
        full_ops.append(relay.full(relay.const(value, dtype), shape * 2))

    for op in elem_ops:
        for full in full_ops:
            z = before_left(x, op, full)
            zz = run_opt_pass(z, transform.SimplifyExpr())
            after = run_opt_pass(before_left(x, op, full), transform.InferType())
            assert tvm.ir.structural_equal(zz, after)

            z = before_right(x, op, full)
            zz = run_opt_pass(z, transform.SimplifyExpr())
            after = run_opt_pass(before_right(x, op, full), transform.InferType())
            assert tvm.ir.structural_equal(zz, after)
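# A plausible driver for validate() above -- a sketch, not taken from this
# file. The shapes, dtypes, and values below are assumptions chosen so that
# every full_ops branch (zeros, ones, and the generic full) is exercised;
# list shapes also make `shape * 2` meaningful for the broadcast case.
def test_simplify_full_elementwise():
    for shape in [[10], [10, 10]]:
        for dtype in ["float32", "int32"]:
            for value in [0, 1, 2]:
                validate(shape, value, dtype)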
def test_concretize_zeros_like():
    dtype = "int32"
    shape_like = relay.var("shape_like", shape=(3, 4, 5), dtype=dtype)
    expr = relay.zeros_like(shape_like)

    expected = run_infer_type(relay.zeros((3, 4, 5), dtype))
    actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
    assert tvm.ir.structural_equal(actual, expected)
def after(annotate_non_call_ops):
    var1 = relay.var("var1", shape=(2,))
    var2 = relay.var("var2", shape=(), dtype="int32")
    var3 = relay.var("var3", shape=(2,))
    var4 = relay.const(10, dtype="int32")

    # loop condition: var2 < 10
    cb_1 = relay.annotation.compiler_begin(var2, target)
    cb_2 = relay.annotation.compiler_begin(var4, target)
    less_condition = relay.less(cb_1, cb_2)
    ce_1 = relay.annotation.compiler_end(less_condition, target)

    loop = relay.var("while_loop")

    # true branch of the if: increment the counter, accumulate, and recurse
    cb_3 = relay.annotation.compiler_begin(var2, target)
    cb_4 = relay.annotation.compiler_begin(relay.const(1, dtype="int32"), target)
    add_op_1 = relay.add(cb_3, cb_4)
    ce_2 = relay.annotation.compiler_end(add_op_1, target)
    cb_5 = relay.annotation.compiler_begin(ce_2, "default") if annotate_non_call_ops else ce_2
    cb_6 = relay.annotation.compiler_begin(var3, target)
    cb_7 = relay.annotation.compiler_begin(var1, target)
    add_op_2 = relay.add(cb_6, cb_7)
    ce_3 = relay.annotation.compiler_end(add_op_2, target)
    cb_8 = relay.annotation.compiler_begin(ce_3, "default") if annotate_non_call_ops else ce_3
    true_branch = loop(cb_5, cb_8)  # recursive call into the while loop
    ce_4 = (
        relay.annotation.compiler_end(true_branch, "default")
        if annotate_non_call_ops
        else true_branch
    )
    if_condition = relay.If(ce_1, ce_4, var3)

    # initial call into the while loop
    const_1 = relay.const(0, dtype="int32")
    cb_9 = (
        relay.annotation.compiler_begin(const_1, "default") if annotate_non_call_ops else const_1
    )
    cb_10 = relay.annotation.compiler_begin(var1, target)
    zeros_like = relay.zeros_like(cb_10)
    ce_5 = relay.annotation.compiler_end(zeros_like, target)
    cb_11 = relay.annotation.compiler_begin(ce_5, "default") if annotate_non_call_ops else ce_5
    while_condition = loop(cb_9, cb_11)
    ce_6 = (
        relay.annotation.compiler_end(while_condition, "default")
        if annotate_non_call_ops
        else while_condition
    )

    func_1 = relay.Function([var2, var3], if_condition)
    ret = relay.Let(loop, func_1, ce_6)
    func_2 = relay.Function([var1], ret)
    mod = tvm.IRModule.from_expr(func_2)
    return mod
def verify_any_full(x_shape, x_np_shape, relay_op, np_op, dtype='float32'):
    x = relay.var('x', shape=x_shape, dtype=dtype)
    mod = relay.module.Module()
    # Use the op passed in by the caller; the original body hardcoded
    # zeros_like and left the relay_op/np_op parameters unused.
    mod['main'] = relay.Function([x], relay_op(x))
    x_np = np.random.uniform(size=x_np_shape).astype(dtype)
    res_np = np_op(x_np)
    for kind in ['debug', 'vm']:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target='llvm')
        result = ex.evaluate()(x_np).asnumpy()
        tvm.testing.assert_allclose(result, res_np)
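# A hedged usage sketch for verify_any_full(): the relay.Any() dimensions and
# the concrete runtime shape below are assumptions for illustration, matching
# the legacy executor API already used in the function body.
verify_any_full((relay.Any(), relay.Any(), relay.Any()), (2, 3, 4),
                relay.zeros_like, np.zeros_like)
verify_any_full((relay.Any(), relay.Any(), relay.Any()), (2, 3, 4),
                relay.ones_like, np.ones_like)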
def safe_exp(w):
    # Split the domain at e: use a linear approximation where w > e and the
    # true exponential elsewhere, to avoid overflow for large inputs.
    slope = relay.const(np.exp(1, dtype=np.float32))
    lin_bool = w > slope
    lin_region = relay.cast(lin_bool, "float32")
    lin_out = slope * w
    # In the linear region, replace w with 0 before exp() so the exponential
    # cannot overflow; the mask discards this exp(0) = 1 value anyway.
    exp_out = relay.exp(relay.where(lin_bool, relay.zeros_like(w), w))
    out = lin_region * lin_out + (relay.const(1.0) - lin_region) * exp_out
    return out
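# A minimal sketch of wrapping safe_exp() into an executable Relay function;
# the input shape and the executor invocation here are assumptions, not taken
# from this file.
w = relay.var("w", shape=(4,), dtype="float32")
func = relay.Function([w], safe_exp(w))
mod = tvm.IRModule.from_expr(func)
# e.g. relay.create_executor("graph", mod=mod).evaluate()(
#     np.array([0.5, 1.0, 3.0, 10.0], dtype="float32"))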
def build_relay_module(batch_size, input_size, hidden_size, time_steps, dense_dim):
    mod = tvm.IRModule()
    mod["lstm_layer"] = lstm_definition(batch_size, input_size, hidden_size, time_steps)
    mod["linear_layer"] = linear_layer_definition(batch_size, hidden_size, dense_dim)
    lstm_var = mod.get_global_var("lstm_layer")
    linear_var = mod.get_global_var("linear_layer")

    # now we build up our main function
    input_var = relay.var("input", shape=(batch_size, time_steps, input_size))
    init_hidden_var = relay.var("init_hidden", shape=(batch_size, hidden_size))
    init_cell_var = relay.var("init_cell", shape=(batch_size, hidden_size))
    i2h_weight_var = relay.var("i2h_weight", shape=(4 * hidden_size, input_size))
    h2h_weight_var = relay.var("h2h_weight", shape=(4 * hidden_size, hidden_size))
    lstm_bias_var = relay.var("lstm_bias", shape=(4 * hidden_size,))
    linear_weight_var = relay.var("linear_weight", shape=(dense_dim, hidden_size))
    linear_bias_var = relay.var("linear_bias", shape=(dense_dim,))

    builder = relay.ScopeBuilder()
    state_var = builder.let("state", relay.Tuple([init_hidden_var, init_cell_var]))
    lstm_res = builder.let(
        "lstm_res",
        lstm_var(
            input_var,
            state_var,
            i2h_weight_var,
            h2h_weight_var,
            lstm_bias_var,
            # the keras model only gave one bias,
            # so set the other to zero
            # (hopefully this is correct)
            relay.zeros_like(lstm_bias_var),
        ),
    )
    final_hidden = builder.let("final_hidden", relay.TupleGetItem(lstm_res, 1))
    # to match PT's semantics, we're undoing the reshape in LSTM :)
    reshape_hidden = builder.let("reshape_hidden", relay.squeeze(final_hidden, axis=[0]))
    linear_result = builder.let(
        "linear_result", linear_var(reshape_hidden, linear_weight_var, linear_bias_var)
    )
    # finally do a softmax
    builder.ret(relay.nn.softmax(linear_result))

    main_func = relay.Function(
        [
            input_var,
            init_hidden_var,
            init_cell_var,
            i2h_weight_var,
            h2h_weight_var,
            lstm_bias_var,
            linear_weight_var,
            linear_bias_var,
        ],
        builder.get(),
    )
    mod["main"] = main_func
    return mod
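# A hedged usage sketch: lstm_definition and linear_layer_definition are
# defined elsewhere and not shown here, and the sizes below are placeholder
# assumptions purely for illustration.
mod = build_relay_module(batch_size=1, input_size=16, hidden_size=32,
                         time_steps=5, dense_dim=10)
print(mod["main"])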
def test_zeros_like(self):
    c = relay.expr.const(np.ones((1, 6, 4, 4), np.float32))
    net = relay.zeros_like(c)
    net = relay.Function([], net)
    mod = tvm.IRModule.from_expr(net)
    mod = relay.transform.InferType()(mod)

    xgraph = xf_relay.from_relay(mod, {})
    layers = xgraph.get_layers()

    assert layers[0].type[0] == "Constant"
    assert layers[1].type[0] == "AnyOp"
    assert layers[1].shapes == [1, 6, 4, 4]
def test_concretize_multiple():
    x = relay.var("x", shape=(2, 3), dtype="float32")
    y = relay.var("y", shape=(3,), dtype="float32")
    l = x + y
    dl = relay.ones_like(l)
    dx = relay.zeros_like(x)
    dy = relay.zeros_like(y)
    dx = dx + relay.collapse_sum_like(dl, dx)
    dy = dy + relay.collapse_sum_like(dl, dy)
    ret = relay.Tuple([dx, dy])

    dl_c = relay.ones((2, 3), "float32")
    # NOTE: these are removed by EliminateIdentity
    # dx_c = relay.zeros((2, 3), "float32")
    # dy_c = relay.zeros((3,), "float32")
    dx_c = relay.collapse_sum_to(dl_c, (2, 3))
    dy_c = relay.collapse_sum_to(dl_c, (3,))
    ret_c = relay.Tuple([dx_c, dy_c])

    expected = run_infer_type(ret_c)
    actual = run_opt_pass(ret, relay.transform.SimplifyExpr())
    assert tvm.ir.structural_equal(actual, expected)
def before():
    var1 = relay.var("var1", shape=(2,))
    var2 = relay.var("var2", shape=(), dtype="int32")
    var3 = relay.var("var3", shape=(2,))
    cond = relay.less(var2, relay.const(10, dtype="int32"))

    loop = relay.var("while_loop")
    ii = var2 + relay.const(1, dtype="int32")
    ss = var3 + var1
    true_branch = loop(ii, ss)
    ife = relay.If(cond, true_branch, var3)
    func_1 = relay.Function([var2, var3], ife)

    ret = relay.Let(
        loop, func_1, loop(relay.const(0, dtype="int32"), relay.zeros_like(var1))
    )
    func_2 = relay.Function([var1], ret)
    mod = tvm.IRModule.from_expr(func_2)
    return mod
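# A sketch of how before() and the annotated after() builder above might be
# compared. The pass under test (AnnotateTarget with the free variable
# `target`) and its arguments are assumptions inferred from the
# compiler_begin/compiler_end annotations that after() constructs.
for annotate_non_call_ops in [True, False]:
    result = relay.transform.AnnotateTarget(target, annotate_non_call_ops)(before())
    expected = relay.transform.InferType()(after(annotate_non_call_ops))
    assert tvm.ir.structural_equal(expected, result)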
def test_zeros_like():
    """Simple test using "zeros_like" op"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.Function([x], x + relay.zeros_like(x))

    mod["main"] = y
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x.numpy())