import tvm
from tvm import relay


def test_bind_params():
    x = relay.var("x")
    y = relay.var("y")
    z = relay.add(x, y)
    f = relay.Function([x, y], z)
    fbinded = relay.bind(f, {x: relay.const(1, "float32")})
    fexpected = relay.Function([y], relay.add(relay.const(1, "float32"), y))
    assert relay.analysis.alpha_equal(fbinded, fexpected)

    zbinded = relay.bind(z, {y: x})
    zexpected = relay.add(x, x)
    assert relay.analysis.alpha_equal(zbinded, zexpected)
def annotate(func, compiler):
    """An annotator for Core ML."""
    # Bind free variables to their constant values. `params` (a dict of weights)
    # and `transform` (tvm.relay.transform) come from the enclosing module.
    bind_dict = {}
    for arg in func.params:
        name = arg.name_hint
        if name in params:
            bind_dict[arg] = relay.const(params[name])
    func = relay.bind(func, bind_dict)

    # Annotate the entire graph for Core ML.
    mod = tvm.IRModule()
    mod["main"] = func

    seq = tvm.transform.Sequential(
        [
            transform.SimplifyInference(),
            transform.FoldConstant(),
            transform.FoldScaleAxis(),
            transform.AnnotateTarget(compiler),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )

    with relay.build_config(opt_level=3):
        mod = seq(mod)

    return mod
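# A minimal usage sketch for the annotator above (an assumption-based example,
# not taken from the original source): it assumes `params` is defined at module
# level as a dict of NumPy weights, and that the Core ML BYOC compiler is
# registered under the name "coremlcompiler". Shapes are arbitrary.
import numpy as np
from tvm.relay import transform  # needed by annotate() above

data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
weight = relay.var("weight", shape=(16, 3, 3, 3), dtype="float32")
out = relay.nn.conv2d(data, weight, kernel_size=(3, 3), channels=16)
simple_func = relay.Function([data, weight], out)

params = {"weight": np.random.uniform(size=(16, 3, 3, 3)).astype("float32")}
partitioned_mod = annotate(simple_func, "coremlcompiler")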
def make_ethosn_partition(ethosn_expr):
    # Create an Ethos-N global function
    mod = tvm.IRModule({})
    vars = relay.analysis.free_vars(ethosn_expr)
    # NB: it is illegal to reuse variables inside and outside a scope in Relay;
    # if you want to duplicate types and names you must re-allocate them.
    fresh_vars = [relay.Var(v.name_hint, v.type_annotation) for v in vars]
    binds = {}
    for var, fresh_var in zip(vars, fresh_vars):
        binds[var] = fresh_var
    ethosn_expr_fresh = relay.bind(ethosn_expr, binds)
    func = relay.Function(fresh_vars, ethosn_expr_fresh)
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", "ethos-n")
    func = func.with_attr("global_symbol", "ethos-n_0")
    g1 = relay.GlobalVar("ethos-n_0")
    mod[g1] = func
    mod = relay.transform.InferType()(mod)

    # These are the vars to call the Ethos-N partition with
    more_vars = relay.analysis.free_vars(ethosn_expr)
    # Call the Ethos-N partition in main
    call_fn1 = g1(*more_vars)
    mod["main"] = relay.Function(more_vars, call_fn1)
    return relay.transform.InferType()(mod)
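# A small usage sketch (illustrative only, not from the original test): wrap a
# single abs() expression as an Ethos-N partition. The operator, shape, and
# dtype are arbitrary choices.
a = relay.var("a", shape=(1, 4), dtype="uint8")
ethosn_expr = relay.abs(a)
partitioned = make_ethosn_partition(ethosn_expr)
print(partitioned["main"])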
def relay_partial(c, fn, *args):
    """Implementation of partial for Relay."""
    # `c` is the enclosing converter; `c.ref(x)` returns the Relay expression
    # corresponding to `x`.
    ty = to_relay_type(fn.abstract)
    rargs = [relay.var("") for a in ty.arg_types]
    fn = relay.Function(rargs, relay.Call(c.ref(fn), rargs))
    binds = {}
    for ra, a in zip(rargs, args):
        binds[ra] = c.ref(a)
    res = relay.bind(fn, binds)
    return res
def inline_params(mod, params):
    main_fn = mod["main"]
    # Map parameter names to the corresponding Relay vars of main.
    str_to_var = {}
    for param in main_fn.params:
        str_to_var[param.name_hint] = param
    # Bind each named weight to a constant node.
    bind_map = {}
    for param in params:
        bind_map[str_to_var[param]] = relay.const(params[param])
    body = relay.bind(main_fn.body, bind_map)
    main_fn = relay.Function(relay.analysis.free_vars(body), body)
    mod._add("main", main_fn, True)
    return mod
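# Hypothetical usage of inline_params: bind the named weight "w" of main as a
# constant, leaving only "x" as a runtime input. Names and shapes are made up
# for illustration.
import numpy as np

x = relay.var("x", shape=(2, 2), dtype="float32")
w = relay.var("w", shape=(2, 2), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x, w], relay.add(x, w)))

weights = {"w": np.ones((2, 2), dtype="float32")}
mod = inline_params(mod, weights)
assert len(mod["main"].params) == 1  # only "x" remains as an input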
def visit_call(self, call):
    if isinstance(call.op, GlobalVar):
        name = call.op.name_hint
        if name in self.subgraphs_to_remove:
            # "Inline" the subgraph back into new main function.
            func = self.mod[name]
            var_map = {}
            for arg, param in zip(call.args, func.params):
                var_map[param] = super().visit(arg)
            new_body = relay.bind(func.body, var_map)
            return new_body
        if name != "main":
            args = []
            for arg in call.args:
                args.append(super().visit(arg))
            return call.op(*args)
    return super().visit_call(call)
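# For context, visit_call above is a method of an expression mutator; a minimal
# skeleton of such a class is sketched below. The class name and constructor
# arguments are assumptions inferred from how the method uses self.mod and
# self.subgraphs_to_remove.
from tvm.ir import GlobalVar
from tvm.relay import ExprMutator


class SubgraphRemover(ExprMutator):
    """Inlines selected global functions back into the main function."""

    def __init__(self, subgraphs_to_remove, mod):
        super().__init__()
        self.subgraphs_to_remove = subgraphs_to_remove  # names of functions to inline
        self.mod = mod  # IRModule that owns those functions

    # visit_call from the example above goes here.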