def fuzz_expr(expr, tvm_pass):
    """Differentially test a Relay pass on a single expression.

    Runs `expr` on random concrete inputs, applies `tvm_pass`, and checks
    that the interpreter, the VM, and the AoT compiler all agree with the
    original output.

    Returns (args, original_out, divergent_out) on the first mismatch,
    or None (implicitly) if every backend agrees.
    """
    mod = tvm.relay.Module()
    mod["main"] = expr

    # Random concrete inputs, one per declared argument type.
    concrete_args = [gen_random_type(t)
                     for t in mod["main"].checked_type.arg_types]

    # Reference output: the original expression on the interpreter.
    reference = relay.create_executor().evaluate(expr)(*concrete_args)

    # Apply the pass under test.
    transformed = tvm_pass(mod)

    # Backend 1: interpreter on the transformed module.
    candidate = relay.create_executor().evaluate(
        transformed["main"])(*concrete_args)
    if not check_outs(reference, candidate):
        return (concrete_args, reference, candidate)

    # Backend 2: the Relay VM (takes raw NDArray data, hence `.data`).
    vm_exec = relay.create_executor(kind="vm", mod=transformed)
    candidate = vm_exec.evaluate(
        transformed["main"])(*[a.data for a in concrete_args])
    if not check_outs(reference, candidate):
        return (concrete_args, reference, candidate)

    # Backend 3: ahead-of-time compilation targeting LLVM on CPU.
    compiled = aot.compile(transformed["main"], transformed,
                           tvm.context('llvm', 0), tvm.target.create('llvm'))
    candidate = compiled(*concrete_args)
    if not check_outs(reference, candidate):
        return (concrete_args, reference, candidate)
def double_example():
    """Demo: define `double(x) = x + x` in a Relay module, compile a
    function that applies it twice, and print the result for 1.5."""
    mod = Module()

    # double(x) = x + x, registered under a global name.
    x = var('x', shape=())
    double = GlobalVar('double')
    mod[double] = Function([x], x + x)

    # f(x) = double(double(x)) — a fresh scalar var for the caller.
    x = var('x', shape=())
    f = Function([x], double(double(x)))

    # Compile and run on a concrete scalar.
    cfunc = compile(f, mod)
    a = tvm.nd.array(np.array(1.5, dtype='float32'))
    print(cfunc(a).asnumpy())
def treelstm_setup(device, method, dataset, idx):
    """Prepare a TreeLSTM inference thunk for benchmarking.

    Args:
        device: 'gpu' or 'cpu' — selects the TVM context and target.
        method: 'aot' for ahead-of-time compilation, anything else for the
            Relay executor with an optimization pipeline.
        dataset: dataset identifier passed to `initialize_treelstm`.
        idx: index of the sample to run.

    Returns:
        A single-element list containing a zero-argument thunk that runs
        the compiled TreeLSTM on the selected sample.
    """
    use_aot = (method == 'aot')
    use_gpu = (device == 'gpu')

    # The PyTorch reference model always runs on CPU regardless of `device`.
    torch_cpu = torch.device('cpu')
    model, data = initialize_treelstm(dataset)
    model.to(torch_cpu)
    model.eval()

    ltree, linput, rtree, rinput, label = data[idx]
    # Fix: reuse `torch_cpu` instead of re-constructing torch.device('cpu')
    # twice. (rtree/rinput/label are unpacked but unused below — kept for
    # clarity of the dataset record shape.)
    linput, rinput = linput.to(torch_cpu), rinput.to(torch_cpu)
    linput = model.emb(linput)

    tlstm, mod, prelude = converter.initialize_tlstm(300, 150)
    rosetree = converter.forward(ltree, linput)
    relay_tree = converter.from_tree(
        prelude,
        rosetree.fmap(converter.pytorch_to_relay),
        relay.TensorType([], dtype='float32'))

    context = tvm.gpu(0) if use_gpu else tvm.cpu(0)
    target = tvm.target.cuda() if use_gpu else tvm.target.create('llvm')

    if use_aot:
        mod['main'] = tlstm.get()
        func = aot.compile(tlstm.get(), mod, ctx=context, tgt=target)
    else:
        opts = relay.transform.Sequential(
            [relay.transform.SimplifyInference(), relay.transform.FuseOps()])
        mod['main'] = tlstm.get()
        # Fix: Relay passes return a new module rather than mutating in
        # place; the original discarded `opts(mod)`, so the pipeline never
        # took effect. Rebind the transformed module before execution.
        mod = opts(mod)
        executor = relay.create_executor(mod=mod, ctx=context, target=target)
        func = executor.evaluate()

    thunk = lambda: func(relay_tree)
    return [thunk]
def __init__(self, do_aot, use_gpu, *args):
    """Build the Relay module and compile the forward pass.

    Args:
        do_aot: compile ahead-of-time when True, otherwise use the
            Relay executor.
        use_gpu: select CUDA context/target when True, LLVM CPU otherwise.
        *args: forwarded to `self.compute`, which returns the forward
            pass (inputs, body, return type) and may append to
            `self.parameters`.
    """
    assert isinstance(do_aot, bool)
    assert isinstance(use_gpu, bool)

    self.mod = Module()
    self.prelude = Prelude(self.mod)
    self.use_gpu = use_gpu
    self.context = tvm.gpu(0) if use_gpu else tvm.cpu(0)
    self.target = tvm.target.cuda() if use_gpu else tvm.target.create('llvm')
    self.executor = create_executor(
        mod=self.mod, ctx=self.context, target=self.target)

    self.parameters = []
    self.forward_var = relay.GlobalVar('forward_var')

    # Build the forward pass; compute() may register entries in
    # self.parameters as a side effect, so read it only afterwards.
    inputs, body, ret_type = self.compute(*args)
    self.inputs = inputs
    param_vars = [p[0] for p in self.parameters]
    forward_compute = relay.Function(inputs + param_vars, body, ret_type)
    self.mod[self.forward_var] = forward_compute
    self.mod['main'] = self.mod[self.forward_var]

    if do_aot:
        self.forward = aot.compile(
            self.forward_var, self.mod, ctx=self.context, tgt=self.target)
    else:
        self.forward = self.executor.evaluate(self.forward_var)

    # Placeholder slots for runtime inputs, followed by parameter values.
    param_values = [p[1] for p in self.parameters]
    self.args = [None] * len(inputs) + param_values
def compile(f, mod):
    """Ahead-of-time compile the Relay function `f` from module `mod`
    for the LLVM CPU backend.

    NOTE(review): this shadows the builtin `compile`; renaming would
    break existing callers (e.g. `double_example`), so the name is kept.
    """
    context = tvm.context('llvm', 0)
    target = tvm.target.create('llvm')
    return aot.compile(f, mod, ctx=context, tgt=target)