def convert_graph_layout(mod, desired_layout):
    """Alter the layout of the input graph.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module to convert.
    desired_layout : str
        The layout to convert to.

    Returns
    -------
    mod : tvm.IRModule
        The converted module.
    """

    # Assume for the time being that graphs only have
    # conv2d as heavily layout-sensitive operators.
    desired_layouts = {
        "nn.conv2d": [desired_layout, "default"],
        "nn.conv2d_transpose": [desired_layout, "default"],
        "qnn.conv2d": [desired_layout, "default"],
    }

    # Convert the layout of the graph where possible.
    seq = transform.Sequential(
        [
            relay.transform.RemoveUnusedFunctions(),
            relay.transform.ConvertLayout(desired_layouts),
        ]
    )

    with transform.PassContext(opt_level=3):
        try:
            return seq(mod)
        except Exception as err:
            raise TVMCException(
                "Error converting layout to {0}: {1}".format(desired_layout, str(err))
            )
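# The usage sketch below is not part of the function above. It builds a tiny
# Relay module with a single NCHW nn.conv2d (illustrative shapes and names)
# and runs convert_graph_layout on it to rewrite the graph to NHWC. It assumes
# `from tvm import relay, transform` is in scope for the function body and
# defines a stand-in TVMCException, which in TVM itself lives in the tvmc driver.
import tvm
from tvm import relay, transform


class TVMCException(Exception):
    """Stand-in for the tvmc driver's exception type (illustrative only)."""


# Build a tiny Relay module with one NCHW conv2d.
x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
w = relay.var("w", shape=(8, 3, 3, 3), dtype="float32")
conv = relay.nn.conv2d(
    x, w, kernel_size=(3, 3), padding=(1, 1),
    data_layout="NCHW", kernel_layout="OIHW",
)
mod = tvm.IRModule.from_expr(relay.Function([x, w], conv))

# Rewrite the graph to NHWC where possible and print the converted module.
nhwc_mod = convert_graph_layout(mod, "NHWC")
print(nhwc_mod)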
# Caffe2 input tensor name, shape and type
input_name = resnet50.predict_net.op[0].input[0]
shape_dict = {input_name: data.shape}
dtype_dict = {input_name: data.dtype}

# parse Caffe2 model and convert into Relay computation graph
from tvm import relay, transform

mod, params = relay.frontend.from_caffe2(
    resnet50.init_net, resnet50.predict_net, shape_dict, dtype_dict
)

# compile the model
# target x86 CPU
target = "llvm"
with transform.PassContext(opt_level=3):
    lib = relay.build(mod, target, params=params)

######################################################################
# Execute on TVM
# ---------------
# The process is no different from other examples.
import tvm
from tvm import te
from tvm.contrib import graph_runtime

# context x86 CPU, use tvm.gpu(0) if you run on GPU
dev = tvm.cpu(0)
# create a runtime executor module
m = graph_runtime.GraphModule(lib["default"](dev))
# set inputs
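######################################################################
# The snippet above stops at setting the inputs. The lines below are a sketch,
# not the original tutorial text, of how the GraphModule is typically driven:
# feed the preprocessed `data` array prepared earlier, run the graph, and read
# back the first output. The (1, 1000) output shape assumes an ImageNet
# classifier such as ResNet-50.
import numpy as np

# feed the input tensor by name
m.set_input(input_name, tvm.nd.array(data.astype("float32")))
# run the computation
m.run()
# get the first output, shape (1, 1000) for a 1000-class ImageNet model
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.asnumpy()[0])
print("TVM prediction top-1 id:", top1)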