def f_timer(
    rt_mod: Union[tvm.runtime.Module, tvm.runtime.vm.Executable],
    dev: tvm.device,
    input_data: Dict[str, NDArray],
) -> None:
    """Benchmark the given runtime module (or VM executable) and print timing stats.

    Dispatches on the module-level ``backend`` flag: ``"vm"`` uses the Relay
    VirtualMachine's own benchmark helper, ``"graph"`` wraps the module in a
    GraphExecutor and times its ``run`` entry point. Any failure is caught and
    reported rather than raised, since this is a best-effort RPC measurement.

    Parameters
    ----------
    rt_mod : Union[tvm.runtime.Module, tvm.runtime.vm.Executable]
        The runtime module or vm executable.
    dev : tvm.device
        The device type to run workload.
    input_data : Dict[str, np.ndarray]
        The input data as a dictionary.
    """
    # pylint:disable=import-outside-toplevel
    from tvm.contrib.graph_executor import GraphModule
    from tvm.runtime.vm import VirtualMachine

    # pylint:enable=import-outside-toplevel
    try:
        if backend == "vm":
            machine = VirtualMachine(rt_mod, dev)
            ftimer = machine.benchmark(
                dev,
                min_repeat_ms=500,
                repeat=5,
                number=1,
                end_to_end=False,
                **input_data,
            )
        elif backend == "graph":
            graph_mod = GraphModule(rt_mod["default"](dev))
            for name, value in input_data.items():
                graph_mod.set_input(name, value)
            evaluator = graph_mod.module.time_evaluator(
                "run", dev, min_repeat_ms=500, repeat=5, number=1
            )
            ftimer = evaluator()
        else:
            raise ValueError(f"Backend {backend} not supported in f_timer!")
        # Convert seconds -> milliseconds; keep np scalars so printing matches.
        results = list(np.array(ftimer.results) * 1000.0)  # type: ignore
        print("Running time in time_evaluator: ", results)
        print("-------------------------------")
        print(f" Min (ms) : {min(results)}")
        print(f" Max (ms) : {max(results)}")
        print(f" Median (ms) : {median(results)}")
        print(f"Average (ms) : {sum(results) / len(results)}")
    except Exception as exc:  # pylint: disable=broad-except
        # Best-effort: surface the failure but never crash the caller.
        print(f"Run module f_timer via RPC failed, exception: {exc}")
def f_timer(rt_mod, dev, input_data):
    """Benchmark a graph-executor runtime module and print its run times.

    Parameters
    ----------
    rt_mod : tvm.runtime.Module
        The built runtime module exposing a ``"default"`` graph executor factory.
    dev : tvm.device
        The device to run the workload on.
    input_data : Dict[str, NDArray]
        Input tensors keyed by input name, fed to the executor before timing.
    """
    # pylint: disable=import-outside-toplevel
    from tvm.contrib.graph_executor import GraphModule

    # pylint: enable=import-outside-toplevel
    mod = GraphModule(rt_mod["default"](dev))
    # BUGFIX: the original called `mod.set_input(input_name, input_data)` with
    # an undefined `input_name`. Treat `input_data` as a name->tensor mapping,
    # matching the sibling f_timer's graph branch.
    for input_name, input_value in input_data.items():
        mod.set_input(input_name, input_value)
    ftimer = mod.module.time_evaluator(
        "run",
        dev,
        min_repeat_ms=500,
        repeat=3,
    )
    # Convert seconds -> milliseconds for readability.
    results = list(np.array(ftimer().results) * 1000.0)  # type: ignore
    print("Running time in time_evaluator: ", results)
def run_with_graph_executor(
    rt_mod: "Module",
    device: "Device",
    evaluator_config: "EvaluatorConfig",
    repeated_args: List["NDArray"],
) -> List[float]:
    """Measure a Relay module with the GraphExecutor and return raw run times.

    Parameters
    ----------
    rt_mod : Module
        The Relay module to run.
    device : Device
        The device to run the module on.
    evaluator_config : EvaluatorConfig
        The evaluator configuration to run the module with.
    repeated_args : List[NDArray]
        The list of repeated arguments to run the module with.

    Returns
    -------
    results : List[float]
        The list of results.
    """
    from tvm.contrib.graph_executor import GraphModule  # pylint: disable=import-outside-toplevel

    executor = GraphModule(rt_mod["default"](device))
    # Optionally flush the CPU cache before each measurement (non-first arg).
    preproc = (
        "cache_flush_cpu_non_first_arg"
        if evaluator_config.enable_cpu_cache_flush
        else ""
    )
    timer = executor.module.time_evaluator(
        func_name="run",
        dev=device,
        number=evaluator_config.number,
        repeat=evaluator_config.repeat,
        min_repeat_ms=evaluator_config.min_repeat_ms,
        f_preproc=preproc,
    )
    per_trial = [timer(*args).results for args in repeated_args]
    # Flatten the per-trial result tuples into one list of float costs.
    return [float(cost) for trial in per_trial for cost in trial]