def test_pipe_runtime_error_check():
    """Exercise the error paths of the pipeline configuration and executor.

    Every operation below is intentionally invalid and must raise a
    RuntimeError.  A minimal valid single-module pipeline is also built so
    the executor's own runtime checks (set_params) can be probed.
    """
    # Nothing to verify when pipeline executor build support is disabled.
    if not pipeline_executor_build.pipeline_executor_build_enabled():
        return

    # Three submodules split out of one network.
    (mod1, mod2, mod3), dshape = get_split_mod()

    # ---- Configuration-time errors -----------------------------------
    bad_config = pipeline_executor_build.PipelineConfig()

    # Out-of-range output index must fail.
    with pytest.raises(RuntimeError):
        bad_config[mod1]["output"][9]
    # Unknown input name must fail.
    with pytest.raises(RuntimeError):
        bad_config[mod1]["input"]["data_9"]

    # A pair of connections that closes a cycle in the module DAG must fail.
    with pytest.raises(RuntimeError):
        bad_config[mod1]["output"][0].connect(bad_config[mod2]["input"]["data_0"])
        bad_config[mod2]["output"][0].connect(bad_config[mod1]["input"]["data_0"])

    # Structurally illegal connections, each expected to raise.
    with pytest.raises(RuntimeError):
        # A module's output wired back into its own input.
        bad_config[mod1]["output"][0].connect(bad_config[mod1]["input"]["data_0"])
    with pytest.raises(RuntimeError):
        # An input connected onto itself.
        bad_config[mod1]["input"]["data_0"].connect(bad_config[mod1]["input"]["data_0"])
    with pytest.raises(RuntimeError):
        # A module input used as the source for another module input.
        bad_config[mod1]["input"]["data_0"].connect(bad_config[mod2]["input"]["data_0"])
    with pytest.raises(RuntimeError):
        # A module output routed into the global pipeline *input* table.
        bad_config[mod1]["output"][0].connect(bad_config["input"]["data_0"])
    with pytest.raises(RuntimeError):
        # A global pipeline input used as a sink for a module output.
        bad_config["input"]["data_0"].connect(bad_config[mod1]["output"][0])
    with pytest.raises(RuntimeError):
        # A global pipeline output used as a connection source.
        bad_config["output"]["0"].connect(bad_config[mod1]["output"][0])

    # ---- Executor-time errors ----------------------------------------
    # Build a minimal valid pipeline so executor-level checks can run.
    valid_config = pipeline_executor_build.PipelineConfig()
    valid_config[mod1].target = "llvm"
    valid_config[mod1].dev = tvm.cpu(0)
    valid_config["param_group"]["param_0"].connect(valid_config[mod1]["param"])
    valid_config[mod1]["output"][0].connect(valid_config["output"]["0"])

    with tvm.transform.PassContext(opt_level=3):
        pipeline_mod_factory = pipeline_executor_build.build(valid_config)
    pipeline_module = pipeline_executor.PipelineModule(pipeline_mod_factory)
    customized_parameters, _ = recreate_parameters(mod1)

    # Passing None for a known parameter group must fail.
    with pytest.raises(RuntimeError):
        pipeline_module.set_params("param_0", None)
    # Using an unknown parameter group name must fail.
    with pytest.raises(RuntimeError):
        pipeline_module.set_params("param_1", customized_parameters)
def test_pipeline():
    """Run the pipeline executor end to end and compare it with serial reference runs."""
    # Nothing to do when the pipeline executor feature is compiled out.
    if not pipeline_executor.pipeline_executor_enabled():
        return

    for target in tvm.testing.enabled_targets():
        # Remember the process CPU affinity so it can be restored after the
        # per-module affinities set below have been exercised.
        affinity = os.sched_getaffinity(0)

        # Three hand-written modules sharing one input shape.
        (mod1, mod2, mod3), dshape = get_mannual_mod()

        # Five batches of input data for the pipelined computation.
        datas = [np.full(dshape, 3 + i).astype("float32") for i in range(5)]

        pipe_config = pipeline_executor.PipelineConfig()
        customized_parameters, customized_parameters_mod = recreate_parameters(mod1)
        assert customized_parameters_mod == mod1

        # Global parameter group "param_0" feeds mod1's parameters.
        pipe_config["param_group"]["param_0"].connect(pipe_config[mod1]["param"])
        # Pipeline-level inputs: "data_a" -> mod1.data_0, "data_b" -> mod2.data_1.
        pipe_config["input"]["data_a"].connect(pipe_config[mod1]["input"]["data_0"])
        pipe_config["input"]["data_b"].connect(pipe_config[mod2]["input"]["data_1"])
        # Inter-module connections forming the DAG.
        pipe_config[mod1]["output"][0].connect(pipe_config[mod2]["input"]["data_0"])
        pipe_config[mod1]["output"][1].connect(pipe_config[mod3]["input"]["data_0"])
        pipe_config[mod2]["output"][0].connect(pipe_config[mod3]["input"]["data_1"])
        # Pipeline-level outputs: slot 0 from mod1.output(2), slot 1 from mod3.output(0).
        pipe_config[mod1]["output"][2].connect(pipe_config["output"]["0"])
        pipe_config[mod3]["output"][0].connect(pipe_config["output"]["1"])
        # print(pipe_config) would render the configuration roughly as:
        #
        # Inputs
        #   |data_a: mod1:data_0
        #   |data_b: mod2:data_1
        #
        # output
        #   |output(1) : mod1.output(2)
        #   |output(2) : mod3.output(0)
        #
        # connections
        #   |mod1.output(0)-> mod2.data_0
        #   |mod1.output(1)-> mod3.data_0
        #   |mod2.output(0)-> mod3.data_1

        # Per-module build target, device and CPU affinity.
        pipe_config[mod1].target = target[0]
        pipe_config[mod1].dev = target[1]
        pipe_config[mod1].cpu_affinity = "0"
        pipe_config[mod2].target = "llvm"
        pipe_config[mod2].dev = tvm.cpu(0)
        pipe_config[mod2].cpu_affinity = "0"
        pipe_config[mod3].target = "llvm"
        pipe_config[mod3].dev = tvm.cpu(0)
        pipe_config[mod3].cpu_affinity = "0"

        # The generated module-dependency configuration must match the
        # manually written reference.
        mconfig = pipe_config.get_config()
        assert mconfig["module_connection"] == get_manual_conf([mod1, mod2, mod3], target)

        # Build the pipeline module factory.
        with tvm.transform.PassContext(opt_level=3):
            pipeline_mod_factory = pipeline_executor.build(pipe_config)

        # Export the build artifacts, creating the target directory on demand.
        directory_path = tvm.contrib.utils.tempdir().temp_dir
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
        config_file_name = pipeline_mod_factory.export_library(directory_path)

        # The factory output initializes a PipelineModule directly...
        pipeline_module = pipeline_executor.PipelineModule(pipeline_mod_factory)
        assert pipeline_module
        # ...and so does the exported library file.
        pipeline_module_test = pipeline_executor.PipelineModule.load_library(config_file_name)

        # Sanity-check the loaded module's interface maps.
        assert pipeline_module_test.num_outputs == 2
        input_map = pipeline_module_test.get_input_pipeline_map("data_b")
        assert input_map[0] == "1" and input_map[1] == "data_1"
        input_map = pipeline_module_test.get_input_pipeline_map("data_a")
        assert input_map[0] == "0" and input_map[1] == "data_0"
        module_index = pipeline_module_test.get_params_group_pipeline_map("param_0")
        assert module_index == 0

        # Set the customized parameters through their group name.
        pipeline_module_test.set_params("param_0", customized_parameters)

        normal_outputs = []
        for batch_idx, data in enumerate(datas):
            # Reference result WITHOUT the customized parameters (expected to differ).
            wrong_output = run_modules(
                mconfig["module_connection"],
                tvm.cpu(),
                "llvm",
                "data_0",
                data,
                mod2,
                "data_1",
                data,
            )
            # Reference result WITH the customized parameters applied.
            normal_output = run_modules(
                mconfig["module_connection"],
                tvm.cpu(),
                "llvm",
                "data_0",
                data,
                mod2,
                "data_1",
                data,
                customized_parameters_mod,
                customized_parameters,
            )
            # Keep the expected result for the correctness check below.
            normal_outputs.append(normal_output)

            # Feed the batch into the pipeline executor.
            pipeline_module_test.set_input("data_a", data)
            pipeline_module_test.set_input("data_b", data)
            input_map = pipeline_module_test.get_input_pipeline_map("data_a")
            # Only the first runtime's input is directly observable; later
            # inputs go into a queue and cannot be checked here.
            if input_map[0] == "0":
                input_data = pipeline_module_test.get_input("data_a")
                tvm.testing.assert_allclose(data, input_data.numpy())
            # Launch pipeline-mode execution for this batch.
            pipeline_module_test.run()

        for k in range(len(datas)):
            # Poll for the k-th batch's outputs; bail out after 5 one-second waits.
            statistic_time = 0
            outputs = pipeline_module_test.get_output()
            while len(outputs) == 0:
                outputs = pipeline_module_test.get_output()
                statistic_time += 1
                assert statistic_time < 5
                time.sleep(1)
            for i in range(len(outputs)):
                # The pipeline result must match the serial reference run...
                tvm.testing.assert_allclose(normal_outputs[k][i], outputs[i].numpy())
                # ...and must differ from the run without customized parameters.
                assert not (normal_output[i] == wrong_output[i]).all()

            assert pipeline_module_test.num_executing_pipeline == batch_idx + 1

        # Restore the CPU affinity changed during this test.
        reset_cpu_affinity(affinity)
# NOTE(review): this is a second definition of test_pipeline() — it shadows the
# earlier one, so under pytest only this variant would be collected; confirm
# whether the duplicate name is intentional or one of them should be renamed.
# NOTE(review): `customized_parameters = recreate_parameters(mod2)` binds the whole
# return value, but the sibling tests unpack it as
# `customized_parameters, customized_parameters_mod = recreate_parameters(...)` —
# presumably a (params, module) tuple is being passed to set_params below; verify
# against recreate_parameters.
# NOTE(review): the body appears truncated at this chunk boundary — the earlier
# test_pipeline continues with run/verify steps after set_params; confirm the
# remainder of this function in the full file before editing.
def test_pipeline(): if pipeline_executor.pipeline_executor_enabled(): target_list = tvm.testing.enabled_targets() for target in target_list: # Get the three pipeline modules here. (mod1, mod2, mod3), dshape = get_mannual_mod() # Prepare batch data for pipeline computation. datas = [] for i in range(5): datas.append(np.full(dshape, 3 + i).astype("float32")) pipe_config = pipeline_executor.PipelineConfig() customized_parameters = recreate_parameters(mod2) # The global parameters group named "param_0" will be connected to "mod1" as parameters. pipe_config["param_group"]["param_0"].connect( pipe_config[mod2]["param"]) # The pipeline input named "data_0" will be connected to a input named "data_0" # of mod1. pipe_config["input"]["data_a"].connect( pipe_config[mod1]["input"]["data_0"]) # The pipeline Input named "data_1" will be connected to a input named "data_1" # of mod2. pipe_config["input"]["data_b"].connect( pipe_config[mod2]["input"]["data_1"]) # The mod1 output[0] will be connected to a input named "data_0" of mod2. pipe_config[mod1]["output"][0].connect( pipe_config[mod2]["input"]["data_0"]) # The mod1 output[1] will be connected to a input named "data_0" of mod3. pipe_config[mod1]["output"][1].connect( pipe_config[mod3]["input"]["data_0"]) # The mod2 output[2] will be connected to a input named "data_1" of mod3. pipe_config[mod2]["output"][0].connect( pipe_config[mod3]["input"]["data_1"]) # The mod1 output[2] will be connected to pipeline output[0]. pipe_config[mod1]["output"][2].connect(pipe_config["output"]["0"]) # The mod3 output[0] will be connected to pipeline output[1]. pipe_config[mod3]["output"][0].connect(pipe_config["output"]["1"]) print(pipe_config) # Print configueration (print(pipe_config)), the result looks like following. 
# # Inputs # |data_a: mod1:data_0 # |data_b: mod2:data_1 # # output # |output(1) : mod1.output(2) # |output(2) : mod3.output(0) # # connections # |mod1.output(0)-> mod2.data_0 # |mod1.output(1)-> mod3.data_0 # |mod2.output(0)-> mod3.data_1 # Set other parameters. pipe_config[mod1].target = target[0] pipe_config[mod1].dev = target[1] pipe_config[mod2].target = "llvm" pipe_config[mod2].dev = tvm.cpu(0) pipe_config[mod3].target = "llvm" pipe_config[mod3].dev = tvm.cpu(0) # Here is to check the correctness of the configuration generated by API. mconfig = pipe_config.get_config() assert mconfig["module_connection"] == get_manual_conf( [mod1, mod2, mod3], target) # Build and create a pipeline module. with tvm.transform.PassContext(opt_level=3): pipeline_mod_factory = pipeline_executor.build(pipe_config) # Export the parameter configuration to a file. directory_path = tvm.contrib.utils.tempdir().temp_dir # If the directory does not exist, create it. if not os.path.exists(directory_path): os.makedirs(directory_path) config_file_name = pipeline_mod_factory.export_library( directory_path) # Use the output of build to create and initialize PipelineModule. pipeline_module = pipeline_executor.PipelineModule( pipeline_mod_factory) assert pipeline_module # Use the import function to create and initialize PipelineModule. pipeline_module_test = pipeline_executor.PipelineModule.load_library( config_file_name) assert pipeline_module_test.num_outputs == 2 input_map = pipeline_module_test.get_input_pipeline_map("data_b") assert input_map[0] == "1" and input_map[1] == "data_1" input_map = pipeline_module_test.get_input_pipeline_map("data_a") assert input_map[0] == "0" and input_map[1] == "data_0" module_index = pipeline_module_test.get_params_group_pipeline_map( "param_0") assert module_index == 1 # Use the parameters group name to set parameters. pipeline_module_test.set_params("param_0", customized_parameters)