def test_migration_v1_to_v2(self) -> None:
    """Test Workloads List Migration from v1 to v2."""
    migrator = WorkloadsListMigrator()
    migrator.workloads_json = os.path.join(
        os.path.dirname(__file__),
        "files",
        "workloads_list_v1.json",
    )
    self.assertEqual(migrator.require_migration, True)
    migrator.migrate()
    self.assertEqual(migrator.require_migration, False)

    expected_json_path = os.path.join(
        os.path.dirname(__file__),
        "files",
        "workloads_list_v2.json",
    )
    expected = _load_json_as_dict(expected_json_path)

    self.assertEqual(type(migrator.workloads_data), dict)
    self.assertDictEqual(migrator.workloads_data, expected)  # type: ignore
def test_workload_migration_from_v2(self) -> None:
    """Test Workload v2 config migrator."""
    workload_json_path = os.path.join(
        os.path.dirname(__file__),
        "files",
        "workload_v2_tuned.json",
    )
    workload_migrator = WorkloadMigrator(
        workload_json_path=workload_json_path,
    )
    workload_migrator.migrate()

    # Migrating a config that is already v2 should be a no-op,
    # so the input file doubles as the expected result.
    expected_json_path = os.path.join(
        os.path.dirname(__file__),
        "files",
        "workload_v2_tuned.json",
    )
    expected = _load_json_as_dict(expected_json_path)

    self.assertDictEqual(workload_migrator.workload_data, expected)  # type: ignore
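# Every snippet here relies on a `_load_json_as_dict` helper that is referenced
# but not defined in these excerpts. A minimal sketch of what it plausibly
# looks like (the exact signature and error handling are assumptions):
import json
from typing import Any, Dict


def _load_json_as_dict(path: str) -> Dict[str, Any]:
    """Read a JSON file and return its top-level object as a dict."""
    with open(path, encoding="utf-8") as json_file:
        return json.load(json_file)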
def execute_benchmark(data: Dict[str, Any]) -> None:
    """
    Execute benchmark.

    Expected data:
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "input_model": {
            "precision": "fp32",
            "path": "/localdisk/fp32.pb"
        },
        "optimized_model": {
            "precision": "int8",
            "path": "/localdisk/int8.pb"
        }
    }
    """
    from lpot.ux.utils.workload.workload import Workload

    request_id = str(data.get("id", ""))
    input_model = data.get("input_model", None)
    optimized_model = data.get("optimized_model", None)

    if not (request_id and input_model and optimized_model):
        message = "Missing request id, input or optimized model data."
        mq.post_error(
            "benchmark_finish",
            {"message": message, "code": 404, "id": request_id},
        )
        raise ClientErrorException(message)

    # Tag each entry only after validation, so a missing model reports a
    # clean error instead of raising AttributeError on None.
    input_model.update({"model_type": "input_model"})
    optimized_model.update({"model_type": "optimized_model"})

    workdir = Workdir(request_id=request_id, overwrite=False)
    try:
        workload_path = workdir.workload_path
        workload_data = _load_json_as_dict(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "benchmark_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise ClientErrorException(repr(err))

    workload = Workload(workload_data)

    response_data: Dict[str, Any] = {"id": request_id, "execution_details": {}}

    mq.post_success(
        "benchmark_start",
        {
            "message": "started",
            "id": request_id,
        },
    )

    models = [input_model, optimized_model]
    benchmark_count = 0
    benchmark_total = 0

    for model_info in models:
        benchmark_modes: List[str] = model_info.get("mode", [Benchmarks.PERF])
        if (
            not workload.tune and Benchmarks.ACC not in benchmark_modes
        ):  # Accuracy information is provided only in tuning
            benchmark_modes.append(Benchmarks.ACC)
        model_info.update({"benchmark_modes": benchmark_modes})
        benchmark_total += len(benchmark_modes)

    for model_info in models:
        model_precision = model_info.get("precision", None)
        model_type = model_info.get("model_type", None)
        model_path = model_info.get("path", None)
        benchmark_modes = model_info.get("benchmark_modes", None)

        if not (model_precision and model_path and model_type and benchmark_modes):
            message = "Missing model precision, model path or model type."
            mq.post_error(
                "benchmark_finish",
                {"message": message, "code": 404, "id": request_id},
            )
            raise ClientErrorException(message)

        for benchmark_mode in benchmark_modes:
            benchmark_count += 1
            response_data = benchmark_model_and_respond_to_ui(
                response_data=response_data,
                workload=workload,
                workdir=workdir,
                model=model_type,
                model_path=model_path,
                model_precision=model_precision,
                benchmark_mode=benchmark_mode,
                benchmark_count=benchmark_count,
                benchmark_total=benchmark_total,
            )
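# A minimal, hypothetical invocation of the variant above, with the payload
# shape taken from the docstring (id and paths are placeholders; a matching
# workload.json must already exist in the request's workdir):
execute_benchmark(
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "input_model": {"precision": "fp32", "path": "/localdisk/fp32.pb"},
        "optimized_model": {"precision": "int8", "path": "/localdisk/int8.pb"},
    },
)
# When the workload was not tuned, the accuracy mode is appended to each
# model's default [Benchmarks.PERF], so four runs are scheduled in total.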
def execute_optimization(data: Dict[str, Any]) -> dict:
    """Execute optimization."""
    from lpot.ux.utils.workload.workload import Workload

    if not str(data.get("id", "")):
        message = "Missing request id."
        mq.post_error(
            "optimization_finish",
            {"message": message, "code": 404},
        )
        raise ClientErrorException(message)

    request_id: str = data["id"]
    workdir = Workdir(request_id=request_id, overwrite=False)

    workload_path: str = workdir.workload_path
    try:
        workload_data = _load_json_as_dict(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "optimization_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise err

    workload = Workload(workload_data)
    optimization: Optimization = OptimizationFactory.get_optimization(
        workload,
        workdir.template_path,
    )

    send_data = {
        "message": "started",
        "id": request_id,
        "size_input_model": get_size(optimization.input_graph),
    }

    workdir.clean_logs()
    workdir.update_data(
        request_id=request_id,
        model_path=optimization.input_graph,
        input_precision=optimization.input_precision,
        model_output_path=optimization.output_graph,
        output_precision=optimization.output_precision,
        status="wip",
    )

    executor = Executor(
        workspace_path=workload_path,
        subject="optimization",
        data=send_data,
        log_name="output",
    )
    proc = executor.call(
        optimization.command,
    )

    optimization_time = executor.process_duration
    if optimization_time:
        optimization_time = round(optimization_time, 2)
    log.debug(f"Elapsed time: {optimization_time}")

    logs = [os.path.join(workload_path, "output.txt")]
    parser = OptimizationParser(logs)

    if proc.is_ok:
        response_data = parser.process()
        if isinstance(response_data, dict):
            response_data["id"] = request_id
            response_data["optimization_time"] = optimization_time
            response_data["size_optimized_model"] = get_size(optimization.output_graph)
            response_data["model_output_path"] = optimization.output_graph
            response_data["size_input_model"] = get_size(optimization.input_graph)
            response_data["is_custom_dataloader"] = bool(workdir.template_path)

        workdir.update_data(
            request_id=request_id,
            model_path=optimization.input_graph,
            model_output_path=optimization.output_graph,
            metric=response_data,
            status="success",
            execution_details={"optimization": optimization.serialize()},
            input_precision=optimization.input_precision,
            output_precision=optimization.output_precision,
        )
        response_data["execution_details"] = {"optimization": optimization.serialize()}

        log.debug(f"Parsed data is {json.dumps(response_data)}")
        mq.post_success("optimization_finish", response_data)
        return response_data
    else:
        log.error("Optimization failed.")
        workdir.update_data(
            request_id=request_id,
            model_path=optimization.input_graph,
            input_precision=optimization.input_precision,
            output_precision=optimization.output_precision,
            status="error",
        )
        mq.post_failure("optimization_finish", {"message": "failed", "id": request_id})
        raise ClientErrorException("Optimization failed during execution.")
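# Minimal usage sketch, assuming a workdir with a valid workload.json exists
# for this (hypothetical) request id. On success the parsed metrics dict is
# returned and also posted as "optimization_finish"; note that keys such as
# "model_output_path" are only present when the log parser returns a dict.
response = execute_optimization({"id": "configuration_id"})
log.debug(f"Optimized model stored at {response.get('model_output_path')}")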
def execute_benchmark(data: Dict[str, Any]) -> None:
    """
    Execute benchmark.

    Expected data:
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "models": [
            {
                "precision": "fp32",
                "path": "/localdisk/fp32.pb"
            },
            {
                "precision": "int8",
                "path": "/localdisk/int8.pb"
            }
        ]
    }
    """
    from lpot.ux.utils.workload.workload import Workload

    request_id = str(data.get("id", ""))
    models = data.get("models", None)

    if not (request_id and models):
        message = "Missing request id or model list."
        mq.post_error(
            "benchmark_finish",
            {"message": message, "code": 404, "id": request_id},
        )
        raise ClientErrorException(message)

    workdir = Workdir(request_id=request_id, overwrite=False)
    try:
        workload_path = workdir.workload_path
        workload_data = _load_json_as_dict(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "benchmark_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise ClientErrorException(repr(err))

    workload = Workload(workload_data)

    response_data: Dict[str, Any] = {"id": request_id, "execution_details": {}}

    mq.post_success(
        "benchmark_start",
        {
            "message": "started",
            "id": request_id,
        },
    )

    for idx, model_info in enumerate(models, start=1):
        model_precision = model_info.get("precision", None)
        model_path = model_info.get("path", None)
        benchmark_mode = model_info.get("mode", "performance")

        if not (model_precision and model_path):
            message = "Missing model precision or model path."
            mq.post_error(
                "benchmark_finish",
                {"message": message, "code": 404, "id": request_id},
            )
            raise ClientErrorException(message)

        benchmark: Benchmark = Benchmark(
            workload=workload,
            model_path=model_path,
            datatype=model_precision,
            mode=benchmark_mode,
        )

        log_name = f"{model_precision}_{benchmark_mode}_benchmark"
        executor = Executor(
            workload_path,
            subject="benchmark",
            data={"id": request_id},
            send_response=False,
            log_name=log_name,
            additional_log_names=["output.txt"],
        )
        proc = executor.call(
            benchmark.command,
        )

        logs = [os.path.join(workload_path, f"{log_name}.txt")]
        if proc.is_ok:
            parser = BenchmarkParser(logs)
            metrics = parser.process()
            metric = {}
            execution_details: Dict[str, Any] = {}

            throughput_field = f"perf_throughput_{model_precision}"
            if isinstance(metrics, dict):
                metric = {throughput_field: metrics.get(throughput_field, "")}
                execution_details = {
                    f"{model_precision}_benchmark": benchmark.serialize(),
                }

            response_data.update({"progress": f"{idx}/{len(models)}"})
            response_data.update(metric)
            response_data["execution_details"].update(execution_details)

            workdir.update_metrics(
                request_id=request_id,
                metric_data=metric,
            )
            workdir.update_execution_details(
                request_id=request_id,
                execution_details=execution_details,
            )
            log.debug(f"Parsed data is {json.dumps(response_data)}")
            mq.post_success("benchmark_progress", response_data)
        else:
            log.error("Benchmark failed.")
            mq.post_failure("benchmark_finish", {"message": "failed", "id": request_id})
            raise ClientErrorException("Benchmark failed during execution.")

    mq.post_success("benchmark_finish", response_data)
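# Minimal usage sketch for the list-based variant; the id and paths are
# hypothetical placeholders, and "mode" falls back to "performance" when
# omitted from a model entry:
execute_benchmark(
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "models": [
            {"precision": "fp32", "path": "/localdisk/fp32.pb"},
            {"precision": "int8", "path": "/localdisk/int8.pb"},
        ],
    },
)
# Per-model results arrive as "benchmark_progress" messages; the accumulated
# response_data is posted once at the end as "benchmark_finish".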