Example #1
import os
import shutil
import unittest

# Executor is assumed to come from the package under test; the import path
# below is an assumption, adjust it to the actual module layout.
from lpot.ux.utils.executor import Executor


class TestExecutor(unittest.TestCase):
    """Executor tests."""
    def __init__(self, *args: str, **kwargs: str) -> None:
        """Executor tests constructor."""
        super().__init__(*args, **kwargs)
        self.executor = Executor(
            workspace_path="tmp_workdir",
            subject="test",
            data={
                "id": "abc",
                "some_key": "some_value"
            },
            send_response=False,
            log_name="my_log",
            additional_log_names=["additional_log1", "additional_log2"],
        )

    @classmethod
    def tearDownClass(cls) -> None:
        """Tear down environment for test."""
        shutil.rmtree("tmp_workdir", ignore_errors=True)

    def test_workdir_property(self) -> None:
        """Test if workdir property returns correct path."""
        self.assertEqual(self.executor.workdir, "tmp_workdir")

    def test_request_id_property(self) -> None:
        """Test if request_id property returns correct value."""
        self.assertEqual(self.executor.request_id, "abc")

    def test_log_name_property(self) -> None:
        """Test if log_name property returns correct value."""
        self.assertEqual(self.executor.log_name, "my_log")

    def test_additional_log_names_property(self) -> None:
        """Test if additional_log_names property returns correct value."""
        self.assertIs(type(self.executor.additional_log_names), list)
        self.assertEqual(
            self.executor.additional_log_names,
            ["additional_log1", "additional_log2"],
        )

    def test_is_not_multi_commands(self) -> None:
        """Test if execution type is recognized correctly."""
        result = self.executor.is_multi_commands(["echo", "Hello world!"])
        self.assertFalse(result)

    def test_is_multi_commands(self) -> None:
        """Test if multi command execution is recognized correctly."""
        result = self.executor.is_multi_commands([
            ["echo", "Hello"],
            ["echo", "world!"],
        ])
        self.assertTrue(result)

    def test_process_call(self) -> None:
        """Test if multi command execution is recognized correctly."""
        print_phrase = "Hello world!"
        proc = self.executor.call(["echo", print_phrase])
        self.assertTrue(proc.is_ok)

        # Copy the list so the executor's own log configuration is not mutated.
        logs = list(self.executor.additional_log_names)
        if self.executor.log_name is not None:
            logs.append(f"{self.executor.log_name}.txt")
        for log in logs:
            log_path = os.path.join(self.executor.workdir, log)
            with open(log_path, "r") as log_file:
                self.assertEqual(log_file.readline().rstrip("\n"), print_phrase)
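
The suite above can be driven by any standard unittest runner (for example, python -m unittest). A minimal direct entry point is sketched below; it is an illustration, not part of the original test module.

if __name__ == "__main__":
    # Run every TestExecutor test; tearDownClass removes tmp_workdir afterwards.
    unittest.main()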
Example #2
# Assumes the surrounding module imports os, json, typing's Any/Dict, and the
# project helpers used below (Workload, Workdir, Benchmark, Benchmarks, Executor,
# BenchmarkParserFactory, the exception classes, log and mq).
def benchmark_model(
    response_data: dict,
    workload: Workload,
    workdir: Workdir,
    model: str,
    model_path: str,
    model_precision: str,
    benchmark_mode: str,
    benchmark_count: int,
    benchmark_total: int,
) -> dict:
    """Benchmark model and prepare response data."""
    request_id = response_data.get("id")

    benchmark: Benchmark = Benchmark(
        workload=workload,
        model_path=model_path,
        precision=model_precision,
        mode=benchmark_mode,
    )

    log_name = f"{model}_{benchmark_mode}_benchmark"

    executor = Executor(
        workload.workload_path,
        subject="benchmark",
        data={"id": request_id},
        send_response=False,
        log_name=log_name,
        additional_log_names=["output.txt"],
    )

    proc = executor.call(
        benchmark.command,
    )

    logs = [os.path.join(workload.workload_path, f"{log_name}.txt")]

    if not proc.is_ok:
        raise ClientErrorException("Benchmark failed during execution.")

    parser = BenchmarkParserFactory.get_parser(benchmark_mode, logs)
    metrics = parser.process()
    metric = {}
    execution_details: Dict[str, Any] = {}

    if benchmark_mode == Benchmarks.PERF:
        result_field = f"perf_throughput_{model}"
    elif benchmark_mode == Benchmarks.ACC:
        result_field = f"acc_{model}"
    else:
        raise InternalException(f"Benchmark mode {benchmark_mode} is not supported.")

    if isinstance(metrics, dict):
        metric = {result_field: metrics.get(result_field, "")}
        execution_details = response_data.get("execution_details", {})
        model_benchmark_details = execution_details.get(f"{model}_benchmark", {})
        model_benchmark_details.update(
            {
                benchmark_mode: benchmark.serialize(),
            },
        )

        response_data.update({"progress": f"{benchmark_count}/{benchmark_total}"})
        response_data.update(metric)
        response_data["execution_details"].update(
            {f"{model}_benchmark": model_benchmark_details},
        )
    workdir.update_metrics(
        request_id=request_id,
        metric_data=metric,
    )
    workdir.update_execution_details(
        request_id=request_id,
        execution_details=execution_details,
    )
    log.debug(f"Parsed data is {json.dumps(response_data)}")
    mq.post_success("benchmark_progress", response_data)

    return response_data
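
A hedged sketch of driving benchmark_model for every model in a request, mirroring the payload shape documented for execute_benchmark further below. It assumes workload, workdir and an initial response_data are prepared the same way execute_benchmark prepares them; since the model argument is only used to build the result-field and log names, the precision label is reused for it here.

# "performance" is assumed to match Benchmarks.PERF, the default mode used by
# execute_benchmark below.
models = [
    {"precision": "fp32", "path": "/localdisk/fp32.pb", "mode": "performance"},
    {"precision": "int8", "path": "/localdisk/int8.pb", "mode": "performance"},
]
for count, model_info in enumerate(models, start=1):
    response_data = benchmark_model(
        response_data=response_data,
        workload=workload,
        workdir=workdir,
        model=model_info["precision"],
        model_path=model_info["path"],
        model_precision=model_info["precision"],
        benchmark_mode=model_info["mode"],
        benchmark_count=count,
        benchmark_total=len(models),
    )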
Example #3
def execute_optimization(data: Dict[str, Any]) -> dict:
    """Get configuration."""
    from lpot.ux.utils.workload.workload import Workload

    if not str(data.get("id", "")):
        message = "Missing request id."
        mq.post_error(
            "optimization_finish",
            {"message": message, "code": 404},
        )
        raise ClientErrorException(message)

    request_id: str = data["id"]
    workdir = Workdir(request_id=request_id, overwrite=False)
    workload_path: str = workdir.workload_path
    try:
        workload_data = _load_json_as_dict(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "optimization_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise err
    workload = Workload(workload_data)
    optimization: Optimization = OptimizationFactory.get_optimization(
        workload,
        workdir.template_path,
    )
    send_data = {
        "message": "started",
        "id": request_id,
        "size_input_model": get_size(optimization.input_graph),
    }
    workdir.clean_logs()
    workdir.update_data(
        request_id=request_id,
        model_path=optimization.input_graph,
        input_precision=optimization.input_precision,
        model_output_path=optimization.output_graph,
        output_precision=optimization.output_precision,
        status="wip",
    )

    executor = Executor(
        workspace_path=workload_path,
        subject="optimization",
        data=send_data,
        log_name="output",
    )

    proc = executor.call(
        optimization.command,
    )
    optimization_time = executor.process_duration
    if optimization_time:
        optimization_time = round(optimization_time, 2)
    log.debug(f"Elapsed time: {optimization_time}")
    logs = [os.path.join(workload_path, "output.txt")]
    parser = OptimizationParser(logs)
    if proc.is_ok:
        response_data = parser.process()

        if isinstance(response_data, dict):
            response_data["id"] = request_id
            response_data["optimization_time"] = optimization_time
            response_data["size_optimized_model"] = get_size(optimization.output_graph)
            response_data["model_output_path"] = optimization.output_graph
            response_data["size_input_model"] = get_size(optimization.input_graph)
            response_data["is_custom_dataloader"] = bool(workdir.template_path)

            workdir.update_data(
                request_id=request_id,
                model_path=optimization.input_graph,
                model_output_path=optimization.output_graph,
                metric=response_data,
                status="success",
                execution_details={"optimization": optimization.serialize()},
                input_precision=optimization.input_precision,
                output_precision=optimization.output_precision,
            )
            response_data["execution_details"] = {"optimization": optimization.serialize()}

        log.debug(f"Parsed data is {json.dumps(response_data)}")
        mq.post_success("optimization_finish", response_data)
        return response_data
    else:
        log.debug("FAIL")
        workdir.update_data(
            request_id=request_id,
            model_path=optimization.input_graph,
            input_precision=optimization.input_precision,
            output_precision=optimization.output_precision,
            status="error",
        )
        mq.post_failure("optimization_finish", {"message": "failed", "id": request_id})
        raise ClientErrorException("Optimization failed during execution.")
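
execute_optimization reads only the id key of its payload and reports progress over the message queue itself, so a caller mainly needs to handle the failure exception. A minimal invocation sketch, assuming ClientErrorException is importable from the project's exceptions module:

from lpot.ux.utils.exceptions import ClientErrorException  # import path assumed

try:
    result = execute_optimization({"id": "configuration_id"})
    # On success the parsed metrics, model_output_path and model sizes are returned.
    print(result.get("model_output_path"), result.get("size_optimized_model"))
except ClientErrorException:
    # The failure has already been posted to the optimization_finish topic.
    raise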
Example #4
def execute_tuning(data: Dict[str, Any]) -> dict:
    """Get configuration."""
    from lpot.ux.utils.workload.workload import Workload

    if not str(data.get("id", "")):
        message = "Missing request id."
        mq.post_error(
            "tuning_finish",
            {"message": message, "code": 404},
        )
        raise ClientErrorException(message)

    request_id: str = data["id"]
    workdir = Workdir(request_id=request_id)
    workload_path: str = workdir.workload_path
    try:
        workload_data = load_json(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "tuning_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise err
    workload = Workload(workload_data)
    tuning: Tuning = Tuning(
        workload,
        workdir.workload_path,
        workdir.template_path,
    )
    send_data = {
        "message": "started",
        "id": request_id,
        "size_fp32": get_size(tuning.model_path),
    }
    workdir.clean_logs()
    workdir.update_data(
        request_id=request_id,
        model_path=tuning.model_path,
        model_output_path=tuning.model_output_path,
        status="wip",
    )

    executor = Executor(
        workspace_path=workload_path,
        subject="tuning",
        data=send_data,
        log_name="output",
    )

    proc = executor.call(tuning.command)
    tuning_time = executor.process_duration
    if tuning_time:
        tuning_time = round(tuning_time, 2)
    log.debug(f"Elapsed time: {tuning_time}")
    logs = [os.path.join(workload_path, "output.txt")]
    parser = Parser(logs)
    if proc.is_ok:
        response_data = parser.process()

        if isinstance(response_data, dict):
            response_data["id"] = request_id
            response_data["tuning_time"] = tuning_time
            response_data["size_int8"] = get_size(tuning.model_output_path)
            response_data["model_output_path"] = tuning.model_output_path
            response_data["size_fp32"] = get_size(tuning.model_path)
            response_data["is_custom_dataloader"] = bool(workdir.template_path)

            workdir.update_data(
                request_id=request_id,
                model_path=tuning.model_path,
                model_output_path=tuning.model_output_path,
                metric=response_data,
                status="success",
                execution_details={"tuning": tuning.serialize()},
            )
            response_data["execution_details"] = {"tuning": tuning.serialize()}

        log.debug(f"Parsed data is {json.dumps(response_data)}")
        mq.post_success("tuning_finish", response_data)
        return response_data
    else:
        log.debug("FAIL")
        workdir.update_data(
            request_id=request_id,
            model_path=tuning.model_path,
            status="error",
        )
        mq.post_failure("tuning_finish", {
            "message": "failed",
            "id": request_id
        })
        raise ClientErrorException("Tuning failed during execution.")
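
execute_tuning and execute_optimization accept the same {"id": ...} payload and both return the parsed response dictionary, so a thin dispatcher can route incoming requests to either one. The routing table below is an illustrative assumption, not part of the original module:

from typing import Any, Callable, Dict

# Hypothetical routing table keyed by request type.
HANDLERS: Dict[str, Callable[[Dict[str, Any]], dict]] = {
    "optimization": execute_optimization,
    "tuning": execute_tuning,
}


def handle_request(kind: str, data: Dict[str, Any]) -> dict:
    """Route a request payload to the matching entry point."""
    return HANDLERS[kind](data)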
Example #5
def execute_benchmark(data: Dict[str, Any]) -> None:
    """
    Execute benchmark.

    Expected data:
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "models": [
            {
                "precision": "fp32",
                "path": "/localdisk/fp32.pb"
            },
            {
                "precision": "int8",
                "path": "/localdisk/int8.pb"
            }
        ]
    }
    """
    from lpot.ux.utils.workload.workload import Workload

    request_id = str(data.get("id", ""))
    models = data.get("models", None)

    if not (request_id and models):
        message = "Missing request id or model list."
        mq.post_error(
            "benchmark_finish",
            {"message": message, "code": 404, "id": request_id},
        )
        raise ClientErrorException(message)

    workdir = Workdir(request_id=request_id, overwrite=False)
    try:
        workload_path = workdir.workload_path
        workload_data = load_json(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "benchmark_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise ClientErrorException(repr(err))

    workload = Workload(workload_data)

    response_data: Dict[str, Any] = {"id": request_id, "execution_details": {}}

    mq.post_success(
        "benchmark_start",
        {
            "message": "started",
            "id": request_id,
        },
    )

    for idx, model_info in enumerate(models, start=1):
        model_precision = model_info.get("precision", None)
        model_path = model_info.get("path", None)
        benchmark_mode = model_info.get("mode", "performance")
        if not (model_precision and model_path):
            message = "Missing model precision or model path."
            mq.post_error(
                "benchmark_finish",
                {"message": message, "code": 404, "id": request_id},
            )
            raise ClientErrorException(message)

        benchmark: Benchmark = Benchmark(
            workload=workload,
            model_path=model_path,
            datatype=model_precision,
            mode=benchmark_mode,
        )

        log_name = f"{model_precision}_{benchmark_mode}_benchmark"

        executor = Executor(
            workload_path,
            subject="benchmark",
            data={"id": request_id},
            send_response=False,
            log_name=log_name,
            additional_log_names=["output.txt"],
        )

        proc = executor.call(
            benchmark.command,
        )

        logs = [os.path.join(workload_path, f"{log_name}.txt")]

        if proc.is_ok:
            parser = Parser(logs)
            metrics = parser.process()
            metric = {}
            execution_details: Dict[str, Any] = {}
            throughput_field = f"perf_throughput_{model_precision}"
            if isinstance(metrics, dict):
                metric = {throughput_field: metrics.get(throughput_field, "")}
                execution_details = {
                    f"{model_precision}_benchmark": benchmark.serialize(),
                }
                response_data.update({"progress": f"{idx}/{len(models)}"})
                response_data.update(metric)
                response_data["execution_details"].update(execution_details)
            workdir.update_metrics(
                request_id=request_id,
                metric_data=metric,
            )
            workdir.update_execution_details(
                request_id=request_id,
                execution_details=execution_details,
            )
            log.debug(f"Parsed data is {json.dumps(response_data)}")
            mq.post_success("benchmark_progress", response_data)
        else:
            log.error("Benchmark failed.")
            mq.post_failure("benchmark_finish", {"message": "failed", "id": request_id})
            raise ClientErrorException("Benchmark failed during execution.")

    mq.post_success("benchmark_finish", response_data)