Code Example #1
File: execute_benchmark.py Project: intel/lpot
import json
import os
from typing import Any, Dict

# Helper classes and utilities used below (Benchmark, Benchmarks, BenchmarkParserFactory,
# Executor, Workdir, Workload, mq, log, ClientErrorException, InternalException)
# come from the lpot.ux packages; their import lines are omitted in this snippet.


def benchmark_model(
    response_data: dict,
    workload: Workload,
    workdir: Workdir,
    model: str,
    model_path: str,
    model_precision: str,
    benchmark_mode: str,
    benchmark_count: int,
    benchmark_total: int,
) -> dict:
    """Benchmark model and prepare response data."""
    request_id = response_data.get("id")

    # Describe the benchmark job for this model: path, precision and run mode.
    benchmark: Benchmark = Benchmark(
        workload=workload,
        model_path=model_path,
        precision=model_precision,
        mode=benchmark_mode,
    )

    log_name = f"{model}_{benchmark_mode}_benchmark"

    # Execute the benchmark command in the workload directory, logging its
    # output under the log name built above.
    executor = Executor(
        workload.workload_path,
        subject="benchmark",
        data={"id": request_id},
        send_response=False,
        log_name=log_name,
        additional_log_names=["output.txt"],
    )

    proc = executor.call(
        benchmark.command,
    )

    logs = [os.path.join(workload.workload_path, f"{log_name}.txt")]

    if not proc.is_ok:
        raise ClientErrorException("Benchmark failed during execution.")

    # Pick the parser matching the benchmark mode and extract metrics from the log.
    parser = BenchmarkParserFactory.get_parser(benchmark_mode, logs)
    metrics = parser.process()
    metric = {}
    execution_details: Dict[str, Any] = {}

    # The response field name depends on whether this is a performance or accuracy run.
    if benchmark_mode == Benchmarks.PERF:
        result_field = f"perf_throughput_{model}"
    elif benchmark_mode == Benchmarks.ACC:
        result_field = f"acc_{model}"
    else:
        raise InternalException(f"Benchmark mode {benchmark_mode} is not supported.")

    # Merge the parsed metric, progress and serialized benchmark details into the response.
    if isinstance(metrics, dict):
        metric = {result_field: metrics.get(result_field, "")}
        execution_details = response_data.get("execution_details", {})
        model_benchmark_details = execution_details.get(f"{model}_benchmark", {})
        model_benchmark_details.update(
            {
                benchmark_mode: benchmark.serialize(),
            },
        )

        response_data.update({"progress": f"{benchmark_count}/{benchmark_total}"})
        response_data.update(metric)
        response_data["execution_details"].update(
            {f"{model}_benchmark": model_benchmark_details},
        )
    # Persist the metric and execution details for this request, then report progress.
    workdir.update_metrics(
        request_id=request_id,
        metric_data=metric,
    )
    workdir.update_execution_details(
        request_id=request_id,
        execution_details=execution_details,
    )
    log.debug(f"Parsed data is {json.dumps(response_data)}")
    mq.post_success("benchmark_progress", response_data)

    return response_data
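A minimal, hypothetical driver for benchmark_model is sketched below. It reuses only the constructors shown in the examples on this page (Workdir, Workload, load_json, Benchmarks); the request id, model names and model paths are placeholders, not values from the project.

# Hypothetical usage sketch; all literal values are placeholders.
workdir = Workdir(request_id="configuration_id", overwrite=False)
workload = Workload(
    load_json(os.path.join(workdir.workload_path, "workload.json")),
)

response_data: dict = {"id": "configuration_id", "execution_details": {}}
models = [
    ("input_model", "/localdisk/fp32.pb", "fp32"),
    ("optimized_model", "/localdisk/int8.pb", "int8"),
]

for count, (name, path, precision) in enumerate(models, start=1):
    response_data = benchmark_model(
        response_data=response_data,
        workload=workload,
        workdir=workdir,
        model=name,
        model_path=path,
        model_precision=precision,
        benchmark_mode=Benchmarks.PERF,
        benchmark_count=count,
        benchmark_total=len(models),
    )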
Code Example #2
import json
import os
from typing import Any, Dict

# As in the example above, the remaining helpers (Benchmark, Executor, Parser,
# Workdir, mq, log, load_json, ClientErrorException) come from the lpot.ux
# packages and are imported elsewhere in the original module.


def execute_benchmark(data: Dict[str, Any]) -> None:
    """
    Execute benchmark.

    Expected data:
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "models": [
            {
                "precision": "fp32",
                "path": "/localdisk/fp32.pb"
            },
            {
                "precision": "int8",
                "path": "/localdisk/int8.pb"
            }
        ]
    }
    """
    from lpot.ux.utils.workload.workload import Workload

    request_id = str(data.get("id", ""))
    models = data.get("models", None)

    if not (request_id and models):
        message = "Missing request id or model list."
        mq.post_error(
            "benchmark_finish",
            {"message": message, "code": 404, "id": request_id},
        )
        raise ClientErrorException(message)

    # Locate the workload directory for this request and load its saved configuration.
    workdir = Workdir(request_id=request_id, overwrite=False)
    try:
        workload_path = workdir.workload_path
        workload_data = load_json(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "benchmark_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise ClientErrorException(repr(err))

    workload = Workload(workload_data)

    response_data: Dict[str, Any] = {"id": request_id, "execution_details": {}}

    mq.post_success(
        "benchmark_start",
        {
            "message": "started",
            "id": request_id,
        },
    )

    # Benchmark each requested model in turn, posting progress after every run.
    for idx, model_info in enumerate(models, start=1):
        model_precision = model_info.get("precision", None)
        model_path = model_info.get("path", None)
        benchmark_mode = model_info.get("mode", "performance")
        if not (model_precision and model_path):
            message = "Missing model precision or model path."
            mq.post_error(
                "benchmark_finish",
                {"message": message, "code": 404, "id": request_id},
            )
            raise ClientErrorException(message)

        benchmark: Benchmark = Benchmark(
            workload=workload,
            model_path=model_path,
            datatype=model_precision,
            mode=benchmark_mode,
        )

        log_name = f"{model_precision}_{benchmark_mode}_benchmark"

        executor = Executor(
            workload_path,
            subject="benchmark",
            data={"id": request_id},
            send_response=False,
            log_name=log_name,
            additional_log_names=["output.txt"],
        )

        proc = executor.call(
            benchmark.command,
        )

        logs = [os.path.join(workload_path, f"{log_name}.txt")]

        # On success, parse the throughput from the log, update the response and
        # persist the results; on failure, report the error and stop.
        if proc.is_ok:
            parser = Parser(logs)
            metrics = parser.process()
            metric = {}
            execution_details: Dict[str, Any] = {}
            throughput_field = f"perf_throughput_{model_precision}"
            if isinstance(metrics, dict):
                metric = {throughput_field: metrics.get(throughput_field, "")}
                execution_details = {
                    f"{model_precision}_benchmark": benchmark.serialize(),
                }
                response_data.update({"progress": f"{idx}/{len(models)}"})
                response_data.update(metric)
                response_data["execution_details"].update(execution_details)
            workdir.update_metrics(
                request_id=request_id,
                metric_data=metric,
            )
            workdir.update_execution_details(
                request_id=request_id,
                execution_details=execution_details,
            )
            log.debug(f"Parsed data is {json.dumps(response_data)}")
            mq.post_success("benchmark_progress", response_data)
        else:
            log.error("Benchmark failed.")
            mq.post_failure("benchmark_finish", {"message": "failed", "id": request_id})
            raise ClientErrorException("Benchmark failed during execution.")

    mq.post_success("benchmark_finish", response_data)
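The expected request layout is documented in the docstring above. A hypothetical invocation might look like the sketch below; the paths are placeholders, and the per-model "mode" key is optional, defaulting to "performance".

# Hypothetical invocation; all paths are placeholders.
execute_benchmark(
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "models": [
            {"precision": "fp32", "path": "/localdisk/fp32.pb"},
            {"precision": "int8", "path": "/localdisk/int8.pb", "mode": "performance"},
        ],
    },
)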