Code example #1
    # mocked_open is injected by unittest.mock; the test assumes open() is
    # patched (e.g. via @patch("builtins.open")) so the file yields the lines below.
    def test_parsing_simple_file(self, mocked_open: MagicMock) -> None:
        """Test parsing of a simple performance log file."""
        mocked_open.return_value.__enter__.return_value = [
            "Foo bar baz",
            "performance mode benchmark result:",
            "2021-04-14 05:16:09 [INFO] Accuracy is 0.1234567",
            "2021-04-14 05:16:09 [INFO] Batch size = 1234",
            "2021-04-14 05:16:09 [INFO] Latency: 2.34567 ms",
            "2021-04-14 05:16:09 [INFO] Throughput: 123.45678 images/sec",
            "2021-04-14 05:16:10 [INFO] performance mode benchmark done!",
            "2021-04-14 05:16:10 [INFO]",
            "performance mode benchmark result:",
            "a b c d",
        ]

        benchmark_parser = BenchmarkParserFactory.get_parser(
            benchmark_mode="performance",
            logs=["file.log"],
        )
        parsed = benchmark_parser.process()

        self.assertEqual(
            {
                "perf_throughput_input_model": 123.4568,
                "perf_throughput_optimized_model": 123.4568,
                "perf_latency_input_model": 2.3457,
                "perf_latency_optimized_model": 2.3457,
            },
            parsed,
        )
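The assertion in example #1 implies that the performance parser pulls the "Latency:" and "Throughput:" values out of the log lines and rounds them to four decimal places (123.45678 -> 123.4568, 2.34567 -> 2.3457). Below is a minimal sketch of that extraction, for illustration only; the regular expressions and the helper name are assumptions, not the actual lpot implementation.

import re
from typing import Dict, List


def extract_performance_metrics(lines: List[str]) -> Dict[str, float]:
    """Hypothetical helper mirroring the behaviour expected by example #1."""
    patterns = {
        "latency": re.compile(r"Latency:\s*([0-9.]+)\s*ms"),
        "throughput": re.compile(r"Throughput:\s*([0-9.]+)"),
    }
    metrics: Dict[str, float] = {}
    for line in lines:
        for name, pattern in patterns.items():
            match = pattern.search(line)
            if match:
                # Round to four decimals, matching the asserted values above.
                metrics[name] = round(float(match.group(1)), 4)
    return metrics


# Applied to the mocked log lines above, this yields
# {"latency": 2.3457, "throughput": 123.4568}.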
Code example #2
    def test_parsing_empty_file_list(self) -> None:
        """Test parsing of none files."""
        benchmark_parser = BenchmarkParserFactory.get_parser(
            benchmark_mode="performance",
            logs=[],
        )
        parsed = benchmark_parser.process()

        self.assertEqual({}, parsed)
Code example #3
    def test_parsing_empty_files(self, mocked_open: MagicMock) -> None:
        """Test parsing of files without any lines."""
        mocked_open.return_value.__enter__.return_value = []

        benchmark_parser = BenchmarkParserFactory.get_parser(
            benchmark_mode="performance",
            logs=["file.log"],
        )
        parsed = benchmark_parser.process()

        self.assertEqual({}, parsed)
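Examples #1 and #3 both receive a mocked_open argument, which implies the test methods patch the built-in open() so the mocked file object yields the listed lines. The following is a minimal, self-contained harness sketch under that assumption; the actual patch target in the lpot test module may differ.

import unittest
from unittest.mock import MagicMock, patch


def count_log_lines(path: str) -> int:
    """Stand-in for code that opens and iterates over a log file."""
    with open(path) as log_file:
        return sum(1 for _ in log_file)


class TestOpenMocking(unittest.TestCase):
    """Shows the decorator that injects mocked_open in the examples above."""

    @patch("builtins.open")  # assumed target; the original tests may patch a module-level open
    def test_mocked_file_lines(self, mocked_open: MagicMock) -> None:
        # The context manager returned by open() yields the listed lines,
        # exactly as in examples #1 and #3.
        mocked_open.return_value.__enter__.return_value = [
            "line one",
            "line two",
        ]
        self.assertEqual(2, count_log_lines("file.log"))


if __name__ == "__main__":
    unittest.main()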
Code example #4
File: execute_benchmark.py  Project: intel/lpot
def benchmark_model(
    response_data: dict,
    workload: Workload,
    workdir: Workdir,
    model: str,
    model_path: str,
    model_precision: str,
    benchmark_mode: str,
    benchmark_count: int,
    benchmark_total: int,
) -> dict:
    """Benchmark model and prepare response data."""
    request_id = response_data.get("id")

    # Build the benchmark job for the requested model, precision, and mode.
    benchmark: Benchmark = Benchmark(
        workload=workload,
        model_path=model_path,
        precision=model_precision,
        mode=benchmark_mode,
    )

    log_name = f"{model}_{benchmark_mode}_benchmark"

    # Run the benchmark command; its output is captured in the log file
    # named above and parsed further down.
    executor = Executor(
        workload.workload_path,
        subject="benchmark",
        data={"id": request_id},
        send_response=False,
        log_name=log_name,
        additional_log_names=["output.txt"],
    )

    proc = executor.call(
        benchmark.command,
    )

    logs = [os.path.join(workload.workload_path, f"{log_name}.txt")]

    if not proc.is_ok:
        raise ClientErrorException("Benchmark failed during execution.")

    # Extract metrics (throughput/latency or accuracy) from the benchmark log.
    parser = BenchmarkParserFactory.get_parser(benchmark_mode, logs)
    metrics = parser.process()
    metric = {}
    execution_details: Dict[str, Any] = {}

    if benchmark_mode == Benchmarks.PERF:
        result_field = f"perf_throughput_{model}"
    elif benchmark_mode == Benchmarks.ACC:
        result_field = f"acc_{model}"
    else:
        raise InternalException(f"Benchmark mode {benchmark_mode} is not supported.")

    if isinstance(metrics, dict):
        # Keep only the single result field relevant to this model and mode,
        # and record the serialized benchmark config in the execution details.
        metric = {result_field: metrics.get(result_field, "")}
        execution_details = response_data.get("execution_details", {})
        model_benchmark_details = execution_details.get(f"{model}_benchmark", {})
        model_benchmark_details.update(
            {
                benchmark_mode: benchmark.serialize(),
            },
        )

        response_data.update({"progress": f"{benchmark_count}/{benchmark_total}"})
        response_data.update(metric)
        response_data["execution_details"].update(
            {f"{model}_benchmark": model_benchmark_details},
        )
    workdir.update_metrics(
        request_id=request_id,
        metric_data=metric,
    )
    workdir.update_execution_details(
        request_id=request_id,
        execution_details=execution_details,
    )
    log.debug(f"Parsed data is {json.dumps(response_data)}")
    mq.post_success("benchmark_progress", response_data)

    return response_data
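The result_field naming in benchmark_model() lines up with the keys produced by the performance parser in example #1: with benchmark_mode equal to Benchmarks.PERF and model set to "input_model" or "optimized_model", the function picks exactly one of the perf_throughput_* entries. A small illustrative snippet using the parsed values from example #1 (the model names come from that example; Benchmarks.PERF is assumed to correspond to "performance"):

# Parsed output taken from example #1.
metrics = {
    "perf_throughput_input_model": 123.4568,
    "perf_throughput_optimized_model": 123.4568,
    "perf_latency_input_model": 2.3457,
    "perf_latency_optimized_model": 2.3457,
}

for model in ("input_model", "optimized_model"):
    # Mirrors the Benchmarks.PERF branch of benchmark_model().
    result_field = f"perf_throughput_{model}"
    metric = {result_field: metrics.get(result_field, "")}
    print(metric)
# {'perf_throughput_input_model': 123.4568}
# {'perf_throughput_optimized_model': 123.4568}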