Example No. 1
    def _migrate_workload(self, migration_version: int) -> None:
        """Migrate workload one version up."""
        print(f"Migrate called with migration version {migration_version}.")
        migrate = self.version_migrators.get(migration_version, None)
        if migrate is None:
            raise InternalException(
                f"Could not migrate workload from version {migration_version}")
        migrate()
Example No. 2
    def test_from_exception_for_internal_exception(self) -> None:
        """Test from_exception for InternalException."""
        message = "Domain code crashed!"

        response = ResponseGenerator.from_exception(InternalException(message))

        self.assertEqual(500, response.status_code)
        self.assertEqual(message, response.data.decode("utf-8"))
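
For context, here is a minimal sketch of a ResponseGenerator.from_exception that would satisfy this test, assuming Flask-style Response objects and a simple type-to-status mapping. The exception classes are the ones used elsewhere on this page; the mapping itself is an assumption, not the project's actual implementation.

# Sketch only; the project's own exception classes (ClientErrorException,
# NotFoundException, InternalException) are assumed to be importable.
from flask import Response


class ResponseGenerator:
    """Assumed shape of the response generator."""

    @staticmethod
    def from_exception(exception: Exception) -> Response:
        """Map an exception to an HTTP response (assumed mapping)."""
        if isinstance(exception, ClientErrorException):
            return Response(response=str(exception), status=400)
        if isinstance(exception, NotFoundException):
            return Response(response=str(exception), status=404)
        # InternalException and anything unexpected map to 500,
        # which is what the test above asserts.
        return Response(response=str(exception), status=500)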
Example No. 3
    def _migrate_workloads_list(self, migration_version: int) -> None:
        """Migrate workloads list one version up."""
        migrate_workloads = self.version_migrators.get(migration_version, None)
        if migrate_workloads is None:
            raise InternalException(
                f"Could not migrate workloads list to version {migration_version}",
            )
        migrate_workloads()
Example No. 4
def get_parser(benchmark_mode: str, logs: List[str]) -> Parser:
    """Get benchmark parser for specified mode."""
    parser_map = {
        Benchmarks.PERF: PerformanceParser,
        Benchmarks.ACC: AccuracyParser,
    }
    parser = parser_map.get(benchmark_mode, None)
    if parser is None:
        raise InternalException(
            f"Could not find parser class for {benchmark_mode}")
    return parser(logs)
Example No. 5
def get_optimization(workload: Workload,
                     template_path: Optional[str] = None) -> Optimization:
    """Get optimization for specified workload."""
    optimization_map = {
        Optimizations.TUNING: Tuning,
        Optimizations.GRAPH: GraphOptimization,
    }
    optimization = optimization_map.get(workload.mode, None)
    if optimization is None:
        raise InternalException(
            f"Could not find optimization class for {workload.mode}")
    log.debug(f"Initializing {optimization.__name__} class.")
    return optimization(workload, template_path)
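
As a usage sketch, the dictionary-dispatch-and-raise pattern above is normally consumed like this; the error handling is illustrative and assumes a workload object is already in hand:

# Illustrative only: `workload` is assumed to be a valid Workload instance.
try:
    optimization = get_optimization(workload)
except InternalException as err:
    # An unsupported workload.mode surfaces here as InternalException.
    log.error(f"Failed to resolve optimization: {err}")
    raise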
Example No. 6
    def delete_workload(self, workload_id: str) -> None:
        """Delete workload by ID."""
        workloads = self.workloads_data.get("workloads", {})
        if workload_id in workloads:
            try:
                workload = workloads[workload_id]
                workload_path = workload.get("workload_path", None)
                del workloads[workload_id]
                self.dump()
                if workload_path:
                    shutil.rmtree(workload_path)
            except Exception as e:
                raise InternalException(
                    f"Error while removing workload {workload_id}:\n{str(e)}",
                )
        else:
            raise NotFoundException(f"Can't find workload ID {workload_id}.")
Example No. 7
    def command(self, value: List[str]) -> None:
        """Set optimization command."""
        if not isinstance(value, list):
            raise InternalException("Command should be a list.")
        self._command = value
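
Example No. 7 shows only the setter body; a minimal sketch of the property pair it presumably sits in looks like this (the getter and the enclosing class name are assumptions):

class Optimization:
    """Sketch of the enclosing class; only the setter comes from the example above."""

    @property
    def command(self) -> List[str]:
        """Get optimization command."""
        return self._command

    @command.setter
    def command(self, value: List[str]) -> None:
        """Set optimization command."""
        if not isinstance(value, list):
            raise InternalException("Command should be a list.")
        self._command = value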
Example No. 8
def benchmark_model(
    response_data: dict,
    workload: Workload,
    workdir: Workdir,
    model: str,
    model_path: str,
    model_precision: str,
    benchmark_mode: str,
    benchmark_count: int,
    benchmark_total: int,
) -> dict:
    """Benchmark model and prepare response data."""
    request_id = response_data.get("id")

    benchmark: Benchmark = Benchmark(
        workload=workload,
        model_path=model_path,
        precision=model_precision,
        mode=benchmark_mode,
    )

    log_name = f"{model}_{benchmark_mode}_benchmark"

    executor = Executor(
        workload.workload_path,
        subject="benchmark",
        data={"id": request_id},
        send_response=False,
        log_name=log_name,
        additional_log_names=["output.txt"],
    )

    proc = executor.call(
        benchmark.command,
    )

    logs = [os.path.join(workload.workload_path, f"{log_name}.txt")]

    if not proc.is_ok:
        raise ClientErrorException("Benchmark failed during execution.")

    parser = BenchmarkParserFactory.get_parser(benchmark_mode, logs)
    metrics = parser.process()
    metric = {}
    execution_details: Dict[str, Any] = {}

    if benchmark_mode == Benchmarks.PERF:
        result_field = f"perf_throughput_{model}"
    elif benchmark_mode == Benchmarks.ACC:
        result_field = f"acc_{model}"
    else:
        raise InternalException(f"Benchmark mode {benchmark_mode} is not supported.")

    if isinstance(metrics, dict):
        metric = {result_field: metrics.get(result_field, "")}
        execution_details = response_data.get("execution_details", {})
        model_benchmark_details = execution_details.get(f"{model}_benchmark", {})
        model_benchmark_details.update(
            {
                benchmark_mode: benchmark.serialize(),
            },
        )

        response_data.update({"progress": f"{benchmark_count}/{benchmark_total}"})
        response_data.update(metric)
        response_data["execution_details"].update(
            {f"{model}_benchmark": model_benchmark_details},
        )
    workdir.update_metrics(
        request_id=request_id,
        metric_data=metric,
    )
    workdir.update_execution_details(
        request_id=request_id,
        execution_details=execution_details,
    )
    log.debug(f"Parsed data is {json.dumps(response_data)}")
    mq.post_success("benchmark_progress", response_data)

    return response_data
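
A caller of benchmark_model might guard it roughly as follows; this is a sketch that assumes the keyword arguments are already collected, and only ClientErrorException and InternalException are taken from the code above:

# Sketch only: `benchmark_args` stands in for the real keyword arguments.
try:
    response_data = benchmark_model(**benchmark_args)
except ClientErrorException as err:
    # Raised when the benchmark subprocess reports a failure.
    log.warning(f"Benchmark rejected: {err}")
except InternalException as err:
    # Raised for unsupported benchmark modes.
    log.error(f"Benchmark failed internally: {err}")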