Example No. 1
def get_workloads_list(data: dict) -> Dict[str, Any]:
    """Return workloads list."""
    workspace_path = os.environ["HOME"]
    if data.get("workspace_path"):
        workspace_path = data["workspace_path"]
    workdir = Workdir(workspace_path=workspace_path)

    return workdir.map_to_response()
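
The handler above prefers the workspace_path supplied in the request and falls back to $HOME otherwise. A minimal self-contained sketch of that defaulting pattern (resolve_workspace_path is a hypothetical helper, not part of lpot.ux):

import os
from typing import Any, Dict

def resolve_workspace_path(data: Dict[str, Any]) -> str:
    # Prefer the path sent in the request; fall back to the user's HOME.
    return data.get("workspace_path") or os.environ["HOME"]

print(resolve_workspace_path({}))                             # $HOME
print(resolve_workspace_path({"workspace_path": "/tmp/ws"}))  # /tmp/ws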
Example No. 2
def get_default_path(data: Dict[str, Any]) -> Dict[str, Any]:
    """Get paths repository or workspace."""
    workdir = Workdir()
    path = os.environ["HOME"]
    if os.path.isfile(workdir.workloads_json):
        path = workdir.get_active_workspace()
    else:
        workdir.set_active_workspace(path)

    return {"path": path}
Example No. 3
def set_workspace(data: Dict[str, Any]) -> Dict[str, Any]:
    """Set workspace."""
    workspace_path = data.get("path", None)

    if not workspace_path:
        raise ClientErrorException("Parameter 'path' is missing in request.")

    os.makedirs(workspace_path, exist_ok=True)
    workdir = Workdir()
    workdir.set_active_workspace(workspace_path)

    return {"message": "SUCCESS"}
Example No. 4
    def test_delete_workload_failure(self) -> None:
        """Test workload deletion - error due to exception."""
        with patch("os.path.isfile") as fake_isfile, \
                patch("shutil.rmtree") as fake_rmtree, \
                patch.object(Workdir, "load", autospec=True) as fake_load, \
                patch.object(Workdir, "dump") as fake_dump, \
                self.assertRaises(InternalException):
            fake_isfile.return_value = True
            fake_rmtree.side_effect = _fake_raise_exception_mock
            fake_load.side_effect = _fake_load_mock
            fake_dump.return_value = None
            workdir = Workdir(workspace_path="/tmp/lpot")
            workdir.delete_workload("workload_1")
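
The chained with patch(...) context managers above are valid but hard to read. On Python versions before 3.10 (which cannot parenthesize with-items), contextlib.ExitStack gives an equivalent flat layout; a self-contained sketch with a toy class standing in for Workdir:

from contextlib import ExitStack
from unittest.mock import patch

class Toy:
    def load(self):
        return "real"

with ExitStack() as stack:
    fake_isfile = stack.enter_context(patch("os.path.isfile"))
    fake_load = stack.enter_context(patch.object(Toy, "load"))
    fake_isfile.return_value = True
    fake_load.return_value = "mocked"
    print(Toy().load())  # mocked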
Example No. 5
    def get_code_template(data: dict) -> Response:
        """Get code template file for requested Workload."""
        workload_id = RequestDataProcessor.get_string_value(data, "workload_id")
        workdir = Workdir()
        workload_data = workdir.get_workload_data(workload_id)
        code_template_path = workload_data.get("code_template_path")

        if not code_template_path:
            raise NotFoundException(f"Unable to find code template file for {workload_id}")

        return ResponseGenerator.serve_from_filesystem(
            path=code_template_path,
            mimetype="text/x-python",
        )
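
ResponseGenerator.serve_from_filesystem is not included in this listing. Assuming the UX server is Flask-based, a plausible minimal equivalent built on flask.send_file (a sketch, not the verified lpot.ux implementation):

from flask import Response, send_file

def serve_from_filesystem(path: str, mimetype: str) -> Response:
    # Stream the file inline (not as a download) with the given MIME type.
    return send_file(path, mimetype=mimetype, as_attachment=False)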
Example No. 6
    def test_delete_workload_success(self) -> None:
        """Test workload deletion - success."""
        with patch("os.path.isfile") as fake_isfile, \
                patch("shutil.rmtree") as fake_rmtree, \
                patch.object(Workdir, "load", autospec=True) as fake_load, \
                patch.object(Workdir, "dump") as fake_dump:
            fake_isfile.return_value = True
            fake_rmtree.return_value = None
            fake_load.side_effect = _fake_load_mock
            fake_dump.return_value = None
            workdir = Workdir(workspace_path="/tmp/lpot")
            workdir.delete_workload("workload_1")
            self.assertNotIn(
                "workload_1",
                workdir.workloads_data.get("workloads", {}),
            )
            self.assertIn(
                "workload_2",
                workdir.workloads_data.get("workloads", {}),
            )
Example No. 7
def generate_template(workload: Workload, workdir: Workdir, type: str) -> None:
    """Generate code templates."""
    correct_paths = {
        "config_path": workload.config_path,
        "model_path": workload.model_path,
        "model_output_path": workload.model_output_path,
    }

    generated_template_path = os.path.join(workload.workload_path, "code_template.py")
    path_to_templates = os.path.join(
        os.path.dirname(__file__),
        "..",
        "..",
        "utils",
        "templates",
        f"{type}_template.txt",
    )
    copy(path_to_templates, generated_template_path)
    replace_with_values(correct_paths, generated_template_path)
    workdir.set_code_template_path(generated_template_path)
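
replace_with_values is not shown here; a plausible sketch, assuming it substitutes placeholder keys in the copied template with the workload's real paths:

def replace_with_values(values: dict, path: str) -> None:
    # Read the template, substitute each placeholder key, write it back.
    with open(path, "r") as template_file:
        content = template_file.read()
    for key, value in values.items():
        content = content.replace(key, str(value))
    with open(path, "w") as template_file:
        template_file.write(content)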
Example No. 8
    def get_output(data: dict) -> Response:
        """Get config file for requested Workload."""
        workload_id = RequestDataProcessor.get_string_value(data, "workload_id")
        workdir = Workdir()
        workload_data = workdir.get_workload_data(workload_id)
        log_path = workload_data.get("log_path")

        if not log_path:
            raise NotFoundException(f"Unable to find output log for {workload_id}")

        try:
            response = ResponseGenerator.serve_from_filesystem(
                path=log_path,
                mimetype="text/plain",
            )
        except Exception as e:
            response = ResponseGenerator.from_exception(e)

        return ResponseGenerator.add_refresh(
            response=response,
            refresh_time=3,
        )
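
ResponseGenerator.add_refresh is also not shown. A plausible reading is that it sets the HTTP Refresh header so the browser re-fetches the log every refresh_time seconds (assumed behavior, Flask-style response):

from flask import Response

def add_refresh(response: Response, refresh_time: int) -> Response:
    # Ask the client to re-request the resource after `refresh_time` seconds.
    response.headers["Refresh"] = str(refresh_time)
    return response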
Example No. 9
def save_workload(
    data: Dict[str, Any],
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
    """Get configuration."""
    parser = ConfigurationParser()
    parsed_data = parser.parse(data)

    workload = Workload(parsed_data)
    workload.dump()

    workdir = Workdir(
        workspace_path=data["workspace_path"],
        request_id=data["id"],
        model_path=data["model_path"],
    )

    update_config(workload, parsed_data, workdir)
    workload.config.dump(os.path.join(workdir.workload_path, workload.config_name))
    return workload.serialize()
Example No. 10
def execute_tuning(data: Dict[str, Any]) -> dict:
    """Get configuration."""
    from lpot.ux.utils.workload.workload import Workload

    if not str(data.get("id", "")):
        message = "Missing request id."
        mq.post_error(
            "tuning_finish",
            {"message": message, "code": 404},
        )
        raise Exception(message)

    request_id: str = data["id"]
    workdir = Workdir(request_id=request_id)
    workload_path: str = workdir.workload_path
    try:
        workload_data = load_json(os.path.join(workload_path, "workload.json"))
    except Exception as err:
        mq.post_error(
            "tuning_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise err
    workload = Workload(workload_data)
    tuning: Tuning = Tuning(
        workload,
        workdir.workload_path,
        workdir.template_path,
    )
    send_data = {
        "message": "started",
        "id": request_id,
        "size_fp32": get_size(tuning.model_path),
    }
    workdir.clean_logs()
    workdir.update_data(
        request_id=request_id,
        model_path=tuning.model_path,
        model_output_path=tuning.model_output_path,
        status="wip",
    )

    executor = Executor(
        workspace_path=workload_path,
        subject="tuning",
        data=send_data,
        log_name="output",
    )

    proc = executor.call(tuning.command)
    tuning_time = executor.process_duration
    if tuning_time:
        tuning_time = round(tuning_time, 2)
    log.debug(f"Elapsed time: {tuning_time}")
    logs = [os.path.join(workload_path, "output.txt")]
    parser = Parser(logs)
    if proc.is_ok:
        response_data = parser.process()

        if isinstance(response_data, dict):
            response_data["id"] = request_id
            response_data["tuning_time"] = tuning_time
            response_data["size_int8"] = get_size(tuning.model_output_path)
            response_data["model_output_path"] = tuning.model_output_path
            response_data["size_fp32"] = get_size(tuning.model_path)
            response_data["is_custom_dataloader"] = bool(workdir.template_path)

            workdir.update_data(
                request_id=request_id,
                model_path=tuning.model_path,
                model_output_path=tuning.model_output_path,
                metric=response_data,
                status="success",
                execution_details={"tuning": tuning.serialize()},
            )
            response_data["execution_details"] = {"tuning": tuning.serialize()}

        log.debug(f"Parsed data is {json.dumps(response_data)}")
        mq.post_success("tuning_finish", response_data)
        return response_data
    else:
        log.debug("FAIL")
        workdir.update_data(
            request_id=request_id,
            model_path=tuning.model_path,
            status="error",
        )
        mq.post_failure("tuning_finish", {"message": "failed", "id": request_id})
        raise ClientErrorException("Tuning failed during execution.")
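
Executor and its process_duration attribute come from lpot.ux and are not shown. A minimal self-contained sketch of the timing pattern using only the standard library (names hypothetical):

import subprocess
import time

def timed_call(command: list) -> float:
    # Run the command and return its wall-clock duration in seconds.
    start = time.monotonic()
    subprocess.run(command, check=False)
    return round(time.monotonic() - start, 2)

print(timed_call(["python", "--version"]))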
Example No. 11
def execute_benchmark(data: Dict[str, Any]) -> None:
    """
    Execute benchmark.

    Expected data:
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "input_model": {
            "precision": "fp32",
            "path": "/localdisk/fp32.pb"
        },
        "optimized_model": {
            "precision": "int8",
            "path": "/localdisk/int8.pb"
        }
    }
    """
    from lpot.ux.utils.workload.workload import Workload

    request_id = str(data.get("id", ""))
    input_model = data.get("input_model", None)
    optimized_model = data.get("optimized_model", None)

    if not (request_id and input_model and optimized_model):
        message = "Missing request id, input or optimized model data."
        mq.post_error(
            "benchmark_finish",
            {"message": message, "code": 404, "id": request_id},
        )
        raise ClientErrorException(message)

    # Tag each model dict only after validating both are present, so a
    # missing model raises ClientErrorException instead of AttributeError.
    input_model.update({"model_type": "input_model"})
    optimized_model.update({"model_type": "optimized_model"})

    workdir = Workdir(request_id=request_id, overwrite=False)
    try:
        workload_path = workdir.workload_path
        workload_data = _load_json_as_dict(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "benchmark_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise ClientErrorException(repr(err))

    workload = Workload(workload_data)

    response_data: Dict[str, Any] = {"id": request_id, "execution_details": {}}

    mq.post_success(
        "benchmark_start",
        {
            "message": "started",
            "id": request_id,
        },
    )

    models = [input_model, optimized_model]

    benchmark_count = 0
    benchmark_total = 0

    for model_info in models:
        benchmark_modes: List[str] = model_info.get("mode", [Benchmarks.PERF])
        if (
            not workload.tune and Benchmarks.ACC not in benchmark_modes
        ):  # Accuracy information is provided only in tuning
            benchmark_modes.append(Benchmarks.ACC)
        model_info.update({"benchmark_modes": benchmark_modes})
        benchmark_total += len(benchmark_modes)

    for model_info in models:
        model_precision = model_info.get("precision", None)
        model_type = model_info.get("model_type", None)
        model_path = model_info.get("path", None)
        benchmark_modes = model_info.get("benchmark_modes", None)

        if not (model_precision and model_path and model_type and benchmark_modes):
            message = "Missing model precision, model path or model type."
            mq.post_error(
                "benchmark_finish",
                {"message": message, "code": 404, "id": request_id},
            )
            raise ClientErrorException(message)

        for benchmark_mode in benchmark_modes:
            benchmark_count += 1
            response_data = benchmark_model_and_respond_to_ui(
                response_data=response_data,
                workload=workload,
                workdir=workdir,
                model=model_type,
                model_path=model_path,
                model_precision=model_precision,
                benchmark_mode=benchmark_mode,
                benchmark_count=benchmark_count,
                benchmark_total=benchmark_total,
            )
Example No. 12
def execute_benchmark(data: Dict[str, Any]) -> None:
    """
    Execute benchmark.

    Expected data:
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "models": [
            {
                "precision": "fp32",
                "path": "/localdisk/fp32.pb"
            },
            {
                "precision": "int8",
                "path": "/localdisk/int8.pb"
            }
        ]
    }
    """
    from lpot.ux.utils.workload.workload import Workload

    request_id = str(data.get("id", ""))
    models = data.get("models", None)

    if not (request_id and models):
        message = "Missing request id or model list."
        mq.post_error(
            "benchmark_finish",
            {"message": message, "code": 404, "id": request_id},
        )
        raise ClientErrorException(message)

    workdir = Workdir(request_id=request_id, overwrite=False)
    try:
        workload_path = workdir.workload_path
        workload_data = load_json(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "benchmark_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise ClientErrorException(repr(err))

    workload = Workload(workload_data)

    response_data: Dict[str, Any] = {"id": request_id, "execution_details": {}}

    mq.post_success(
        "benchmark_start",
        {
            "message": "started",
            "id": request_id,
        },
    )

    for idx, model_info in enumerate(models, start=1):
        model_precision = model_info.get("precision", None)
        model_path = model_info.get("path", None)
        benchmark_mode = model_info.get("mode", "performance")
        if not (model_precision and model_path):
            message = "Missing model precision or model path."
            mq.post_error(
                "benchmark_finish",
                {"message": message, "code": 404, "id": request_id},
            )
            raise ClientErrorException(message)

        benchmark: Benchmark = Benchmark(
            workload=workload,
            model_path=model_path,
            datatype=model_precision,
            mode=benchmark_mode,
        )

        log_name = f"{model_precision}_{benchmark_mode}_benchmark"

        executor = Executor(
            workload_path,
            subject="benchmark",
            data={"id": request_id},
            send_response=False,
            log_name=log_name,
            additional_log_names=["output.txt"],
        )

        proc = executor.call(
            benchmark.command,
        )

        logs = [os.path.join(workload_path, f"{log_name}.txt")]

        if proc.is_ok:
            parser = Parser(logs)
            metrics = parser.process()
            metric = {}
            execution_details: Dict[str, Any] = {}
            throughput_field = f"perf_throughput_{model_precision}"
            if isinstance(metrics, dict):
                metric = {throughput_field: metrics.get(throughput_field, "")}
                execution_details = {
                    f"{model_precision}_benchmark": benchmark.serialize(),
                }
                response_data.update({"progress": f"{idx}/{len(models)}"})
                response_data.update(metric)
                response_data["execution_details"].update(execution_details)
            workdir.update_metrics(
                request_id=request_id,
                metric_data=metric,
            )
            workdir.update_execution_details(
                request_id=request_id,
                execution_details=execution_details,
            )
            log.debug(f"Parsed data is {json.dumps(response_data)}")
            mq.post_success("benchmark_progress", response_data)
        else:
            log.error("Benchmark failed.")
            mq.post_failure("benchmark_finish", {"message": "failed", "id": request_id})
            raise ClientErrorException("Benchmark failed during execution.")

    mq.post_success("benchmark_finish", response_data)
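
The Parser class used above is not part of this listing. A plausible sketch of pulling a throughput metric out of a benchmark log (the log line format is assumed):

import re

def parse_throughput(log_path: str, precision: str) -> dict:
    # Scan the log for a "Throughput: <number>" line (format assumed).
    metrics = {}
    with open(log_path) as log_file:
        for line in log_file:
            match = re.search(r"[Tt]hroughput[:\s]+([\d.]+)", line)
            if match:
                metrics[f"perf_throughput_{precision}"] = float(match.group(1))
    return metrics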
Example No. 13
def benchmark_model(
    response_data: dict,
    workload: Workload,
    workdir: Workdir,
    model: str,
    model_path: str,
    model_precision: str,
    benchmark_mode: str,
    benchmark_count: int,
    benchmark_total: int,
) -> dict:
    """Benchmark model and prepare response data."""
    request_id = response_data.get("id")

    benchmark: Benchmark = Benchmark(
        workload=workload,
        model_path=model_path,
        precision=model_precision,
        mode=benchmark_mode,
    )

    log_name = f"{model}_{benchmark_mode}_benchmark"

    executor = Executor(
        workload.workload_path,
        subject="benchmark",
        data={"id": request_id},
        send_response=False,
        log_name=log_name,
        additional_log_names=["output.txt"],
    )

    proc = executor.call(
        benchmark.command,
    )

    logs = [os.path.join(workload.workload_path, f"{log_name}.txt")]

    if not proc.is_ok:
        raise ClientErrorException("Benchmark failed during execution.")

    parser = BenchmarkParserFactory.get_parser(benchmark_mode, logs)
    metrics = parser.process()
    metric = {}
    execution_details: Dict[str, Any] = {}

    if benchmark_mode == Benchmarks.PERF:
        result_field = f"perf_throughput_{model}"
    elif benchmark_mode == Benchmarks.ACC:
        result_field = f"acc_{model}"
    else:
        raise InternalException(f"Benchmark mode {benchmark_mode} is not supported.")

    if isinstance(metrics, dict):
        metric = {result_field: metrics.get(result_field, "")}
        execution_details = response_data.get("execution_details", {})
        model_benchmark_details = execution_details.get(f"{model}_benchmark", {})
        model_benchmark_details.update(
            {
                benchmark_mode: benchmark.serialize(),
            },
        )

        response_data.update({"progress": f"{benchmark_count}/{benchmark_total}"})
        response_data.update(metric)
        response_data["execution_details"].update(
            {f"{model}_benchmark": model_benchmark_details},
        )
    workdir.update_metrics(
        request_id=request_id,
        metric_data=metric,
    )
    workdir.update_execution_details(
        request_id=request_id,
        execution_details=execution_details,
    )
    log.debug(f"Parsed data is {json.dumps(response_data)}")
    mq.post_success("benchmark_progress", response_data)

    return response_data
Example No. 14
def delete_workload(data: dict) -> Dict[str, Any]:
    """Delete workload based on ID."""
    workdir = Workdir(workspace_path=os.environ["HOME"])
    workdir.delete_workload(data["request_id"])
    return {"message": "SUCCESS"}
Example No. 15
def execute_optimization(data: Dict[str, Any]) -> dict:
    """Get configuration."""
    from lpot.ux.utils.workload.workload import Workload

    if not str(data.get("id", "")):
        message = "Missing request id."
        mq.post_error(
            "optimization_finish",
            {"message": message, "code": 404},
        )
        raise Exception(message)

    request_id: str = data["id"]
    workdir = Workdir(request_id=request_id, overwrite=False)
    workload_path: str = workdir.workload_path
    try:
        workload_data = _load_json_as_dict(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "optimization_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise err
    workload = Workload(workload_data)
    optimization: Optimization = OptimizationFactory.get_optimization(
        workload,
        workdir.template_path,
    )
    send_data = {
        "message": "started",
        "id": request_id,
        "size_input_model": get_size(optimization.input_graph),
    }
    workdir.clean_logs()
    workdir.update_data(
        request_id=request_id,
        model_path=optimization.input_graph,
        input_precision=optimization.input_precision,
        model_output_path=optimization.output_graph,
        output_precision=optimization.output_precision,
        status="wip",
    )

    executor = Executor(
        workspace_path=workload_path,
        subject="optimization",
        data=send_data,
        log_name="output",
    )

    proc = executor.call(
        optimization.command,
    )
    optimization_time = executor.process_duration
    if optimization_time:
        optimization_time = round(optimization_time, 2)
    log.debug(f"Elapsed time: {optimization_time}")
    logs = [os.path.join(workload_path, "output.txt")]
    parser = OptimizationParser(logs)
    if proc.is_ok:
        response_data = parser.process()

        if isinstance(response_data, dict):
            response_data["id"] = request_id
            response_data["optimization_time"] = optimization_time
            response_data["size_optimized_model"] = get_size(optimization.output_graph)
            response_data["model_output_path"] = optimization.output_graph
            response_data["size_input_model"] = get_size(optimization.input_graph)
            response_data["is_custom_dataloader"] = bool(workdir.template_path)

            workdir.update_data(
                request_id=request_id,
                model_path=optimization.input_graph,
                model_output_path=optimization.output_graph,
                metric=response_data,
                status="success",
                execution_details={"optimization": optimization.serialize()},
                input_precision=optimization.input_precision,
                output_precision=optimization.output_precision,
            )
            response_data["execution_details"] = {"optimization": optimization.serialize()}

        log.debug(f"Parsed data is {json.dumps(response_data)}")
        mq.post_success("optimization_finish", response_data)
        return response_data
    else:
        log.debug("FAIL")
        workdir.update_data(
            request_id=request_id,
            model_path=optimization.input_graph,
            input_precision=optimization.input_precision,
            output_precision=optimization.output_precision,
            status="error",
        )
        mq.post_failure("optimization_finish", {"message": "failed", "id": request_id})
        raise ClientErrorException("Optimization failed during execution.")
Example No. 16
def list_models(data: dict) -> List[Dict[str, Any]]:
    """Process download model request."""
    workspace_path = Workdir().get_active_workspace()
    model_list = get_available_models(workspace_path)
    return model_list
Example No. 17
def clean_workloads_wip_status() -> None:
    """Clean WIP status for workloads in workloads_list.json."""
    workdir = Workdir(workspace_path=os.environ["HOME"])
    workdir.clean_status(status_to_clean="wip")