def _run_entry_point(command, work_dir, experiment_id, run_id):
    """
    Run an entry point command in a subprocess, returning a SubmittedRun that can be used
    to query the run's status.

    :param command: Entry point command to run
    :param work_dir: Working directory in which to run the command
    :param experiment_id: MLflow experiment ID associated with the entry point execution.
    :param run_id: MLflow run ID associated with the entry point execution.
    :return: ``LocalSubmittedRun`` wrapping the launched subprocess.
    """
    env = os.environ.copy()
    env.update(get_run_env_vars(run_id, experiment_id))
    env.update(get_databricks_env_vars(tracking_uri=mlflow.get_tracking_uri()))
    _logger.info("=== Running command '%s' in run with ID '%s' === ", command, run_id)
    # os.name == "nt" only on Windows; elsewhere run the command through bash,
    # on Windows use cmd's /c flag instead.
    if os.name != "nt":
        process = subprocess.Popen(["bash", "-c", command], close_fds=True, cwd=work_dir, env=env)
    else:
        process = subprocess.Popen(["cmd", "/c", command], close_fds=True, cwd=work_dir, env=env)
    return LocalSubmittedRun(run_id, process)
def _invoke_mlflow_run_subprocess(
        work_dir, entry_point, parameters, experiment_id, use_conda, storage_dir, run_id):
    """
    Launch ``mlflow run`` in a child process to execute an MLflow project
    asynchronously, returning a SubmittedRun for querying run status.
    """
    _logger.info("=== Asynchronously launching MLflow run with ID %s ===", run_id)
    # Build the `mlflow run` command line for the project, then spawn it with
    # the run/experiment identifiers injected via environment variables.
    run_cmd = _build_mlflow_run_cmd(
        uri=work_dir,
        entry_point=entry_point,
        storage_dir=storage_dir,
        use_conda=use_conda,
        run_id=run_id,
        parameters=parameters,
    )
    child_proc = _run_mlflow_run_cmd(run_cmd, _get_run_env_vars(run_id, experiment_id))
    return LocalSubmittedRun(run_id, child_proc)
def _run_entry_point(command, work_dir, experiment_id, run_id):
    """
    Execute an entry point command in a subprocess and return a SubmittedRun
    handle that can be used to query the run's status.

    :param command: Entry point command to run
    :param work_dir: Working directory in which to run the command
    :param run_id: MLflow run ID associated with the entry point execution.
    """
    # Child environment: current process env plus run/experiment identifiers.
    subprocess_env = {**os.environ, **_get_run_env_vars(run_id, experiment_id)}
    eprint("=== Running command '%s' in run with ID '%s' === " % (command, run_id))
    child = subprocess.Popen(
        ["bash", "-c", command], close_fds=True, cwd=work_dir, env=subprocess_env)
    return LocalSubmittedRun(run_id, child)
def _invoke_mlflow_run_subprocess(
    work_dir, entry_point, parameters, experiment_id, env_manager, docker_args, storage_dir, run_id
):
    """
    Asynchronously execute an MLflow project by spawning ``mlflow run`` in a
    subprocess, returning a SubmittedRun that can be used to query run status.
    """
    _logger.info("=== Asynchronously launching MLflow run with ID %s ===", run_id)
    # Environment for the child process: run/experiment identifiers plus any
    # Databricks variables needed to reach the configured tracking server.
    child_env = get_run_env_vars(run_id, experiment_id)
    child_env.update(get_databricks_env_vars(mlflow.get_tracking_uri()))
    command = _build_mlflow_run_cmd(
        uri=work_dir,
        entry_point=entry_point,
        docker_args=docker_args,
        storage_dir=storage_dir,
        env_manager=env_manager,
        run_id=run_id,
        parameters=parameters,
    )
    return LocalSubmittedRun(run_id, _run_mlflow_run_cmd(command, child_env))
def _invoke_mlflow_run_subprocess(work_dir, entry_point, parameters, experiment_id, use_conda,
                                  storage_dir, run_id):
    """
    Asynchronously run an MLflow project by launching ``mlflow run`` in a
    subprocess, returning a SubmittedRun that can be used to query run status.
    """
    eprint("=== Asynchronously launching MLflow run with ID %s ===" % run_id)
    # Inject the run ID (plus tracking URI and experiment ID) through magic
    # environment variables so the subprocess reuses this run rather than
    # creating a new one.
    subprocess_env = {
        tracking._RUN_ID_ENV_VAR: run_id,
        tracking._TRACKING_URI_ENV_VAR: tracking.get_tracking_uri(),
        tracking._EXPERIMENT_ID_ENV_VAR: str(experiment_id),
    }
    run_cmd = _build_mlflow_run_cmd(
        uri=work_dir, entry_point=entry_point, storage_dir=storage_dir,
        use_conda=use_conda, run_id=run_id, parameters=parameters)
    return LocalSubmittedRun(run_id, _run_mlflow_run_cmd(run_cmd, subprocess_env))