def test_plugin_backend():
    """Loading a plugin backend resolves it via entrypoints and builds it."""
    with mock.patch("entrypoints.get_single") as patched_get_single:
        fake_entry_point = mock.MagicMock(spec=entrypoints.EntryPoint)
        patched_get_single.return_value = fake_entry_point
        loader.load_backend("my_plugin")
        # The loader must query the entrypoints registry under its group name ...
        patched_get_single.assert_called_with(loader.ENTRYPOINT_GROUP_NAME, "my_plugin")
        # ... and then build the backend from the resolved entry point.
        fake_entry_point.load.assert_called_once()
def test_plugin_does_not_exist():
    """load_backend returns None when no matching entry point is registered."""
    def _missing_entry_point(group, name):
        raise entrypoints.NoSuchEntryPoint(group, name)

    with mock.patch("entrypoints.get_single") as patched_get_single:
        patched_get_single.side_effect = _missing_entry_point
        assert loader.load_backend("my_plugin") is None
def _run(uri, experiment_id, entry_point="main", version=None, parameters=None,
         docker_args=None, backend_name=None, backend_config=None, use_conda=True,
         storage_dir=None, synchronous=True):
    """
    Helper that delegates to the project-running method corresponding to the passed-in backend.

    Dispatch order: a named plugin backend (if it loads) runs first; otherwise the
    built-in "databricks", "local"/None, or "kubernetes" modes are tried, and any
    other name raises ``ExecutionException``.

    Returns a ``SubmittedRun`` corresponding to the project run.
    """
    tracking_store_uri = tracking.get_tracking_uri()
    # Plugin backends take priority: if the name resolves to a loadable backend,
    # delegate the whole run to it and tag the run with the backend name.
    if backend_name:
        backend = loader.load_backend(backend_name)
        # load_backend returns None for unknown backends, in which case we fall
        # through to the built-in execution modes below.
        if backend:
            submitted_run = backend.run(uri, entry_point, parameters, version,
                                        backend_config, experiment_id, tracking_store_uri)
            tracking.MlflowClient().set_tag(submitted_run.run_id, MLFLOW_PROJECT_BACKEND,
                                            backend_name)
            return submitted_run
    work_dir = fetch_and_validate_project(uri, version, entry_point, parameters)
    project = load_project(work_dir)
    _validate_execution_environment(project, backend_name)
    # The local backend may be asked to resume an existing run via a run-id entry
    # in backend_config; all other modes always create a fresh run.
    # NOTE(review): assumes backend_config is a dict when backend_name == "local" —
    # the `in` test would raise on None; confirm callers guarantee this.
    existing_run_id = None
    if backend_name == "local" and _MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG in backend_config:
        existing_run_id = backend_config[_MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG]
    active_run = get_or_create_run(existing_run_id, uri, experiment_id, work_dir, version,
                                   entry_point, parameters)
    if backend_name == "databricks":
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND,
                                        "databricks")
        # Imported lazily so the databricks dependencies are only needed in this mode.
        from mlflow.projects.databricks import run_databricks
        return run_databricks(
            remote_run=active_run, uri=uri, entry_point=entry_point, work_dir=work_dir,
            parameters=parameters, experiment_id=experiment_id, cluster_spec=backend_config)
    elif backend_name == "local" or backend_name is None:
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND,
                                        "local")
        # command_args accumulates environment-activation pieces plus the entry-point
        # command; command_separator joins them (" && " when a conda env must be
        # activated first).
        command_args = []
        command_separator = " "
        # If a docker_env attribute is defined in MLproject then it takes precedence over
        # conda yaml environments, so the project will be executed inside a docker container.
        if project.docker_env:
            tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV,
                                            "docker")
            _validate_docker_env(project)
            _validate_docker_installation()
            image = _build_docker_image(
                work_dir=work_dir,
                repository_uri=project.name,
                base_image=project.docker_env.get('image'),
                run_id=active_run.info.run_id)
            command_args += _get_docker_command(
                image=image, active_run=active_run, docker_args=docker_args,
                volumes=project.docker_env.get("volumes"),
                user_env_vars=project.docker_env.get("environment"))
        # Synchronously create a conda environment (even though this may take some time)
        # to avoid failures due to multiple concurrent attempts to create the same conda env.
        elif use_conda:
            tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV,
                                            "conda")
            command_separator = " && "
            conda_env_name = _get_or_create_conda_env(project.conda_env_path)
            command_args += _get_conda_command(conda_env_name)
        # In synchronous mode, run the entry point command in a blocking fashion, sending
        # status updates to the tracking server when finished. Note that the run state may
        # not be persisted to the tracking server if interrupted.
        if synchronous:
            command_args += _get_entry_point_command(project, entry_point, parameters,
                                                     storage_dir)
            command_str = command_separator.join(command_args)
            return _run_entry_point(command_str, work_dir, experiment_id,
                                    run_id=active_run.info.run_id)
        # Otherwise, invoke `mlflow run` in a subprocess
        return _invoke_mlflow_run_subprocess(
            work_dir=work_dir, entry_point=entry_point, parameters=parameters,
            experiment_id=experiment_id, use_conda=use_conda, storage_dir=storage_dir,
            run_id=active_run.info.run_id)
    elif backend_name == "kubernetes":
        # Imported lazily so kubernetes dependencies are only needed in this mode.
        from mlflow.projects import kubernetes as kb
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV,
                                        "docker")
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND,
                                        "kubernetes")
        _validate_docker_env(project)
        _validate_docker_installation()
        kube_config = _parse_kubernetes_config(backend_config)
        image = _build_docker_image(
            work_dir=work_dir,
            repository_uri=kube_config["repository-uri"],
            base_image=project.docker_env.get('image'),
            run_id=active_run.info.run_id)
        # Push the built image and run the job against the pushed digest so the
        # cluster pulls exactly the image that was just built.
        image_digest = kb.push_image_to_registry(image.tags[0])
        submitted_run = kb.run_kubernetes_job(
            project.name, active_run, image.tags[0], image_digest,
            _get_entry_point_command(project, entry_point, parameters, storage_dir),
            _get_run_env_vars(run_id=active_run.info.run_uuid,
                              experiment_id=active_run.info.experiment_id),
            kube_config.get('kube-context', None), kube_config['kube-job-template'])
        return submitted_run
    # Reached only for a backend name that is neither a loadable plugin nor a
    # built-in execution mode.
    supported_backends = ["local", "databricks", "kubernetes"]
    raise ExecutionException("Got unsupported execution mode %s. Supported "
                             "values: %s" % (backend_name, supported_backends))
def _run(
    uri,
    experiment_id,
    entry_point,
    version,
    parameters,
    docker_args,
    backend_name,
    backend_config,
    use_conda,
    storage_dir,
    synchronous,
):
    """
    Helper that delegates to the project-running method corresponding to the passed-in backend.

    Non-"databricks"/"kubernetes" names are resolved through the backend loader
    (built-in or plugin backends); "databricks" and "kubernetes" keep their legacy
    inline handling. Any unresolved name raises ``ExecutionException``.

    Returns a ``SubmittedRun`` corresponding to the project run.
    """
    tracking_store_uri = tracking.get_tracking_uri()
    # Fold the generic execution options into backend_config so loader-resolved
    # backends receive everything through a single config dict.
    # NOTE(review): this mutates the caller's dict in place — confirm callers do
    # not reuse backend_config after this call.
    backend_config[PROJECT_USE_CONDA] = use_conda
    backend_config[PROJECT_SYNCHRONOUS] = synchronous
    backend_config[PROJECT_DOCKER_ARGS] = docker_args
    backend_config[PROJECT_STORAGE_DIR] = storage_dir
    # TODO: remove this check once kubernetes execution has been refactored
    if backend_name not in {"databricks", "kubernetes"}:
        backend = loader.load_backend(backend_name)
        # load_backend returns None for unknown backends, in which case we fall
        # through to the legacy handling below.
        if backend:
            submitted_run = backend.run(
                uri,
                entry_point,
                parameters,
                version,
                backend_config,
                tracking_store_uri,
                experiment_id,
            )
            tracking.MlflowClient().set_tag(submitted_run.run_id, MLFLOW_PROJECT_BACKEND,
                                            backend_name)
            return submitted_run
    work_dir = fetch_and_validate_project(uri, version, entry_point, parameters)
    project = load_project(work_dir)
    _validate_execution_environment(project, backend_name)
    # Legacy paths always create a fresh run (no run-resumption support here).
    active_run = get_or_create_run(None, uri, experiment_id, work_dir, version,
                                   entry_point, parameters)
    if backend_name == "databricks":
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND,
                                        "databricks")
        # Imported lazily so the databricks dependencies are only needed in this mode.
        from mlflow.projects.databricks import run_databricks
        return run_databricks(
            remote_run=active_run,
            uri=uri,
            entry_point=entry_point,
            work_dir=work_dir,
            parameters=parameters,
            experiment_id=experiment_id,
            cluster_spec=backend_config,
        )
    elif backend_name == "kubernetes":
        # Imported lazily so docker/kubernetes dependencies are only needed in this mode.
        from mlflow.projects.docker import (
            build_docker_image,
            validate_docker_env,
            validate_docker_installation,
        )
        from mlflow.projects import kubernetes as kb

        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV,
                                        "docker")
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND,
                                        "kubernetes")
        validate_docker_env(project)
        validate_docker_installation()
        kube_config = _parse_kubernetes_config(backend_config)
        image = build_docker_image(
            work_dir=work_dir,
            repository_uri=kube_config["repository-uri"],
            base_image=project.docker_env.get("image"),
            run_id=active_run.info.run_id,
        )
        # Push the built image and run the job against the pushed digest so the
        # cluster pulls exactly the image that was just built.
        image_digest = kb.push_image_to_registry(image.tags[0])
        submitted_run = kb.run_kubernetes_job(
            project.name,
            active_run,
            image.tags[0],
            image_digest,
            get_entry_point_command(project, entry_point, parameters, storage_dir),
            get_run_env_vars(run_id=active_run.info.run_uuid,
                             experiment_id=active_run.info.experiment_id),
            kube_config.get("kube-context", None),
            kube_config["kube-job-template"],
        )
        return submitted_run
    # Reached only when the name is neither a loadable backend nor a legacy mode.
    supported_backends = ["databricks", "kubernetes"] + list(
        loader.MLFLOW_BACKENDS.keys())
    raise ExecutionException("Got unsupported execution mode %s. Supported "
                             "values: %s" % (backend_name, supported_backends))