Example #1
def _get_mlflow_tags(filename=None, manual_tags=None):
    """Build the default MLflow tag dictionary for a run.

    ``filename`` can be passed explicitly as a string, e.g. from Jupyter,
    where ``os.path.basename(__file__)`` does not work.
    """
    # Use the explicitly supplied filename if provided;
    # otherwise resolve the source name automatically.
    if filename:
        source_name = filename
    else:
        source_name = resolve_tags()["mlflow.source.name"]

    # Git metadata is resolved relative to the current working directory.
    work_dir = os.getcwd()

    source_version = mlflow_utils._get_git_commit(work_dir)
    tags = {
        MLFLOW_USER: mlflow_utils._get_user(),
        MLFLOW_SOURCE_NAME: source_name,
    }
    if source_version is not None:
        tags[MLFLOW_GIT_COMMIT] = source_version

    repo_url = mlflow_utils._get_git_repo_url(work_dir)
    if repo_url is not None:
        tags[MLFLOW_GIT_REPO_URL] = repo_url
        tags[LEGACY_MLFLOW_GIT_REPO_URL] = repo_url

    # Manually supplied tags override any automatically resolved ones.
    if manual_tags:
        tags.update(manual_tags)

    return tags
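
For reference, a minimal usage sketch of the helper above. It assumes the tag constants come from mlflow.utils.mlflow_tags (as in MLflow itself); the filename and manual tags are purely illustrative:

# Hypothetical usage; assumes _get_mlflow_tags and the tag constants are
# importable in the current module.
import mlflow

tags = _get_mlflow_tags(
    filename="train_model.ipynb",          # explicit name, e.g. in Jupyter
    manual_tags={"team": "forecasting"},   # extends/overrides resolved tags
)

# Attach the resolved tags when starting a run via the public tracking API.
with mlflow.start_run(tags=tags):
    mlflow.log_param("alpha", 0.5)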
Example #2
def get_mlflow_tags(uri, experiment_id, work_dir, filename):
    # `uri` and `experiment_id` are accepted for interface compatibility
    # but are not used below; the caller supplies the source name directly.
    source_name = filename

    source_version = mlflow_utils._get_git_commit(work_dir)

    # If a run is already active, record it so the new run can be linked
    # to it as a child via MLFLOW_PARENT_RUN_ID.
    existing_run = fluent.active_run()
    if existing_run:
        parent_run_id = existing_run.info.run_id
    else:
        parent_run_id = None

    tags = {
        MLFLOW_USER: mlflow_utils._get_user(),
        MLFLOW_SOURCE_NAME: source_name,
    }
    if source_version is not None:
        tags[MLFLOW_GIT_COMMIT] = source_version
    if parent_run_id is not None:
        tags[MLFLOW_PARENT_RUN_ID] = parent_run_id

    repo_url = mlflow_utils._get_git_repo_url(work_dir)
    if repo_url is not None:
        tags[MLFLOW_GIT_REPO_URL] = repo_url
        tags[LEGACY_MLFLOW_GIT_REPO_URL] = repo_url

    return tags
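
A short, hypothetical call site for this variant. When invoked inside an already active run, the returned tags link any new run back to it through MLFLOW_PARENT_RUN_ID; all argument values below are illustrative:

# Hypothetical call site; assumes get_mlflow_tags and MLFLOW_PARENT_RUN_ID
# are importable in the current module.
import mlflow

with mlflow.start_run() as parent:
    tags = get_mlflow_tags(
        uri="https://github.com/example/project",
        experiment_id="0",
        work_dir=".",
        filename="train.py",
    )
    # The active run was recorded as the parent of any run started
    # with these tags.
    assert tags[MLFLOW_PARENT_RUN_ID] == parent.info.run_id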
Example #3
def _run(uri,
         experiment_id,
         entry_point="main",
         version=None,
         parameters=None,
         backend=None,
         backend_config=None,
         use_conda=True,
         storage_dir=None,
         synchronous=True,
         run_id=None):
    """
    Helper that delegates to the project-running method corresponding to the passed-in backend.
    Returns a ``SubmittedRun`` corresponding to the project run.
    """

    parameters = parameters or {}
    work_dir = _fetch_project(uri=uri, force_tempdir=False, version=version)
    project = _project_spec.load_project(work_dir)
    _validate_execution_environment(project, backend)
    project.get_entry_point(entry_point)._validate_parameters(parameters)
    if run_id:
        active_run = tracking.MlflowClient().get_run(run_id)
    else:
        active_run = _create_run(uri, experiment_id, work_dir, entry_point)

    # Consolidate parameters for logging. `storage_dir` is `None` because we
    # want to log the actual path, not the downloaded local path.
    entry_point_obj = project.get_entry_point(entry_point)
    final_params, extra_params = entry_point_obj.compute_parameters(
        parameters, storage_dir=None)
    for key, value in (list(final_params.items()) +
                       list(extra_params.items())):
        tracking.MlflowClient().log_param(active_run.info.run_id, key, value)

    repo_url = _get_git_repo_url(work_dir)
    if repo_url is not None:
        for tag in [MLFLOW_GIT_REPO_URL, LEGACY_MLFLOW_GIT_REPO_URL]:
            tracking.MlflowClient().set_tag(active_run.info.run_id, tag,
                                            repo_url)

    # Add a branch-name tag if a branch was specified via the --version flag
    if _is_valid_branch_name(work_dir, version):
        for tag in [MLFLOW_GIT_BRANCH, LEGACY_MLFLOW_GIT_BRANCH_NAME]:
            tracking.MlflowClient().set_tag(active_run.info.run_id, tag,
                                            version)

    if backend == "databricks":
        tracking.MlflowClient().set_tag(active_run.info.run_id,
                                        MLFLOW_PROJECT_BACKEND, "databricks")
        from mlflow.projects.databricks import run_databricks
        return run_databricks(remote_run=active_run,
                              uri=uri,
                              entry_point=entry_point,
                              work_dir=work_dir,
                              parameters=parameters,
                              experiment_id=experiment_id,
                              cluster_spec=backend_config)

    elif backend == "local" or backend is None:
        command_args = []
        command_separator = " "
        # If a docker_env attribute is defined in MLproject, it takes precedence
        # over the conda YAML environment, so the project is executed inside a
        # Docker container.
        if project.docker_env:
            tracking.MlflowClient().set_tag(active_run.info.run_id,
                                            MLFLOW_PROJECT_ENV, "docker")
            tracking.MlflowClient().set_tag(active_run.info.run_id,
                                            MLFLOW_PROJECT_BACKEND, "local")
            _validate_docker_env(project)
            _validate_docker_installation()
            image = _build_docker_image(
                work_dir=work_dir,
                repository_uri=project.name,
                base_image=project.docker_env.get('image'),
                run_id=active_run.info.run_id)
            command_args += _get_docker_command(
                image=image,
                active_run=active_run,
                volumes=project.docker_env.get("volumes"),
                user_env_vars=project.docker_env.get("environment"))
        # Synchronously create a conda environment (even though this may take some time)
        # to avoid failures due to multiple concurrent attempts to create the same conda env.
        elif use_conda:
            tracking.MlflowClient().set_tag(active_run.info.run_id,
                                            MLFLOW_PROJECT_ENV, "conda")
            tracking.MlflowClient().set_tag(active_run.info.run_id,
                                            MLFLOW_PROJECT_BACKEND, "local")
            command_separator = " && "
            conda_env_name = _get_or_create_conda_env(project.conda_env_path)
            command_args += _get_conda_command(conda_env_name)
        # In synchronous mode, run the entry point command in a blocking fashion,
        # sending status updates to the tracking server when finished. Note that
        # the run state may not be persisted to the tracking server if interrupted.
        if synchronous:
            command_args += _get_entry_point_command(project, entry_point,
                                                     parameters, storage_dir)
            command_str = command_separator.join(command_args)
            return _run_entry_point(command_str,
                                    work_dir,
                                    experiment_id,
                                    run_id=active_run.info.run_id)
        # Otherwise, invoke `mlflow run` in a subprocess
        return _invoke_mlflow_run_subprocess(work_dir=work_dir,
                                             entry_point=entry_point,
                                             parameters=parameters,
                                             experiment_id=experiment_id,
                                             use_conda=use_conda,
                                             storage_dir=storage_dir,
                                             run_id=active_run.info.run_id)
    elif backend == "kubernetes":
        from mlflow.projects import kubernetes as kb
        tracking.MlflowClient().set_tag(active_run.info.run_id,
                                        MLFLOW_PROJECT_ENV, "docker")
        tracking.MlflowClient().set_tag(active_run.info.run_id,
                                        MLFLOW_PROJECT_BACKEND, "kubernetes")
        _validate_docker_env(project)
        _validate_docker_installation()
        kube_config = _parse_kubernetes_config(backend_config)
        image = _build_docker_image(
            work_dir=work_dir,
            repository_uri=kube_config["repository-uri"],
            base_image=project.docker_env.get('image'),
            run_id=active_run.info.run_id)
        image_digest = kb.push_image_to_registry(image.tags[0])
        submitted_run = kb.run_kubernetes_job(
            project.name, active_run, image.tags[0], image_digest,
            _get_entry_point_command(project, entry_point, parameters,
                                     storage_dir),
            _get_run_env_vars(run_id=active_run.info.run_id,
                              experiment_id=active_run.info.experiment_id),
            kube_config['kube-context'], kube_config['kube-job-template'])
        return submitted_run

    supported_backends = ["local", "databricks", "kubernetes"]
    raise ExecutionException("Got unsupported execution mode %s. Supported "
                             "values: %s" % (backend, supported_backends))