Example #1
def _validate_docker_installation():
    """
    Verify if Docker is installed on host machine.
    """
    try:
        docker_path = "docker"
        process.exec_cmd([docker_path, "--help"], throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException(
            "Could not find Docker executable. "
            "Ensure Docker is installed as per the instructions "
            "at https://docs.docker.com/install/overview/.")
Example #2
def _run_project(project, entry_point, work_dir, parameters, use_conda,
                 storage_dir, experiment_id):
    """Locally run a project that has been checked out in `work_dir`."""
    if storage_dir is not None and not os.path.exists(storage_dir):
        os.makedirs(storage_dir)
    storage_dir_for_run = tempfile.mkdtemp(dir=storage_dir)
    print(
        "=== Created directory %s for downloading remote URIs passed to arguments of "
        "type 'path' ===" % storage_dir_for_run)
    # Try to build the command first in case the user mis-specified parameters
    run_project_command = project.get_entry_point(entry_point).compute_command(
        parameters, storage_dir_for_run)
    commands = []

    # Create a new run and log every provided parameter into it.
    active_run = tracking.start_run(
        experiment_id=experiment_id,
        source_name=project.uri,
        source_version=tracking._get_git_commit(work_dir),
        entry_point_name=entry_point,
        source_type=SourceType.PROJECT)
    for key, value in parameters.items():
        active_run.log_param(Param(key, value))
    # Add the run id into a magic environment variable that the subprocess will read,
    # causing it to reuse the run.
    exp_id = experiment_id or tracking._get_experiment_id()
    env_map = {
        tracking._RUN_NAME_ENV_VAR: active_run.run_info.run_uuid,
        tracking._TRACKING_URI_ENV_VAR: tracking.get_tracking_uri(),
        tracking._EXPERIMENT_ID_ENV_VAR: str(exp_id),
    }

    commands.append(run_project_command)
    command = " && ".join(commands)
    print("=== Running command: %s ===" % command)
    try:
        process.exec_cmd([os.environ.get("SHELL", "bash"), "-c", command],
                         cwd=work_dir,
                         stream_output=True,
                         env=env_map)
        tracking.end_run()
        print("=== Run succeeded ===")
    except process.ShellCommandException:
        tracking.end_run("FAILED")
        print("=== Run failed ===")
Example #3
def _run_project(project, entry_point, work_dir, parameters, use_conda,
                 storage_dir, experiment_id):
    """Locally run a project that has been checked out in `work_dir`."""
    storage_dir_for_run = _get_storage_dir(storage_dir)
    eprint(
        "=== Created directory %s for downloading remote URIs passed to arguments of "
        "type 'path' ===" % storage_dir_for_run)
    # Try to build the command first in case the user mis-specified parameters
    run_project_command = project.get_entry_point(entry_point)\
        .compute_command(parameters, storage_dir_for_run)
    commands = []
    if use_conda:
        conda_env_path = os.path.abspath(
            os.path.join(work_dir, project.conda_env))
        _maybe_create_conda_env(conda_env_path)
        commands.append("source activate %s" %
                        _get_conda_env_name(conda_env_path))

    # Create a new run and log every provided parameter into it.
    active_run = tracking.start_run(
        experiment_id=experiment_id,
        source_name=project.uri,
        source_version=tracking._get_git_commit(work_dir),
        entry_point_name=entry_point,
        source_type=SourceType.PROJECT)
    if parameters is not None:
        for key, value in parameters.items():
            active_run.log_param(Param(key, value))
    # Add the run id into a magic environment variable that the subprocess will read,
    # causing it to reuse the run.
    exp_id = experiment_id or tracking._get_experiment_id()
    env_map = {
        tracking._RUN_NAME_ENV_VAR: active_run.run_info.run_uuid,
        tracking._TRACKING_URI_ENV_VAR: tracking.get_tracking_uri(),
        tracking._EXPERIMENT_ID_ENV_VAR: str(exp_id),
    }

    commands.append(run_project_command)
    command = " && ".join(commands)
    eprint("=== Running command: %s ===" % command)
    try:
        process.exec_cmd([os.environ.get("SHELL", "bash"), "-c", command],
                         cwd=work_dir,
                         stream_output=True,
                         env=env_map)
        tracking.end_run()
        eprint("=== Run succeeded ===")
    except process.ShellCommandException:
        tracking.end_run("FAILED")
        eprint("=== Run failed ===")
Example #4
def _maybe_create_conda_env(conda_env_path):
    conda_env = _get_conda_env_name(conda_env_path)
    try:
        process.exec_cmd(["conda", "--help"], throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException('conda is not installed properly. Please follow the instructions '
                                 'on https://conda.io/docs/user-guide/install/index.html')
    (_, stdout, _) = process.exec_cmd(["conda", "env", "list", "--json"])
    env_names = [os.path.basename(env) for env in json.loads(stdout)['envs']]

    conda_action = 'create'
    if conda_env not in env_names:
        eprint('=== Creating conda environment %s ===' % conda_env)
        process.exec_cmd(["conda", "env", conda_action, "-n", conda_env, "--file",
                          conda_env_path], stream_output=True)
Example #5
def test_run_local_conda_env(tracking_uri_mock):  # pylint: disable=unused-argument
    with open(os.path.join(TEST_PROJECT_DIR, "conda.yaml"), "r") as handle:
        conda_env_contents = handle.read()
    expected_env_name = "mlflow-%s" % hashlib.sha1(
        conda_env_contents.encode("utf-8")).hexdigest()
    try:
        process.exec_cmd(
            cmd=["conda", "env", "remove", "--name", expected_env_name])
    except process.ShellCommandException:
        logging_utils.eprint(
            "Unable to remove conda environment %s. The environment may not have been present, "
            "continuing with running the test." % expected_env_name)
    invoke_cli_runner(cli.run, [
        TEST_PROJECT_DIR, "-e", "check_conda_env", "-P",
        "conda_env_name=%s" % expected_env_name
    ])
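The expected environment name asserted above is "mlflow-" followed by a SHA-1 hash of the conda YAML contents. The _get_conda_env_name helper used throughout these examples is not shown on this page; a minimal sketch consistent with that convention (the optional env_id salt appears in later examples) might be:

import hashlib


def _get_conda_env_name(conda_env_path, env_id=None):
    # Hash the conda.yaml contents so that identical environments are reused.
    conda_env_contents = ""
    if conda_env_path:
        with open(conda_env_path) as handle:
            conda_env_contents = handle.read()
    # Optionally mix in an extra identifier to force a distinct environment
    # for otherwise identical dependency lists.
    if env_id:
        conda_env_contents += env_id
    return "mlflow-%s" % hashlib.sha1(conda_env_contents.encode("utf-8")).hexdigest()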
Example #6
def _get_or_create_conda_env(conda_env_path, env_id=None):
    """
    Given a `Project`, creates a conda environment containing the project's dependencies if such a
    conda environment doesn't already exist. Returns the name of the conda environment.
    :param conda_env_path: Path to a conda yaml file.
    :param env_id: Optional string that is added to the contents of the yaml file before
                   calculating the hash. It can be used to distinguish environments that have the
                   same conda dependencies but are supposed to be different based on the context.
                   For example, when serving the model we may install additional dependencies to the
                   environment after the environment has been activated.
    """
    conda_path = _get_conda_bin_executable("conda")
    try:
        process.exec_cmd([conda_path, "--help"], throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException("Could not find Conda executable at {0}. "
                                 "Ensure Conda is installed as per the instructions "
                                 "at https://conda.io/docs/user-guide/install/index.html. You can "
                                 "also configure MLflow to look for a specific Conda executable "
                                 "by setting the {1} environment variable to the path of the Conda "
                                 "executable".format(conda_path, MLFLOW_CONDA_HOME))
    (_, stdout, _) = process.exec_cmd([conda_path, "env", "list", "--json"])
    env_names = [os.path.basename(env) for env in json.loads(stdout)['envs']]
    project_env_name = _get_conda_env_name(conda_env_path, env_id)
    if project_env_name not in env_names:
        _logger.info('=== Creating conda environment %s ===', project_env_name)
        if conda_env_path:
            process.exec_cmd([conda_path, "env", "create", "-n", project_env_name, "--file",
                              conda_env_path], stream_output=True)
        else:
            process.exec_cmd(
                [conda_path, "create", "-n", project_env_name, "python"], stream_output=True)
    return project_env_name
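As a hypothetical usage sketch (following the activation pattern in the _run_project examples), the returned environment name is spliced into a shell command and executed through exec_cmd; the file name and training command below are placeholders:

import os

# Assumes _get_or_create_conda_env and process are available as in the example above.
env_name = _get_or_create_conda_env("conda.yaml")
command = " && ".join(["source activate %s" % env_name, "python train.py"])
process.exec_cmd([os.environ.get("SHELL", "bash"), "-c", command],
                 stream_output=True)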
Example #7
def _get_or_create_conda_env(conda_env_path):
    """
    Given a `Project`, creates a conda environment containing the project's dependencies if such a
    conda environment doesn't already exist. Returns the name of the conda environment.
    """
    conda_path = _get_conda_bin_executable("conda")
    try:
        process.exec_cmd([conda_path, "--help"], throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException(
            "Could not find Conda executable at {0}. "
            "Ensure Conda is installed as per the instructions "
            "at https://conda.io/docs/user-guide/install/index.html. You can "
            "also configure MLflow to look for a specific Conda executable "
            "by setting the {1} environment variable to the path of the Conda "
            "executable".format(conda_path, MLFLOW_CONDA_HOME))
    (_, stdout, _) = process.exec_cmd([conda_path, "env", "list", "--json"])
    env_names = [os.path.basename(env) for env in json.loads(stdout)['envs']]
    project_env_name = _get_conda_env_name(conda_env_path)
    if project_env_name not in env_names:
        eprint('=== Creating conda environment %s ===' % project_env_name)
        if conda_env_path:
            process.exec_cmd([
                conda_path, "env", "create", "-n", project_env_name, "--file",
                conda_env_path
            ],
                             stream_output=True)
        else:
            process.exec_cmd(
                [conda_path, "create", "-n", project_env_name, "python"],
                stream_output=True)
    return project_env_name
Example #8
def _run_server(
    file_store_path,
    default_artifact_root,
    serve_artifacts,
    artifacts_only,
    artifacts_destination,
    host,
    port,
    static_prefix=None,
    workers=None,
    gunicorn_opts=None,
    waitress_opts=None,
    expose_prometheus=None,
):
    """
    Run the MLflow server, wrapping it in gunicorn (or waitress on Windows).
    :param static_prefix: If set, the index.html asset will be served from the path static_prefix.
                          If left None, the index.html asset will be served from the root path.
    :return: None
    """
    env_map = {}
    if file_store_path:
        env_map[BACKEND_STORE_URI_ENV_VAR] = file_store_path
    if default_artifact_root:
        env_map[ARTIFACT_ROOT_ENV_VAR] = default_artifact_root
    if serve_artifacts:
        env_map[SERVE_ARTIFACTS_ENV_VAR] = "true"
    if artifacts_only:
        env_map[ARTIFACTS_ONLY_ENV_VAR] = "true"
    if artifacts_destination:
        env_map[ARTIFACTS_DESTINATION_ENV_VAR] = artifacts_destination
    if static_prefix:
        env_map[STATIC_PREFIX_ENV_VAR] = static_prefix

    if expose_prometheus:
        env_map[PROMETHEUS_EXPORTER_ENV_VAR] = expose_prometheus

    # TODO: eventually may want waitress on non-win32
    if sys.platform == "win32":
        full_command = _build_waitress_command(waitress_opts, host, port)
    else:
        full_command = _build_gunicorn_command(gunicorn_opts, host, port,
                                               workers or 4)
    exec_cmd(full_command, env=env_map, stream_output=True)
Example #9
def _maybe_create_conda_env(conda_env_path):
    conda_env = _get_conda_env_name(conda_env_path)
    conda_path = _conda_executable()
    try:
        process.exec_cmd([conda_path, "--help"], throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException("Could not find conda executable at {0}. "
                                 "Please ensure conda is installed as per the instructions "
                                 "at https://conda.io/docs/user-guide/install/index.html. You may "
                                 "also configure MLflow to look for a specific conda executable "
                                 "by setting the {1} environment variable to the path of the conda "
                                 "executable".format(conda_path, MLFLOW_CONDA))
    (_, stdout, _) = process.exec_cmd([conda_path, "env", "list", "--json"])
    env_names = [os.path.basename(env) for env in json.loads(stdout)['envs']]

    conda_action = 'create'
    if conda_env not in env_names:
        eprint('=== Creating conda environment %s ===' % conda_env)
        process.exec_cmd([conda_path, "env", conda_action, "-n", conda_env, "--file",
                          conda_env_path], stream_output=True)
Example #10
def _get_conda_command(conda_env_name, direct_output_to_err=False):
    conda_path = _get_conda_bin_executable("conda")
    activate_path = _get_conda_bin_executable("activate")

    try:
        process.exec_cmd([conda_path, "--help"], throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException(
            "Could not find Conda executable at {0}. "
            "Ensure Conda is installed as per the instructions "
            "at https://conda.io/docs/user-guide/install/index.html. You can "
            "also configure MLflow to look for a specific Conda executable "
            "by setting the {1} environment variable to the path of the Conda "
            "executable".format(conda_path, MLFLOW_CONDA_HOME))

    (_, stdout, _) = process.exec_cmd([conda_path, "info", "--json"])
    conda_env_version = json.loads(stdout)['conda_version']
    conda_env_version_major = int(conda_env_version.split(".")[0])
    conda_env_version_minor = int(conda_env_version.split(".")[1])

    output_direct = ""
    if direct_output_to_err:
        output_direct = " 1>&2"

    # Outside of Windows (os.name != 'nt'), conda 4.x releases older than 4.6 only
    # support the bash-style "source activate <env>" form; newer versions use
    # "conda activate" instead.
    if os.name != "nt" and (conda_env_version_major == 4
                            and conda_env_version_minor < 6):
        return [
            "source %s %s%s" % (activate_path, conda_env_name, output_direct)
        ]
    else:
        # TODO: Need to fix; getting hold of conda.sh is not simple.
        # As per https://github.com/conda/conda/issues/7126
        # Notes:
        # 1. $(dirname $CONDA_EXE)/../etc/profile.d/conda.sh will break in cases where conda
        #    and conda.sh are in the expected directories, i.e. /usr/bin/conda and
        #    /etc/profile.d/conda.sh.
        # 2. $(dirname $CONDA_EXE)/activate <env> will not work if activate and deactivate
        #    do not stick around.
        return [
            "source /etc/profile.d/conda.sh",
            "%s activate %s%s" % (conda_path, conda_env_name, output_direct)
        ]
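A hypothetical caller would append the entry-point command to the returned activation commands and run everything in a single shell, mirroring the _run_project examples; the environment name and training command below are placeholders:

import os

# Assumes _get_conda_command and process are available as in the example above.
commands = _get_conda_command("mlflow-1234abcd")
commands.append("python train.py")
process.exec_cmd([os.environ.get("SHELL", "bash"), "-c", " && ".join(commands)],
                 stream_output=True)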
Example #11
def test_mnist_example():
    os.environ["MKL_THREADING_LAYER"] = "GNU"
    home_dir = os.getcwd()
    mnist_dir = "examples/MNIST"
    example_command = ["python", "mnist_model.py", "--max_epochs", "1"]
    process.exec_cmd(example_command, cwd=mnist_dir)

    assert os.path.exists(os.path.join(mnist_dir, "models", "state_dict.pth"))
    create_deployment_command = [
        "python",
        "create_deployment.py",
        "--export_path",
        os.path.join(home_dir, "model_store"),
    ]

    process.exec_cmd(create_deployment_command, cwd=mnist_dir)

    assert os.path.exists(os.path.join(home_dir, "model_store", "mnist_classification.mar"))

    predict_command = ["python", "predict.py"]
    res = process.exec_cmd(predict_command, cwd=mnist_dir)
    assert "ONE" in res[1]
Example #12
def _run_server(file_store_path, default_artifact_root, host, port, static_prefix=None,
                workers=None, gunicorn_opts=None, waitress_opts=None):
    """
    Run the MLflow server, wrapping it in gunicorn (or waitress on Windows).
    :param static_prefix: If set, the index.html asset will be served from the path static_prefix.
                          If left None, the index.html asset will be served from the root path.
    :return: None
    """
    env_map = {}
    if file_store_path:
        env_map[BACKEND_STORE_URI_ENV_VAR] = file_store_path
    if default_artifact_root:
        env_map[ARTIFACT_ROOT_ENV_VAR] = default_artifact_root
    if static_prefix:
        env_map[STATIC_PREFIX_ENV_VAR] = static_prefix

    # TODO: eventually may want waitress on non-win32
    if sys.platform == 'win32':
        full_command = _build_waitress_command(waitress_opts, host, port)
    else:
        full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4)
    exec_cmd(full_command, env=env_map, stream_output=True)
Example #13
def _run_server(file_store_path, default_artifact_root, host, port, workers,
                static_prefix, gunicorn_opts):
    """
    Run the MLflow server, wrapping it in gunicorn
    :param static_prefix: If set, the index.html asset will be served from the path static_prefix.
                          If left None, the index.html asset will be served from the root path.
    :return: None
    """
    env_map = {}
    if file_store_path:
        env_map[BACKEND_STORE_URI_ENV_VAR] = file_store_path
    if default_artifact_root:
        env_map[ARTIFACT_ROOT_ENV_VAR] = default_artifact_root
    if static_prefix:
        env_map[STATIC_PREFIX_ENV_VAR] = static_prefix
    bind_address = "%s:%s" % (host, port)
    opts = shlex.split(gunicorn_opts) if gunicorn_opts else []
    exec_cmd(["gunicorn"] + opts +
             ["-b", bind_address, "-w",
              "%s" % workers, "mlflow.server:app"],
             env=env_map,
             stream_output=True)
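The _run_server variants in Examples #8 and #12 delegate command construction to _build_gunicorn_command and _build_waitress_command, which are not shown on this page. Based on the inline gunicorn invocation above, a sketch of the gunicorn helper might look like the following (the waitress variant would be analogous):

import shlex


def _build_gunicorn_command(gunicorn_opts, host, port, workers):
    # Assemble the same argv that the inline version above passes to exec_cmd.
    bind_address = "%s:%s" % (host, port)
    opts = shlex.split(gunicorn_opts) if gunicorn_opts else []
    return ["gunicorn"] + opts + ["-b", bind_address, "-w", "%s" % workers,
                                  "mlflow.server:app"]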
Example #14
def _fetch_git_repo(uri, version, dst_dir, git_username, git_password):
    """
    Clone the git repo at ``uri`` into ``dst_dir``, checking out commit ``version`` (or defaulting
    to the head commit of the repository's master branch if version is unspecified).
    If ``git_username`` and ``git_password`` are specified, uses them to authenticate while fetching
    the repo. Otherwise, assumes authentication parameters are specified by the environment,
    e.g. by a Git credential helper.
    """
    # We defer importing git until the last moment, because the import requires that the git
    # executable is available on the PATH, so we only want to fail if we actually need it.
    import git
    repo = git.Repo.init(dst_dir)
    origin = repo.create_remote("origin", uri)
    git_args = [git_username, git_password]
    if not (all(arg is not None
                for arg in git_args) or all(arg is None for arg in git_args)):
        raise ExecutionException(
            "Either both or neither of git_username and git_password must be "
            "specified.")
    if git_username:
        git_credentials = "url=%s\nusername=%s\npassword=%s" % (
            uri, git_username, git_password)
        repo.git.config("--local", "credential.helper", "cache")
        process.exec_cmd(cmd=["git", "credential-cache", "store"],
                         cwd=dst_dir,
                         cmd_stdin=git_credentials)
    origin.fetch()
    if version is not None:
        try:
            repo.git.checkout(version)
        except git.exc.GitCommandError as e:
            raise ExecutionException(
                "Unable to checkout version '%s' of git repo %s "
                "- please ensure that the version exists in the repo. "
                "Error: %s" % (version, uri, e))
    else:
        repo.create_head("master", origin.refs.master)
        repo.heads.master.checkout()
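A hypothetical call site, consistent with the docstring above: pass both git_username and git_password together, or leave both as None so that an existing Git credential helper is used. The repository URI below is only illustrative.

import tempfile

dst_dir = tempfile.mkdtemp()
_fetch_git_repo(
    uri="https://github.com/mlflow/mlflow-example.git",  # illustrative URI
    version=None,       # None checks out the head of the master branch
    dst_dir=dst_dir,
    git_username=None,  # both None: rely on a credential helper
    git_password=None,
)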
Example #15
def test_iris_example(tmpdir):
    iris_dir = os.path.join("examples", "IrisClassification")
    home_dir = os.getcwd()
    example_command = [
        "python", os.path.join(iris_dir, "iris_classification.py")
    ]
    extra_files = "{},{}".format(
        os.path.join(iris_dir, "index_to_name.json"),
        os.path.join(home_dir, "model/MLmodel"),
    )
    process.exec_cmd(example_command, cwd=home_dir)
    create_deployment_command = [
        "python",
        os.path.join(iris_dir, "create_deployment.py"),
        "--export_path",
        os.path.join(home_dir, "model_store"),
        "--handler",
        os.path.join(iris_dir, "iris_handler.py"),
        "--model_file",
        os.path.join(iris_dir, "iris_classification.py"),
        "--extra_files",
        extra_files,
    ]

    process.exec_cmd(create_deployment_command, cwd=home_dir)
    mlflow.end_run()
    assert os.path.exists(
        os.path.join(home_dir, "model_store", "iris_classification.mar"))
    predict_command = [
        "python",
        os.path.join(iris_dir, "predict.py"),
        "--input_file_path",
        os.path.join(iris_dir, "sample.json"),
    ]
    res = process.exec_cmd(predict_command, cwd=home_dir)
    assert "SETOSA" in res[1]
Example #16
def test_iris_example(tmpdir):
    iris_dir = os.path.join("examples", "IrisClassification")
    iris_dir_absolute_path = os.path.join(os.getcwd(), iris_dir)
    example_command = ["python", "iris_classification.py"]
    process.exec_cmd(example_command, cwd=iris_dir)
    model_uri = os.path.join(iris_dir_absolute_path, "iris.pt")
    model_file_path = os.path.join(iris_dir_absolute_path,
                                   "iris_classification.py")
    handler_file_path = os.path.join(iris_dir_absolute_path, "iris_handler.py")
    extra_file_path = os.path.join(iris_dir_absolute_path,
                                   "index_to_name.json")
    input_file_path = os.path.join(iris_dir_absolute_path, "sample.json")
    output_file_path = os.path.join(str(tmpdir), "output.json")
    create_deployment_command = [
        "mlflow deployments create "
        "--name iris_test_29 "
        "--target torchserve "
        "--model-uri {model_uri} "
        '-C "MODEL_FILE={model_file}" '
        '-C "HANDLER={handler_file}" '
        '-C "EXTRA_FILES={extra_file}"'.format(
            model_uri=model_uri,
            model_file=model_file_path,
            handler_file=handler_file_path,
            extra_file=extra_file_path,
        ),
    ]

    process.exec_cmd(create_deployment_command, shell=True)

    predict_command = [
        "mlflow deployments predict "
        "--name iris_test_29 "
        "--target torchserve "
        "--input-path {} --output-path {}".format(input_file_path,
                                                  output_file_path)
    ]

    process.exec_cmd(predict_command, cwd=iris_dir, shell=True)
    assert os.path.exists(output_file_path)

    with open(output_file_path) as fp:
        result = fp.read()

    assert "SETOSA" in result
Example #17
def get_or_create_conda_env(conda_env_path, env_id=None):
    """
    Given a `Project`, creates a conda environment containing the project's dependencies if such a
    conda environment doesn't already exist. Returns the name of the conda environment.
    :param conda_env_path: Path to a conda yaml file.
    :param env_id: Optional string that is added to the contents of the yaml file before
                   calculating the hash. It can be used to distinguish environments that have the
                   same conda dependencies but are supposed to be different based on the context.
                   For example, when serving the model we may install additional dependencies to the
                   environment after the environment has been activated.
    """

    conda_path = get_conda_bin_executable("conda")
    conda_env_create_path = _get_conda_executable_for_create_env()

    try:
        process.exec_cmd([conda_path, "--help"], throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException(
            "Could not find Conda executable at {0}. "
            "Ensure Conda is installed as per the instructions at "
            "https://conda.io/projects/conda/en/latest/"
            "user-guide/install/index.html. "
            "You can also configure MLflow to look for a specific "
            "Conda executable by setting the {1} environment variable "
            "to the path of the Conda executable".format(
                conda_path, MLFLOW_CONDA_HOME))

    try:
        process.exec_cmd([conda_env_create_path, "--help"],
                         throw_on_error=False)
    except EnvironmentError:
        raise ExecutionException(
            "You have set the env variable {0}, but {1} does not exist or "
            "it is not working properly. Note that {1} and the conda executable need to be "
            "in the same conda environment. You can change the search path by"
            "modifying the env variable {2}".format(
                MLFLOW_CONDA_CREATE_ENV_CMD,
                conda_env_create_path,
                MLFLOW_CONDA_HOME,
            ))

    (_, stdout, _) = process.exec_cmd([conda_path, "env", "list", "--json"])
    env_names = [os.path.basename(env) for env in json.loads(stdout)["envs"]]
    project_env_name = _get_conda_env_name(conda_env_path, env_id)
    if project_env_name not in env_names:
        _logger.info("=== Creating conda environment %s ===", project_env_name)
        if conda_env_path:
            process.exec_cmd(
                [
                    conda_env_create_path,
                    "env",
                    "create",
                    "-n",
                    project_env_name,
                    "--file",
                    conda_env_path,
                ],
                stream_output=True,
            )
        else:
            process.exec_cmd(
                [
                    conda_env_create_path,
                    "create",
                    "--channel",
                    "conda-forge",
                    "--override-channels",
                    "-n",
                    project_env_name,
                    "python",
                ],
                stream_output=True,
            )
    return project_env_name
Example #18
def test_command_example(directory, command):
    cwd_dir = os.path.join(EXAMPLES_DIR, directory)
    process.exec_cmd(command, cwd=cwd_dir)
Example #19
def _fetch_s3(uri, local_path):
    print("=== Downloading S3 object %s to local path %s ===" % (uri, os.path.abspath(local_path)))
    process.exec_cmd(cmd=["aws", "s3", "cp", uri, local_path])
Example #20
def _fetch_dbfs(uri, local_path):
    print("=== Downloading DBFS file %s to local path %s ===" % (uri, os.path.abspath(local_path)))
    process.exec_cmd(cmd=["databricks", "fs", "cp", "--overwrite", uri, local_path])
Example #21
def clean_envs_and_cache():
    yield

    if get_free_disk_space() < 7.0:  # unit: GiB
        process.exec_cmd(["./dev/remove-conda-envs.sh"])
Example #22
def remove_conda_env(env_name):
    process.exec_cmd(["conda", "remove", "--name", env_name, "--yes", "--all"])
Example #23
def get_conda_envs():
    stdout = process.exec_cmd(["conda", "env", "list", "--json"])[1]
    return [os.path.basename(env) for env in json.loads(stdout)["envs"]]
Example #24
def _list_conda_environments():
    (_, stdout, _) = process.exec_cmd(
        [get_conda_bin_executable("conda"), "env", "list", "--json"])
    return list(map(os.path.basename, json.loads(stdout).get("envs", [])))
Example #25
def _run_project(project, entry_point, work_dir, parameters, use_conda,
                 storage_dir, experiment_id):
    """Locally run a project that has been checked out in `work_dir`."""
    if storage_dir is not None and not os.path.exists(storage_dir):
        os.makedirs(storage_dir)
    storage_dir_for_run = tempfile.mkdtemp(dir=storage_dir)
    eprint(
        "=== Created directory %s for downloading remote URIs passed to arguments of "
        "type 'path' ===" % storage_dir_for_run)
    # Try to build the command first in case the user mis-specified parameters
    run_project_command = project.get_entry_point(entry_point)\
        .compute_command(parameters, storage_dir_for_run)
    commands = []
    if use_conda:
        with open(os.path.join(work_dir, project.conda_env)) as conda_env_file:
            conda_env_sha = hashlib.sha1(
                conda_env_file.read().encode("utf-8")).hexdigest()
        conda_env = "mlflow-%s" % conda_env_sha
        (exit_code, _, stderr) = process.exec_cmd(["conda", "--help"],
                                                  throw_on_error=False)
        if exit_code != 0:
            eprint(
                'conda is not installed properly. Please follow the instructions on '
                'https://conda.io/docs/user-guide/install/index.html')
            eprint(stderr)
            sys.exit(1)
        (_, stdout,
         stderr) = process.exec_cmd(["conda", "env", "list", "--json"])
        env_names = [
            os.path.basename(env) for env in json.loads(stdout)['envs']
        ]

        conda_action = 'create'
        if conda_env not in env_names:
            eprint('=== Creating conda environment %s ===' % conda_env)
            process.exec_cmd([
                "conda", "env", conda_action, "-n", conda_env, "--file",
                project.conda_env
            ],
                             cwd=work_dir,
                             stream_output=True)
        commands.append("source activate %s" % conda_env)

    # Create a new run and log every provided parameter into it.
    active_run = tracking.start_run(
        experiment_id=experiment_id,
        source_name=project.uri,
        source_version=tracking._get_git_commit(work_dir),
        entry_point_name=entry_point,
        source_type=SourceType.PROJECT)
    for key, value in parameters.items():
        active_run.log_param(Param(key, value))
    # Add the run id into a magic environment variable that the subprocess will read,
    # causing it to reuse the run.
    exp_id = experiment_id or tracking._get_experiment_id()
    env_map = {
        tracking._RUN_NAME_ENV_VAR: active_run.run_info.run_uuid,
        tracking._TRACKING_URI_ENV_VAR: tracking.get_tracking_uri(),
        tracking._EXPERIMENT_ID_ENV_VAR: str(exp_id),
    }

    commands.append(run_project_command)
    command = " && ".join(commands)
    eprint("=== Running command: %s ===" % command)
    try:
        process.exec_cmd([os.environ.get("SHELL", "bash"), "-c", command],
                         cwd=work_dir,
                         stream_output=True,
                         env=env_map)
        tracking.end_run()
        eprint("=== Run succeeded ===")
    except process.ShellCommandException:
        tracking.end_run("FAILED")
        eprint("=== Run failed ===")