Code Example #1
def load_model(path, tf_sess, run_id=None):
    """
    Load an MLflow model that contains the TensorFlow flavor from the specified path.

    **This method must be called within a TensorFlow graph context!**

    :param path: The local filesystem path or run-relative artifact path to the model.
    :param tf_sess: The TensorFlow session in which to load the model.
    :param run_id: The ID of the run that produced the model, if the model was logged as a run
                   artifact rather than saved to a local path.
    :return: A TensorFlow signature definition of type:
             `tensorflow.core.protobuf.meta_graph_pb2.SignatureDef`. This defines the input and
             output tensors for model inference.

    >>> import mlflow.tensorflow
    >>> import tensorflow as tf
    >>> tf_graph = tf.Graph()
    >>> tf_sess = tf.Session(graph=tf_graph)
    >>> with tf_graph.as_default():
    ...     signature_def = mlflow.tensorflow.load_model(path="model_path", tf_sess=tf_sess)
    ...     input_tensors = [tf_graph.get_tensor_by_name(input_signature.name)
    ...                      for _, input_signature in signature_def.inputs.items()]
    ...     output_tensors = [tf_graph.get_tensor_by_name(output_signature.name)
    ...                       for _, output_signature in signature_def.outputs.items()]
    """
    if run_id is not None:
        path = _get_model_log_dir(model_name=path, run_id=run_id)
    m = Model.load(os.path.join(path, 'MLmodel'))
    if FLAVOR_NAME not in m.flavors:
        raise Exception("Model does not have {} flavor".format(FLAVOR_NAME))
    conf = m.flavors[FLAVOR_NAME]
    saved_model_dir = os.path.join(path, conf['saved_model_dir'])
    return _load_model(tf_saved_model_dir=saved_model_dir,
                       tf_sess=tf_sess,
                       tf_meta_graph_tags=conf['meta_graph_tags'],
                       tf_signature_def_key=conf['signature_def_key'])
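
Building on the doctest above, a minimal inference sketch. The "model_path" value and the batch-loading helper are illustrative assumptions, not part of MLflow:

import mlflow.tensorflow
import tensorflow as tf

tf_graph = tf.Graph()
tf_sess = tf.Session(graph=tf_graph)
with tf_graph.as_default():
    signature_def = mlflow.tensorflow.load_model(path="model_path", tf_sess=tf_sess)
    # Map each signature input name to its tensor so a feed_dict can be built.
    input_tensors = {name: tf_graph.get_tensor_by_name(sig.name)
                     for name, sig in signature_def.inputs.items()}
    output_tensors = [tf_graph.get_tensor_by_name(sig.name)
                      for _, sig in signature_def.outputs.items()]
    batch = load_example_batch()  # hypothetical helper: {input name -> numpy array}
    feed_dict = {tensor: batch[name] for name, tensor in input_tensors.items()}
    predictions = tf_sess.run(output_tensors, feed_dict=feed_dict)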
Code Example #2
def test_build_image_includes_default_metadata_in_azure_image_and_model_tags(sklearn_model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_model, artifact_path=artifact_path)
        run_id = mlflow.active_run().info.run_uuid
    model_config = Model.load(os.path.join(_get_model_log_dir(artifact_path, run_id), "MLmodel"))

    with AzureMLMocks() as aml_mocks:
        workspace = get_azure_workspace()
        mlflow.azureml.build_image(model_path=artifact_path, run_id=run_id, workspace=workspace)

        register_model_call_args = aml_mocks["register_model"].call_args_list
        assert len(register_model_call_args) == 1
        _, register_model_call_kwargs = register_model_call_args[0]
        called_tags = register_model_call_kwargs["tags"]
        assert called_tags["run_id"] == run_id
        assert called_tags["model_path"] == artifact_path
        assert called_tags["python_version"] ==\
            model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.PY_VERSION]

        create_image_call_args = aml_mocks["create_image"].call_args_list
        assert len(create_image_call_args) == 1
        _, create_image_call_kwargs = create_image_call_args[0]
        image_config = create_image_call_kwargs["image_config"]
        assert image_config.tags["run_id"] == run_id
        assert image_config.tags["model_path"] == artifact_path
        assert image_config.tags["python_version"] ==\
            model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.PY_VERSION]
Code Example #3
def test_log_model_persists_conda_environment(tmpdir, saved_tf_iris_model):
    conda_env_path = os.path.join(str(tmpdir), "conda_env.yaml")
    _mlflow_conda_env(path=conda_env_path,
                      additional_conda_deps=["tensorflow"])
    with open(conda_env_path, "r") as f:
        conda_env_text = f.read()

    artifact_path = "model"
    with mlflow.start_run():
        mlflow.tensorflow.log_model(
            tf_saved_model_dir=saved_tf_iris_model.path,
            tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags,
            tf_signature_def_key=saved_tf_iris_model.signature_def_key,
            artifact_path=artifact_path,
            conda_env=conda_env_path)

        run_id = mlflow.active_run().info.run_uuid

    model_dir = _get_model_log_dir(artifact_path, run_id)
    model_config = Model.load(os.path.join(model_dir, "MLmodel"))
    flavor_config = model_config.flavors.get(pyfunc.FLAVOR_NAME, None)
    assert flavor_config is not None
    pyfunc_env_subpath = flavor_config.get(pyfunc.ENV, None)
    assert pyfunc_env_subpath is not None
    with open(os.path.join(model_dir, pyfunc_env_subpath), "r") as f:
        persisted_env_text = f.read()

    assert persisted_env_text == conda_env_text
Code Example #4
def test_validate_deployment_flavor_validates_python_function_flavor_successfully(
        pretrained_model):
    model_config_path = os.path.join(_get_model_log_dir(
        pretrained_model.model_path, pretrained_model.run_id), "MLmodel")
    model_config = Model.load(model_config_path)
    mfs._validate_deployment_flavor(
            model_config=model_config, flavor=mlflow.pyfunc.FLAVOR_NAME)
Code Example #5
def test_model_log(self):
    old_uri = mlflow.get_tracking_uri()
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        with TempDir(chdr=True, remove_on_exit=True) as tmp:
            try:
                mlflow.set_tracking_uri("test")
                if should_start_run:
                    mlflow.start_run()
                artifact_path = "linear"
                conda_env = os.path.join(tmp.path(), "conda_env.yaml")
                _mlflow_conda_env(conda_env, additional_pip_deps=["sklearn"])
                sklearn.log_model(sk_model=self._linear_lr,
                                  artifact_path=artifact_path,
                                  conda_env=conda_env)
                x = sklearn.load_model(artifact_path, run_id=mlflow.active_run().info.run_uuid)
                model_path = _get_model_log_dir(
                        artifact_path, mlflow.active_run().info.run_uuid)
                model_config = Model.load(os.path.join(model_path, "MLmodel"))
                assert pyfunc.FLAVOR_NAME in model_config.flavors
                assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
                env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
                assert os.path.exists(os.path.join(model_path, env_path))
                xpred = x.predict(self._X)
                np.testing.assert_array_equal(self._linear_lr_predict, xpred)
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_uri)
Code Example #6
def test_log_model_persists_specified_conda_env_in_mlflow_model_directory(
        sklearn_knn_model, main_scoped_model_class, pyfunc_custom_env):
    sklearn_artifact_path = "sk_model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_knn_model,
                                 artifact_path=sklearn_artifact_path)
        sklearn_run_id = mlflow.active_run().info.run_uuid

    pyfunc_artifact_path = "pyfunc_model"
    with mlflow.start_run():
        mlflow.pyfunc.log_model(
            artifact_path=pyfunc_artifact_path,
            artifacts={
                "sk_model":
                utils_get_artifact_uri(artifact_path=sklearn_artifact_path,
                                       run_id=sklearn_run_id)
            },
            python_model=main_scoped_model_class(predict_fn=None),
            conda_env=pyfunc_custom_env)
        pyfunc_run_id = mlflow.active_run().info.run_uuid

    pyfunc_model_path = _get_model_log_dir(pyfunc_artifact_path, pyfunc_run_id)
    pyfunc_conf = _get_flavor_configuration(
        model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(pyfunc_model_path,
                                        pyfunc_conf[mlflow.pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    assert saved_conda_env_path != pyfunc_custom_env

    with open(pyfunc_custom_env, "r") as f:
        pyfunc_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == pyfunc_custom_env_parsed
Code Example #7
def export(output, model_path, run_id, mlflow_home):
    """
    Export an MLflow model as an Azure ML compatible model ready to be deployed.

    The export includes everything needed to deploy the model on Azure ML. The output also
    contains an sh script with the command to deploy the generated model to Azure ML.

    NOTE:

        - This command does not need an Azure ML environment to run.

        - Azure ML cannot handle arbitrary Conda environments. If the model contains a Conda
          environment and was trained outside of Azure ML, the Conda environment might need
          to be edited.

    :param output: Output folder where the model is going to be exported to.
    :param model_path: Local or MLflow run-relative path to the model to be exported.
    :param run_id: If provided, ``run_id`` is used to retrieve the model logged with MLflow.
    :param mlflow_home: Directory containing a checkout of the MLflow GitHub project, or the
                        current directory if not specified.
    """
    output = os.path.abspath(output)
    if os.path.exists(output):
        raise Exception("output folder {} already exists".format(output))
    os.mkdir(output)
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    curr_dir = os.path.abspath(os.getcwd())
    os.chdir(output)
    try:
        exec_str = _export("$1", model_path, mlflow_home=mlflow_home)
        with open("create_service.sh", "w") as f:
            f.write("\n".join(
                ["#! /bin/sh", "cd {}".format(output), exec_str, ""]))
    finally:
        os.chdir(curr_dir)
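
A hedged usage sketch with illustrative paths and run ID: after export, the generated create_service.sh takes the desired service name as its first argument (the "$1" placeholder above):

import os
import subprocess

# Hypothetical invocation; the output folder must not already exist.
export(output="azureml_export", model_path="model", run_id="<run_id>", mlflow_home=None)
# create_service.sh substitutes its first argument for "$1" in the deploy command.
subprocess.check_call(
    ["sh", os.path.join("azureml_export", "create_service.sh"), "my-service"])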
Code Example #8
def test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        sklearn_knn_model, main_scoped_model_class):
    sklearn_artifact_path = "sk_model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_knn_model,
                                 artifact_path=sklearn_artifact_path)
        sklearn_run_id = mlflow.active_run().info.run_uuid

    pyfunc_artifact_path = "pyfunc_model"
    with mlflow.start_run():
        mlflow.pyfunc.log_model(
            artifact_path=pyfunc_artifact_path,
            artifacts={
                "sk_model":
                utils_get_artifact_uri(artifact_path=sklearn_artifact_path,
                                       run_id=sklearn_run_id)
            },
            python_model=main_scoped_model_class(predict_fn=None))
        pyfunc_run_id = mlflow.active_run().info.run_uuid

    pyfunc_model_path = _get_model_log_dir(pyfunc_artifact_path, pyfunc_run_id)
    pyfunc_conf = _get_flavor_configuration(
        model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(pyfunc_model_path,
                                  pyfunc_conf[mlflow.pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)

    assert conda_env == mlflow.pyfunc.model.DEFAULT_CONDA_ENV
Code Example #9
def run_local(model_path, run_id=None, port=5000, image=DEFAULT_IMAGE_NAME):
    """
    Serve model locally in a SageMaker compatible Docker container.

    :param model_path: path to the model. Either local if no ``run_id`` or MLflow-relative if
                                          ``run_id`` is specified.
    :param run_id: MLflow run ID.
    :param port: Local port.
    :param image: Name of the Docker image to be used.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    _check_compatible(model_path)
    model_path = os.path.abspath(model_path)
    eprint("launching docker image with path {}".format(model_path))
    cmd = [
        "docker", "run", "-v", "{}:/opt/ml/model/".format(model_path), "-p",
        "%d:8080" % port, "--rm", image, "serve"
    ]
    eprint('executing', ' '.join(cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True)

    import signal

    def _sigterm_handler(*_):
        eprint("received termination signal => killing docker process")
        proc.send_signal(signal.SIGINT)

    signal.signal(signal.SIGTERM, _sigterm_handler)
    for x in iter(proc.stdout.readline, ""):
        eprint(x, end='')
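
A hedged invocation sketch with illustrative arguments. The call blocks while the container runs, streaming its logs; the SIGTERM handler above forwards an interrupt to the docker client so the --rm container is cleaned up:

# Hypothetical: serve the "model" artifact of the given run on localhost:5001
# until the process is terminated.
run_local(model_path="model", run_id="<run_id>", port=5001)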
Code Example #10
def deploy(app_name, model_path, run_id=None, mlflow_home=None):
    """
    Deploy an MLflow model to Azure Machine Learning.

    NOTE:

        - This command must be called from a console launched from Azure Machine Learning
          Workbench. The caller is responsible for setting up the Azure Machine Learning
          environment and accounts.

        - Azure Machine Learning cannot handle arbitrary Conda environments. In particular, the
          Python version is fixed. If the model contains a Conda environment and was trained
          outside of Azure Machine Learning, the Conda environment might need to be edited to
          work with Azure Machine Learning.

    :param app_name: Name of the deployed application.
    :param model_path: Local or MLflow-run-relative path to the model to be deployed.
    :param run_id: MLflow run ID.
    :param mlflow_home: Directory containing a checkout of the MLflow GitHub project, or the
                        current directory if not specified.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    with TempDir(chdr=True, remove_on_exit=True):
        exec_str = _export(app_name, model_path, mlflow_home=mlflow_home)
        eprint("executing", '"{}"'.format(exec_str))
        # Use os.system instead of subprocess due to the fact that currently all azureml commands
        # have to be called within the same shell (launched from azureml workbench app by the user).
        # We can change this once there is a python api (or general cli) available.
        os.system(exec_str)
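
A hedged sketch of a call, assuming a console launched from Azure Machine Learning Workbench with the environment and accounts already configured; all argument values are illustrative:

# Hypothetical invocation: deploys the "model" artifact of the given run as "iris-app".
deploy(app_name="iris-app", model_path="model", run_id="<run_id>")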
Code Example #11
def test_get_preferred_deployment_flavor_obtains_valid_flavor_from_model(pretrained_model):
    model_config_path = os.path.join(_get_model_log_dir(
        pretrained_model.model_path, pretrained_model.run_id), "MLmodel")
    model_config = Model.load(model_config_path)

    selected_flavor = mfs._get_preferred_deployment_flavor(model_config=model_config)

    assert selected_flavor in mfs.SUPPORTED_DEPLOYMENT_FLAVORS
    assert selected_flavor in model_config.flavors
Code Example #12
def test_mleap_model_log(spark_model_iris):
    artifact_path = "model"
    sparkm.log_model(spark_model=spark_model_iris.model,
                     sample_input=spark_model_iris.spark_df,
                     artifact_path=artifact_path)
    rid = active_run().info.run_uuid
    model_path = _get_model_log_dir(model_name=artifact_path, run_id=rid)
    config_path = os.path.join(model_path, "MLmodel")
    mlflow_model = Model.load(config_path)
    assert sparkm.FLAVOR_NAME in mlflow_model.flavors
    assert mleap.FLAVOR_NAME in mlflow_model.flavors
Code Example #13
def test_deployment_of_model_with_no_supported_flavors_throws_value_error(pretrained_model):
    logged_model_path = _get_model_log_dir(pretrained_model.model_path, pretrained_model.run_id)
    model_config_path = os.path.join(logged_model_path, "MLmodel")
    model_config = Model.load(model_config_path)
    del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
    model_config.save(path=model_config_path)

    with pytest.raises(ValueError):
        mfs.deploy(app_name="missing-flavor",
                   model_path=logged_model_path,
                   flavor=None)
Code Example #14
File: cli.py  Project: sepidehhosseinzadeh/mlflow-cpp
def serve(model_path, run_id, port):
    """
    Serve an RFunction model saved with MLflow.

    If a ``run_id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    command = "mlflow::mlflow_rfunc_serve('{0}', port = {1})".format(model_path, port)
    execute(command)
Code Example #15
def test_model_log_uses_cloudpickle_serialization_format_by_default(sklearn_knn_model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(
                sk_model=sklearn_knn_model.model, artifact_path=artifact_path, conda_env=None)
        run_id = mlflow.active_run().info.run_uuid
    model_path = _get_model_log_dir(artifact_path, run_id)

    sklearn_conf = _get_flavor_configuration(
            model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME)
    assert "serialization_format" in sklearn_conf
    assert sklearn_conf["serialization_format"] == mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE
Code Example #16
File: __init__.py  Project: yu7856288/mlflow
def run_local(model_path,
              run_id=None,
              port=5000,
              image=DEFAULT_IMAGE_NAME,
              flavor=None):
    """
    Serve model locally in a SageMaker compatible Docker container.

    :param model_path: path to the model. Either local if no ``run_id`` or MLflow-relative if
                                          ``run_id`` is specified.
    :param run_id: MLflow run ID.
    :param port: Local port.
    :param image: Name of the Docker image to be used.
    :param flavor: The name of the flavor of the model to use for local serving. If ``None``,
                   a flavor is automatically selected from the model's available flavors. If the
                   specified flavor is not present or not supported for deployment, an exception
                   is thrown.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    model_config_path = os.path.join(model_path, "MLmodel")
    model_config = Model.load(model_config_path)

    if flavor is None:
        flavor = _get_preferred_deployment_flavor(model_config)
    else:
        _validate_deployment_flavor(model_config, flavor)
    print("Using the {selected_flavor} flavor for local serving!".format(
        selected_flavor=flavor))

    deployment_config = _get_deployment_config(flavor_name=flavor)

    eprint("launching docker image with path {}".format(model_path))
    cmd = [
        "docker", "run", "-v", "{}:/opt/ml/model/".format(model_path), "-p",
        "%d:8080" % port
    ]
    for key, value in deployment_config.items():
        cmd += ["-e", "{key}={value}".format(key=key, value=value)]
    cmd += ["--rm", image, "serve"]
    eprint('executing', ' '.join(cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True)

    import signal

    def _sigterm_handler(*_):
        eprint("received termination signal => killing docker process")
        proc.send_signal(signal.SIGINT)

    signal.signal(signal.SIGTERM, _sigterm_handler)
    for x in iter(proc.stdout.readline, ""):
        eprint(x, end='')
Code Example #17
File: test_deployment.py  Project: zxf1864/mlflow
def test_deployment_of_model_with_no_supported_flavors_raises_exception(pretrained_model):
    logged_model_path = _get_model_log_dir(pretrained_model.model_path, pretrained_model.run_id)
    model_config_path = os.path.join(logged_model_path, "MLmodel")
    model_config = Model.load(model_config_path)
    del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
    model_config.save(path=model_config_path)

    with pytest.raises(MlflowException) as exc:
        mfs.deploy(app_name="missing-flavor",
                   model_path=logged_model_path,
                   flavor=None)

    assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
Code Example #18
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.keras.log_model(keras_model=model, artifact_path=artifact_path, conda_env=None)
        run_id = mlflow.active_run().info.run_uuid
    model_path = _get_model_log_dir(artifact_path, run_id)

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)

    assert conda_env == mlflow.keras.DEFAULT_CONDA_ENV
Code Example #19
def predict(model_path, run_id, input_path, output_path):
    """
    Run an RFunction model saved with MLflow against the given input data and
    return the prediction results as a JSON DataFrame.

    If a ``run-id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    str_cmd = "mlflow::mlflow_rfunc_predict('{0}', '{1}', '{2}')"
    command = str_cmd.format(model_path, input_path, str_optional(output_path))

    execute(command)
Code Example #20
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        sklearn_knn_model):
    artifact_path = "model"
    knn_model = sklearn_knn_model.model
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=knn_model, artifact_path=artifact_path, conda_env=None,
                                 serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE)
        run_id = mlflow.active_run().info.run_uuid
    model_path = _get_model_log_dir(artifact_path, run_id)

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)

    assert conda_env == mlflow.sklearn.DEFAULT_CONDA_ENV
Code Example #21
def serve(model_path, run_id, port, host, no_conda):
    """
    Serve a PythonFunction model saved with MLflow.

    If a ``run_id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)

    app = scoring_server.init(load_pyfunc(model_path))
    app.run(port=port, host=host)
Code Example #22
def test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        saved_tf_iris_model, model_path):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.tensorflow.log_model(tf_saved_model_dir=saved_tf_iris_model.path,
                                    tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags,
                                    tf_signature_def_key=saved_tf_iris_model.signature_def_key,
                                    artifact_path=artifact_path,
                                    conda_env=None)
        run_id = mlflow.active_run().info.run_uuid
    model_path = _get_model_log_dir(artifact_path, run_id)

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)

    assert conda_env == mlflow.tensorflow.DEFAULT_CONDA_ENV
Code Example #23
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(model, keras_custom_env):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.keras.log_model(
            keras_model=model, artifact_path=artifact_path, conda_env=keras_custom_env)
        run_id = mlflow.active_run().info.run_uuid
    model_path = _get_model_log_dir(artifact_path, run_id)

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    assert saved_conda_env_path != keras_custom_env

    with open(keras_custom_env, "r") as f:
        keras_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == keras_custom_env_parsed
Code Example #24
File: cli.py  Project: lxdatgithub/mlflow
def serve(model_path, run_id, port, host, no_conda):
    """
    Serve a pyfunc model saved with MLflow by launching a webserver on the specified
    host and port. For information about the input data formats accepted by the webserver,
    see the following documentation:
    https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.

    If a ``run_id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)

    app = scoring_server.init(load_pyfunc(model_path))
    app.run(port=port, host=host)
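
A hedged client-side sketch of querying the launched webserver. The /invocations endpoint and the pandas "records"-oriented JSON payload follow the pyfunc scoring-server documentation linked above, but the exact accepted format depends on the MLflow version, so treat both as assumptions:

# Hypothetical client; run against a server started on 127.0.0.1:5000.
import pandas as pd
import requests

input_df = pd.DataFrame({"sepal_length": [5.1], "sepal_width": [3.5]})  # illustrative
response = requests.post(
    "http://127.0.0.1:5000/invocations",
    data=input_df.to_json(orient="records"),
    headers={"Content-Type": "application/json"})
print(response.text)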
Code Example #25
def test_model_log_load(sklearn_knn_model, iris_data, tmpdir):
    sk_model_path = os.path.join(str(tmpdir), "knn.pkl")
    with open(sk_model_path, "wb") as f:
        pickle.dump(sklearn_knn_model, f)

    pyfunc_artifact_path = "pyfunc_model"
    with mlflow.start_run():
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                data_path=sk_model_path,
                                loader_module=os.path.basename(__file__)[:-3],
                                code_path=[__file__])
        pyfunc_run_id = mlflow.active_run().info.run_uuid

    pyfunc_model_path = _get_model_log_dir(pyfunc_artifact_path, pyfunc_run_id)
    model_config = Model.load(os.path.join(pyfunc_model_path, "MLmodel"))
    assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors
    assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
    reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path)
    np.testing.assert_array_equal(
        sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0]))
Code Example #26
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(
        h2o_iris_model, h2o_custom_env):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.h2o.log_model(h2o_model=h2o_iris_model.model,
                             artifact_path=artifact_path,
                             conda_env=h2o_custom_env)
        run_id = mlflow.active_run().info.run_uuid
    model_path = _get_model_log_dir(artifact_path, run_id)

    pyfunc_conf = _get_flavor_configuration(model_path=model_path,
                                            flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    assert saved_conda_env_path != h2o_custom_env

    with open(h2o_custom_env, "r") as f:
        h2o_custom_env_text = f.read()
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_text = f.read()
    assert saved_conda_env_text == h2o_custom_env_text
Code Example #27
def test_log_model_persists_specified_conda_env_in_mlflow_model_directory(
        saved_tf_iris_model, tf_custom_env):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.tensorflow.log_model(tf_saved_model_dir=saved_tf_iris_model.path,
                                    tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags,
                                    tf_signature_def_key=saved_tf_iris_model.signature_def_key,
                                    artifact_path=artifact_path,
                                    conda_env=tf_custom_env)
        run_id = mlflow.active_run().info.run_uuid
    model_path = _get_model_log_dir(artifact_path, run_id)

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    assert saved_conda_env_path != tf_custom_env

    with open(tf_custom_env, "r") as f:
        tf_custom_env_text = f.read()
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_text = f.read()
    assert saved_conda_env_text == tf_custom_env_text
Code Example #28
def predict(model_path, run_id, input_path, output_path, no_conda):
    """
    Load a pandas DataFrame and run a python_function model saved with MLflow against it.
    Return the prediction results as a CSV-formatted pandas DataFrame.

    If a ``run-id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)

    model = load_pyfunc(model_path)
    df = pandas.read_csv(input_path)
    result = model.predict(df)
    out_stream = sys.stdout
    if output_path:
        out_stream = open(output_path, 'w')
    pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
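
A hedged round-trip sketch with illustrative data: write a CSV, run the model against it, and read the predictions back. Note the output CSV has no header row, matching to_csv(..., header=False) above:

import pandas as pd

# Hypothetical input; column names must match what the model expects.
pd.DataFrame({"x": [1.0, 2.0]}).to_csv("input.csv", index=False)
predict(model_path="model", run_id="<run_id>", input_path="input.csv",
        output_path="output.csv", no_conda=True)
print(pd.read_csv("output.csv", header=None))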
Code Example #29
def test_model_log(sklearn_logreg_model, model_path):
    old_uri = mlflow.get_tracking_uri()
    with TempDir(chdr=True, remove_on_exit=True) as tmp:
        for should_start_run in [False, True]:
            try:
                mlflow.set_tracking_uri("test")
                if should_start_run:
                    mlflow.start_run()

                artifact_path = "linear"
                conda_env = os.path.join(tmp.path(), "conda_env.yaml")
                _mlflow_conda_env(conda_env,
                                  additional_pip_deps=["scikit-learn"])

                mlflow.sklearn.log_model(sk_model=sklearn_logreg_model.model,
                                         artifact_path=artifact_path,
                                         conda_env=conda_env)
                run_id = mlflow.active_run().info.run_uuid

                reloaded_logreg_model = mlflow.sklearn.load_model(
                    artifact_path, run_id)
                np.testing.assert_array_equal(
                    sklearn_logreg_model.model.predict(
                        sklearn_logreg_model.inference_data),
                    reloaded_logreg_model.predict(
                        sklearn_logreg_model.inference_data))

                model_path = _get_model_log_dir(artifact_path, run_id=run_id)
                model_config = Model.load(os.path.join(model_path, "MLmodel"))
                assert pyfunc.FLAVOR_NAME in model_config.flavors
                assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
                env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
                assert os.path.exists(os.path.join(model_path, env_path))

            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_uri)
Code Example #30
def export(output, model_path, run_id=None, mlflow_home=None):
    """
    Export an MLflow model with everything needed to deploy it on Azure Machine Learning.
    The output includes an sh script with the command to deploy the generated model to Azure
    Machine Learning.

    NOTE:

        - This command does not need an Azure Machine Learning environment to run.

        - Azure Machine Learning cannot handle arbitrary Conda environments. In particular, the
          Python version is fixed. If the model contains a Conda environment and was trained
          outside of Azure Machine Learning, the Conda environment might need to be edited to
          work with Azure Machine Learning.

    :param output: Output folder where the model is going to be exported to.
    :param model_path: Local or MLflow run relative path to the model to be exported.
    :param run_id: MLflow run ID.
    :param mlflow_home: Directory containing a checkout of the MLflow GitHub project, or the
                        current directory if not specified.
    """
    output = os.path.abspath(output)
    if os.path.exists(output):
        raise Exception("output folder {} already exists".format(output))
    os.mkdir(output)
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    curr_dir = os.path.abspath(os.getcwd())
    os.chdir(output)
    try:
        exec_str = _export("$1", model_path, mlflow_home=mlflow_home)
        with open("create_service.sh", "w") as f:
            f.write("\n".join(
                ["#! /bin/sh", "cd {}".format(output), exec_str, ""]))
    finally:
        os.chdir(curr_dir)