Example #1
    def test_model_log(self):
        with TempDir(chdr=True, remove_on_exit=True) as tmp:
            model_path = tmp.path("linear.pkl")
            with open(model_path, "wb") as f:
                pickle.dump(self._linear_lr, f)
            tracking_dir = os.path.abspath(tmp.path("mlruns"))
            tracking.set_tracking_uri("file://%s" % tracking_dir)
            tracking.start_run()
            try:
                pyfunc.log_model(artifact_path="linear",
                                 data_path=model_path,
                                 loader_module=os.path.basename(__file__)[:-3],
                                 code_path=[__file__])

                run_id = tracking.active_run().info.run_uuid
                path = tracking._get_model_log_dir("linear", run_id)
                m = Model.load(os.path.join(path, "MLmodel"))
                print(m.__dict__)
                x = pyfunc.load_pyfunc("linear", run_id=run_id)
                xpred = x.predict(self._X)
                np.testing.assert_array_equal(self._linear_lr_predict, xpred)
            finally:
                tracking.end_run()
                tracking.set_tracking_uri(None)
                # Remove the log directory in order to avoid adding new tests to pytest...
                shutil.rmtree(tracking_dir)
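The test above passes loader_module=os.path.basename(__file__)[:-3], so the test module itself acts as the pyfunc loader. As Example #2 shows, a loader module only needs to expose a load_pyfunc(data_path) function that returns an object with a predict method. A minimal sketch of such a loader module, assuming the logged data file is the pickled scikit-learn model written above:

import pickle


def load_pyfunc(data_path):
    # Deserialize the artifact that was logged via pyfunc.log_model(data_path=...).
    # The returned object only needs to expose predict(pandas_df).
    with open(data_path, "rb") as f:
        return pickle.load(f)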
Example #2
def load_pyfunc(path, run_id=None, suppress_warnings=False):
    """
    Load a model stored in Python function format.

    :param path: Local path, or MLflow-run-relative path, to the model.
    :param run_id: ID of the run that produced this model. If provided, it is used to retrieve
                   the model logged with MLflow.
    :param suppress_warnings: If True, non-fatal warning messages associated with the model
                              loading process will be suppressed. If False, these warning messages
                              will be emitted.
    """
    if run_id:
        path = tracking._get_model_log_dir(path, run_id)
    conf_path = os.path.join(path, "MLmodel")
    model = Model.load(conf_path)
    if FLAVOR_NAME not in model.flavors:
        raise Exception("Format '{format}' not found not in {path}.".format(
            format=FLAVOR_NAME, path=conf_path))
    conf = model.flavors[FLAVOR_NAME]
    model_py_version = conf.get(PY_VERSION)
    if not suppress_warnings:
        _warn_potentially_incompatible_py_version_if_necessary(
            model_py_version=model_py_version)
    if CODE in conf and conf[CODE]:
        code_path = os.path.join(path, conf[CODE])
        sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
    data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path
    return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)
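A minimal usage sketch of load_pyfunc; the artifact path "linear" and the run ID below are hypothetical placeholders matching the layout logged in Example #1:

import pandas as pd
import mlflow.pyfunc as pyfunc

run_id = "..."  # hypothetical ID of the run that logged the model
model = pyfunc.load_pyfunc("linear", run_id=run_id)
predictions = model.predict(pd.DataFrame({"x": [1.0, 2.0, 3.0]}))
print(predictions)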
Example #3
def export(output, model_path, run_id):
    """Export MLflow model as Azure ML compatible model ready to be deployed.

    Export the MLflow model with everything needed to deploy it on Azure ML.
    The output includes a shell script with the command to deploy the generated model to Azure ML.
    The generated model has no dependency on MLflow.

    NOTE: This command does not need an Azure ML environment to run.

    NOTE: Azure ML cannot handle an arbitrary Conda environment. In particular, the Python
          version appears to be fixed. If the model contains a Conda environment and was
          trained outside of Azure ML, the Conda environment might need to be edited.
    """
    output = os.path.abspath(output)
    if os.path.exists(output):
        raise Exception("output folder {} already exists".format(output))
    os.mkdir(output)
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    curr_dir = os.path.abspath(os.getcwd())
    os.chdir(output)
    try:
        exec_str = _export("$1", model_path, os.path.basename(output))
        with open("create_service.sh", "w") as f:
            f.write("\n".join(["#! /bin/sh", "cd {}".format(output), exec_str, ""]))
    finally:
        os.chdir(curr_dir)
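A usage sketch; the output folder and run ID are hypothetical, and "model" is assumed to be the artifact path under which the model was logged. The "$1" placeholder written into create_service.sh becomes the service name passed when the script is run:

export(output="/tmp/azureml_export", model_path="model", run_id="...")  # "..." = hypothetical run ID
# Then, from an initialized Azure ML environment:
#   sh /tmp/azureml_export/create_service.sh my-service-name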
Example #4
def deploy(app_name, model_path, run_id, mlflow_home):
    """
    Deploy MLflow model to Azure ML.

    NOTE: This command must be called from a correctly initialized Azure ML environment.
          At the moment this means it has to be run from a console launched from Azure ML
          Workbench. The caller is responsible for setting up the Azure ML environment and
          accounts.

    NOTE: Azure ML cannot handle an arbitrary Conda environment. In particular, the Python
          version appears to be fixed. If the model contains a Conda environment and was
          trained outside of Azure ML, the Conda environment might need to be edited to work
          with Azure ML.

    :param mlflow_home:
    :param app_name: Name of the deployed application
    :param model_path: Local or MLflow-run-relative path to the model to be exported
    :param run_id: If provided, run_id is used to retrieve the model logged with MLflow.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    with TempDir(chdr=True, remove_on_exit=True):
        exec_str = _export(app_name, model_path, mlflow_home=mlflow_home)
        eprint("executing", '"{}"'.format(exec_str))
        # Use os.system instead of subprocess due to the fact that currently all azureml commands
        # have to be called within the same shell (launched from azureml workbench app by the user).
        # We can change this once there is a python api (or general cli) available.
        os.system(exec_str)
Example #5
def export(output, model_path, run_id, mlflow_home):
    """
    Export MLflow model as Azure ML compatible model ready to be deployed.

    Export the MLflow model with everything needed to deploy it on Azure ML.
    The output includes a shell script with the command to deploy the generated model to Azure ML.

    NOTE: This command does not need an Azure ML environment to run.

    NOTE: Azure ML cannot handle an arbitrary Conda environment. If the model contains a Conda
    environment and was trained outside of Azure ML, the Conda environment might need to be edited.

    :param output: Output folder to which the model is exported.
    :param model_path: Local or MLflow-run-relative path to the model to be exported.
    :param run_id: If provided, run_id is used to retrieve model logged with MLflow.
    """
    output = os.path.abspath(output)
    if os.path.exists(output):
        raise Exception("output folder {} already exists".format(output))
    os.mkdir(output)
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    curr_dir = os.path.abspath(os.getcwd())
    os.chdir(output)
    try:
        exec_str = _export("$1", model_path, mlflow_home=mlflow_home)
        with open("create_service.sh", "w") as f:
            f.write("\n".join(
                ["#! /bin/sh", "cd {}".format(output), exec_str, ""]))
    finally:
        os.chdir(curr_dir)
Example #6
def deploy(app_name,
           model_path,
           execution_role_arn,
           bucket,
           run_id=None,
           container="mlflow_sage",
           region_name="us-west-2"):  # noqa
    """ Deploy model on sagemaker.

    :param app_name: Name of the deployed app.
    :param path: Path to the model.
    Either local if no run_id or mlflow-relative if run_id is specified)
    :param execution_role_arn: Amazon execution role with sagemaker rights
    :param bucket: S3 bucket where model artifacts are gonna be stored
    :param run_id: mlflow run id.
    :param container: name of the Docker container to be used.
    :return:
    """
    prefix = model_path
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
        prefix = run_id + "/" + prefix
    _check_compatible(model_path)
    model_s3_path = _upload_s3(local_model_path=model_path,
                               bucket=bucket,
                               prefix=prefix)
    print('model_s3_path', model_s3_path)
    _deploy(role=execution_role_arn,
            container_name=container,
            app_name=app_name,
            model_s3_path=model_s3_path,
            run_id=run_id,
            region_name=region_name)
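A hedged invocation sketch of the SageMaker deploy helper above; the role ARN, bucket, and run ID are hypothetical placeholders:

deploy(app_name="my-app",
       model_path="model",  # artifact path within the run
       execution_role_arn="arn:aws:iam::123456789012:role/SageMakerRole",  # hypothetical role
       bucket="my-mlflow-artifacts",  # hypothetical S3 bucket
       run_id="...",  # hypothetical MLflow run ID
       container="mlflow_sage",
       region_name="us-west-2")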
Example #7
def deploy(app_name, model_path, execution_role_arn, bucket, run_id=None,
           image="mlflow_sage", region_name="us-west-2"):
    """
    Deploy model on Sagemaker.
    Current active AWS account needs to have correct permissions setup.

    :param app_name: Name of the deployed app.
    :param model_path: Path to the model. Local if no run_id is specified,
                       otherwise MLflow-run-relative.
    :param execution_role_arn: Amazon execution role with SageMaker rights.
    :param bucket: S3 bucket where model artifacts will be stored.
    :param run_id: MLflow run ID.
    :param image: Name of the Docker image to be used.
    :param region_name: Name of the AWS region to deploy to.
    """
    prefix = model_path
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
        prefix = run_id + "/" + prefix
    run_id = _check_compatible(model_path)
    model_s3_path = _upload_s3(local_model_path=model_path, bucket=bucket, prefix=prefix)
    _deploy(role=execution_role_arn,
            image=image,
            app_name=app_name,
            model_s3_path=model_s3_path,
            run_id=run_id,
            region_name=region_name)
Example #8
def spark_udf(spark, path, run_id=None, result_type="double"):
    """Returns a Spark UDF that can be used to invoke the python-function formatted model.

    Note that parameters passed to the UDF will be forwarded to the model as a DataFrame
    where the names are simply ordinals (0, 1, ...).

    Example:
        predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
        df.withColumn("prediction", predict("name", "age")).show()

    Args:
        spark (SparkSession): a SparkSession object
        path (str): A path containing a pyfunc model.
        run_id (str): ID of the run that produced this model. If provided, run_id is used to
            retrieve the model logged with MLflow.
        result_type (str): Spark UDF type returned by the model's prediction method. Default double.
    """

    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from pyspark.sql.functions import pandas_udf

    if run_id:
        path = tracking._get_model_log_dir(path, run_id)

    archive_path = SparkModelCache.add_local_model(spark, path)

    def predict(*args):
        model = SparkModelCache.get_or_load(archive_path)
        schema = {str(i): arg for i, arg in enumerate(args)}
        pdf = pandas.DataFrame(schema)
        result = model.predict(pdf)
        return pandas.Series(result)

    return pandas_udf(predict, result_type)
Example #9
def run_local(model_path, run_id=None, port=5000, image=DEFAULT_IMAGE_NAME):
    """
    Serve a model locally in a SageMaker-compatible Docker container.

    :param model_path: Path to the model. Local if no run_id is specified,
                       otherwise MLflow-run-relative.
    :param run_id: MLflow run ID.
    :param port: Local port on which to serve the model.
    :param image: Name of the Docker image to be used.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    _check_compatible(model_path)
    model_path = os.path.abspath(model_path)
    eprint("launching docker image with path {}".format(model_path))
    cmd = ["docker", "run", "-v", "{}:/opt/ml/model/".format(model_path), "-p", "%d:8080" % port,
           "--rm", image, "serve"]
    eprint('executing', ' '.join(cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True)

    import signal

    def _sigterm_handler(*_):
        eprint("received termination signal => killing docker process")
        proc.send_signal(signal.SIGINT)

    signal.signal(signal.SIGTERM, _sigterm_handler)
    for x in iter(proc.stdout.readline, ""):
        eprint(x, end='')
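A minimal client-side sketch to go with run_local. It assumes the served container follows the SageMaker container contract (a /ping health-check route on the forwarded port); the exact scoring payload accepted on /invocations depends on the scoring server baked into the image, so it is not shown here:

# In one process (blocks while streaming container logs):
#   run_local(model_path="model", run_id="...", port=5000)   # "..." = hypothetical run ID
import requests

print(requests.get("http://localhost:5000/ping").status_code)  # expect 200 once the container is up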
Example #10
def _load_model_conf(path, run_id=None):
    """Load a model configuration stored in Python function format."""
    if run_id:
        path = tracking._get_model_log_dir(path, run_id)
    conf_path = os.path.join(path, "MLmodel")
    model = Model.load(conf_path)
    if FLAVOR_NAME not in model.flavors:
        raise Exception("Format '{format}' not found not in {path}.".format(
            format=FLAVOR_NAME, path=conf_path))
    return model.flavors[FLAVOR_NAME]
Example #11
def serve(model_path, run_id, port):
    """
    Serve a PythonFunction model saved with MLflow.

    If a run_id is specified, MODEL_PATH is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    app = scoring_server.init(load_pyfunc(model_path))
    app.run(port=port)
Example #12
def load_pyfunc(path, run_id=None):
    """Load a model stored in Python function format."""
    if run_id:
        path = tracking._get_model_log_dir(path, run_id)
    conf_path = os.path.join(path, "MLmodel")
    model = Model.load(conf_path)
    if FLAVOR_NAME not in model.flavors:
        raise Exception("Format '{format}' not found not in {path}.".format(
            format=FLAVOR_NAME, path=conf_path))
    conf = model.flavors[FLAVOR_NAME]
    if CODE in conf and conf[CODE]:
        code_path = os.path.join(path, conf[CODE])
        sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
    data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path
    return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)
Example #13
def deploy(app_name,
           model_path,
           execution_role_arn=None,
           bucket=None,
           run_id=None,
           image_url=None,
           region_name="us-west-2"):
    """
    Deploy model on SageMaker.
    Current active AWS account needs to have correct permissions setup.

    :param app_name: Name of the deployed app.
    :param model_path: Path to the model. Local if no run_id is specified,
                       otherwise MLflow-run-relative.
    :param execution_role_arn: Amazon execution role with SageMaker rights. Defaults
                               to the currently-assumed role.
    :param bucket: S3 bucket where model artifacts will be stored. Defaults to a
                   SageMaker-compatible bucket name.
    :param run_id: MLflow run ID.
    :param image_url: URL of the Docker image to be used. If not specified, uses a
                      publicly-available pre-built image.
    :param region_name: Name of the AWS region to deploy to.
    """
    if not image_url:
        image_url = _get_default_image_url()

    if not execution_role_arn:
        execution_role_arn = _get_assumed_role_arn()

    if not bucket:
        eprint("No model data bucket specified, using the default bucket")
        bucket = _get_default_s3_bucket(region_name)

    prefix = model_path
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
        prefix = os.path.join(run_id, prefix)
    run_id = _check_compatible(model_path)

    model_s3_path = _upload_s3(local_model_path=model_path,
                               bucket=bucket,
                               prefix=prefix)
    _deploy(role=execution_role_arn,
            image_url=image_url,
            app_name=app_name,
            model_s3_path=model_s3_path,
            run_id=run_id,
            region_name=region_name)
Example #14
def predict(model_path, run_id, input_path, output_path):
    """
    Load a pandas DataFrame and run a PythonFunction model saved with MLflow against it.
    The prediction results are written as CSV to output_path, or to stdout if no output path
    is given.

    If a run_id is specified, MODEL_PATH is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model = load_pyfunc(model_path)
    df = pandas.read_csv(input_path)
    result = model.predict(df)
    out_stream = sys.stdout
    if output_path:
        out_stream = open(output_path, 'w')
    pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
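A hedged end-to-end sketch of the predict helper above, calling it directly as a Python function; all paths are hypothetical:

import pandas as pd

pd.DataFrame({"x": [1.0, 2.0, 3.0]}).to_csv("/tmp/input.csv", index=False)
predict(model_path="/tmp/my_pyfunc_model",  # hypothetical local pyfunc model directory
        run_id=None,
        input_path="/tmp/input.csv",
        output_path="/tmp/output.csv")      # a falsy output_path writes to stdout instead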
Example #15
def serve(model_path, run_id, port, no_conda):
    """
    Serve a PythonFunction model saved with MLflow.

    If a ``run_id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)

    app = scoring_server.init(load_pyfunc(model_path))
    app.run(port=port)
Example #16
def spark_udf(spark, path, run_id=None, result_type="double"):
    """
    Return a Spark UDF that can be used to invoke the Python function formatted model.

    Parameters passed to the UDF are forwarded to the model as a DataFrame where the names are
    simply ordinals (0, 1, ...).

    Example:

    .. code:: python

        predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
        df.withColumn("prediction", predict("name", "age")).show()

    :param spark: A SparkSession object.
    :param path: A path containing a pyfunc model.
    :param run_id: ID of the run that produced this model. If provided, ``run_id`` is used to
                   retrieve the model logged with MLflow.
    :param result_type: Spark UDF type returned by the model's prediction method. Default double.

    """

    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from pyspark.sql.functions import pandas_udf

    if run_id:
        path = tracking._get_model_log_dir(path, run_id)

    archive_path = SparkModelCache.add_local_model(spark, path)

    def predict(*args):
        model = SparkModelCache.get_or_load(archive_path)
        schema = {str(i): arg for i, arg in enumerate(args)}
        # Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)
        columns = [str(i) for i, _ in enumerate(args)]
        pdf = pandas.DataFrame(schema, columns=columns)
        result = model.predict(pdf)
        return pandas.Series(result)

    return pandas_udf(predict, result_type)
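The explicit columns argument in the UDF above matters because, depending on the pandas/Python version, building a DataFrame from a plain dict can sort string column names lexicographically, so "10" lands before "2" and positional arguments get misaligned. A small illustration:

import pandas as pd

data = {str(i): [float(i)] for i in range(11)}
# Without an explicit order, columns may come out lexicographically sorted: '0', '1', '10', '2', ...
print(list(pd.DataFrame(data).columns))
# Passing columns pins the intended positional order: '0', '1', ..., '10'.
print(list(pd.DataFrame(data, columns=[str(i) for i in range(11)]).columns))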
Example #17
def load_pyfunc(path, run_id=None, suppress_warnings=False):
    """
    Load a model stored in Python function format.

    :param suppress_warnings: If True, non-fatal warning messages associated with the model
                              loading process will be suppressed. If False, these warning messages
                              will be emitted.
    """
    if run_id:
        path = tracking._get_model_log_dir(path, run_id)
    conf = _load_model_conf(path)
    model_py_version = conf.get(PY_VERSION)
    if not suppress_warnings:
        _warn_potentially_incompatible_py_version_if_necessary(
            model_py_version=model_py_version)
    if CODE in conf and conf[CODE]:
        code_path = os.path.join(path, conf[CODE])
        sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
    data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path
    return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)
Example #18
def predict(model_path, run_id, input_path, output_path, no_conda):
    """
    Load a pandas DataFrame and run a PythonFunction model saved with MLflow against it.
    The prediction results are written as CSV to the output path, or to stdout if none is given.

    If a ``run-id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)

    model = load_pyfunc(model_path)
    df = pandas.read_csv(input_path)
    result = model.predict(df)
    out_stream = sys.stdout
    if output_path:
        out_stream = open(output_path, 'w')
    pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
Example #19
def run_local(model_path, run_id=None, port=5000, container="mlflow_sage"):
    """
    Serve a model locally in a SageMaker-compatible Docker container.

    :param model_path: Path to the model. Local if no run_id is specified,
                       otherwise MLflow-run-relative.
    :param run_id: MLflow run ID.
    :param port: Local port on which to serve the model.
    :param container: Name of the Docker container to be used.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    _check_compatible(model_path)
    model_path = os.path.abspath(model_path)
    print("launching docker container with path {}".format(model_path))
    cmd = ["docker", "run", "-v", "{}:/opt/ml/model/".format(model_path), "-p",
           "%d:8080" % port, "--rm", container, "serve"]
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
    for x in iter(proc.stdout.readline, ""):
        print(x, end='', flush=True)
Example #20
def deploy(app_name, model_path, run_id):
    """Deploy MLflow model to Azure ML.

    This command exports the MLflow model into an Azure ML compatible format and creates a
    service serving this model.

    NOTE: This command must be called from a correctly initialized Azure ML environment.
          At the moment this means it has to be run from a console launched from Azure ML
          Workbench. The caller is responsible for setting up the Azure ML environment and
          accounts.

    NOTE: Azure ML cannot handle an arbitrary Conda environment. In particular, the Python
          version appears to be fixed. If the model contains a Conda environment and was
          trained outside of Azure ML, the Conda environment might need to be edited to work
          with Azure ML.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_path = os.path.abspath(model_path)
    with TempDir(chdr=True, remove_on_exit=True):
        exec_str = _export(app_name, model_path, "model")
        print("executing", '"{}"'.format(exec_str))
        # Use os.system instead of subprocess due to the fact that currently all azureml commands
        # have to be called within the same shell (launched from azureml workbench app by the user).
        # We can change this once there is a python api (or general cli) available.
        os.system(exec_str)
Example #21
def deploy(app_name,
           model_path,
           execution_role_arn=None,
           bucket=None,
           run_id=None,
           image_url=None,
           region_name="us-west-2",
           mode=DEPLOYMENT_MODE_CREATE,
           archive=False):
    """
    Deploy model on SageMaker.
    Current active AWS account needs to have correct permissions setup.

    :param app_name: Name of the deployed application.
    :param model_path: Path to the model. Local if no run_id is specified,
                       otherwise MLflow-run-relative.
    :param execution_role_arn: Amazon execution role with SageMaker rights. Defaults
                               to the currently-assumed role.
    :param bucket: S3 bucket where model artifacts will be stored. Defaults to a
                   SageMaker-compatible bucket name.
    :param run_id: MLflow run ID.
    :param image_url: URL of the Docker image to be used. If not specified, uses a
                      publicly-available pre-built image.
    :param region_name: Name of the AWS region to which to deploy the application.
    :param mode: The mode in which to deploy the application. Must be one of the following:

                 mlflow.sagemaker.DEPLOYMENT_MODE_CREATE: Creates an application with the specified
                     name and model. This will fail if an application of the same name already
                     exists.

                 mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE: If an application of the specified name
                     exists, its model(s) will be replaced with the specified model. If no such
                     application exists, it will be created with the specified name and model.

                 mlflow.sagemaker.DEPLOYMENT_MODE_ADD: Adds the specified model to a pre-existing
                     application with the specified name, if one exists. If the application does
                     not exist, a new application will be created with the specified name and
                     model. NOTE: **If the application already exists**, the specified model will
                     be added to the application's corresponding SageMaker endpoint with an initial
                     weight of zero (0). To route traffic to the model, update the application's
                     associated endpoint configuration using either the AWS console or the
                     `UpdateEndpointWeightsAndCapacities` function defined in the SageMaker API
                     Documentation
                     (https://docs.aws.amazon.com/sagemaker/latest/dg/
                     API_UpdateEndpointWeightsAndCapacities.html).

    :param archive: If True, any pre-existing SageMaker application resources that become inactive
                    (i.e. as a result of deploying in mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE mode)
                    will be preserved. If False, these resources will be deleted.
    """
    if mode not in DEPLOYMENT_MODES:
        raise ValueError("`mode` must be one of: {mds}".format(
            mds=",".join(DEPLOYMENT_MODES)))

    if not image_url:
        image_url = _get_default_image_url()

    if not execution_role_arn:
        execution_role_arn = _get_assumed_role_arn()

    if not bucket:
        eprint("No model data bucket specified, using the default bucket")
        bucket = _get_default_s3_bucket(region_name)

    prefix = model_path
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
        prefix = os.path.join(run_id, prefix)
    run_id = _check_compatible(model_path)

    model_s3_path = _upload_s3(local_model_path=model_path,
                               bucket=bucket,
                               prefix=prefix)
    _deploy(role=execution_role_arn,
            image_url=image_url,
            app_name=app_name,
            model_s3_path=model_s3_path,
            run_id=run_id,
            region_name=region_name,
            mode=mode,
            archive=archive)
Example #22
def deploy(app_name,
           model_path,
           execution_role_arn=None,
           bucket=None,
           run_id=None,
           image_url=None,
           region_name="us-west-2",
           mode=DEPLOYMENT_MODE_CREATE,
           archive=False,
           instance_type=DEFAULT_SAGEMAKER_INSTANCE_TYPE,
           instance_count=DEFAULT_SAGEMAKER_INSTANCE_COUNT):
    """
    Deploy model on SageMaker.
    Currently active AWS account needs to have correct permissions set up.

    :param app_name: Name of the deployed application.
    :param model_path: Path to the model. Local if no ``run_id`` is specified, otherwise
        MLflow-run-relative.
    :param execution_role_arn: Amazon execution role with SageMaker rights.
        Defaults to the currently-assumed role.
    :param bucket: S3 bucket where model artifacts will be stored. Defaults to a
        SageMaker-compatible bucket name.
    :param run_id: MLflow run ID.
    :param image_url: URL of the Docker image to be used. If not specified, uses a
        publicly-available pre-built image.
    :param region_name: Name of the AWS region to which to deploy the application.
    :param mode: The mode in which to deploy the application. Must be one of the following:

        - ``mlflow.sagemaker.DEPLOYMENT_MODE_CREATE``:
          Create an application with the specified name and model. This fails if an
          application of the same name already exists.

        - ``mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE``:
          If an application of the specified name exists, its model(s) is replaced with
          the specified model. If no such application exists, it is created with the
          specified name and model.

        - ``mlflow.sagemaker.DEPLOYMENT_MODE_ADD``:
          Add the specified model to a pre-existing application with the specified name,
          if one exists. If the application does not exist, a new application is created
          with the specified name and model. NOTE: If the application **already exists**,
          the specified model is added to the application's corresponding SageMaker
          endpoint with an initial weight of zero (0). To route traffic to the model,
          update the application's associated endpoint configuration using either the
          AWS console or the ``UpdateEndpointWeightsAndCapacities`` function defined in
          https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpointWeightsAndCapacities.html.

    :param archive: If True, any pre-existing SageMaker application resources that become inactive
        (i.e. as a result of deploying in ``mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE``
        mode) are preserved. If False, these resources are deleted.
    :param instance_type: The type of SageMaker ML instance on which to deploy the model. For a list
        of supported instance types, see
        `Amazon SageMaker ML Instance Types
        <https://aws.amazon.com/sagemaker/pricing/instance-types/>`_.
    :param instance_count: The number of SageMaker ML instances on which to deploy the model.
    """
    if mode not in DEPLOYMENT_MODES:
        raise ValueError("`mode` must be one of: {mds}".format(
            mds=",".join(DEPLOYMENT_MODES)))

    if not image_url:
        image_url = _get_default_image_url()

    if not execution_role_arn:
        execution_role_arn = _get_assumed_role_arn()

    if not bucket:
        eprint("No model data bucket specified, using the default bucket")
        bucket = _get_default_s3_bucket(region_name)

    prefix = model_path
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
        prefix = os.path.join(run_id, prefix)
    run_id = _check_compatible(model_path)

    model_s3_path = _upload_s3(local_model_path=model_path,
                               bucket=bucket,
                               prefix=prefix)
    _deploy(role=execution_role_arn,
            image_url=image_url,
            app_name=app_name,
            model_s3_path=model_s3_path,
            run_id=run_id,
            region_name=region_name,
            mode=mode,
            archive=archive,
            instance_type=instance_type,
            instance_count=instance_count)
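A hedged invocation sketch of the fuller signature; every concrete value below (application name, run ID, instance type) is a hypothetical placeholder, and omitted arguments fall back to the defaults described in the docstring:

deploy(app_name="my-app",
       model_path="model",             # artifact path within the run
       run_id="...",                   # hypothetical MLflow run ID
       region_name="us-west-2",
       mode=DEPLOYMENT_MODE_REPLACE,   # replace the existing application's model, if any
       archive=False,                  # resources made inactive by the replacement are deleted
       instance_type="ml.m4.xlarge",   # hypothetical SageMaker instance type
       instance_count=1)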