import joblib  # on older scikit-learn: from sklearn.externals import joblib
from azureml.core.model import Model


def init():
    global model
    # "best_model" is the name under which the model is registered in the workspace;
    # this call returns the path to the model.pkl file on the local disk.
    model_path = Model.get_model_path(model_name='best_model')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)
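Azure ML pairs init() with a run() entry point that the service calls once per scoring request. A minimal hedged sketch to accompany the init() above (the "data" payload field is an assumption, not part of the original snippet):

import json
import numpy as np

def run(raw_data):
    # parse the JSON request body and score it with the model loaded in init()
    data = np.array(json.loads(raw_data)["data"])  # "data" field is assumed
    result = model.predict(data)
    # the return value must be JSON-serializable
    return result.tolist()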
def main(_):
    # start_time = datetime.datetime.now()
    label_file_name = os.path.join(args.label_dir, "labels.txt")
    label_dict = get_class_label_dict(label_file_name)
    classes_num = len(label_dict)
    test_feeder = DataIterator(data_dir=args.dataset_path)
    total_size = len(test_feeder.labels)
    count = 0
    # get model from model registry
    model_path = Model.get_model_path(args.model_name)
    with tf.Session() as sess:
        test_images = test_feeder.input_pipeline(batch_size=args.batch_size)
        with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
            input_images = tf.placeholder(tf.float32, [args.batch_size, image_size, image_size, num_channel])
            logits, _ = inception_v3.inception_v3(input_images,
                                                  num_classes=classes_num,
                                                  is_training=False)
            # tf.argmax yields predicted class indices, not probabilities
            predictions = tf.argmax(logits, 1)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver = tf.train.Saver()
        saver.restore(sess, model_path)
        out_filename = os.path.join(args.output_dir, "result-labels.txt")
        with open(out_filename, "w") as result_file:
            i = 0
            while count < total_size and not coord.should_stop():
                test_images_batch = sess.run(test_images)
                file_names_batch = test_feeder.file_paths[i * args.batch_size:
                                                          min(test_feeder.size, (i + 1) * args.batch_size)]
                results = sess.run(predictions, feed_dict={input_images: test_images_batch})
                new_add = min(args.batch_size, total_size - count)
                count += new_add
                i += 1
                for j in range(new_add):
                    result_file.write(os.path.basename(file_names_batch[j]) + ": " + label_dict[results[j]] + "\n")
                result_file.flush()
            coord.request_stop()
            coord.join(threads)

        # copy the file to artifacts
        shutil.copy(out_filename, "./outputs/")
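The script above reads its settings from an args namespace defined elsewhere in the file (as are image_size, num_channel, get_class_label_dict, and DataIterator). A hedged argparse sketch consistent with the fields the code uses (default values are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--label_dir", help="directory containing labels.txt")
parser.add_argument("--dataset_path", help="directory holding the test images")
parser.add_argument("--model_name", help="name of the registered model")
parser.add_argument("--output_dir", help="where result-labels.txt is written")
parser.add_argument("--batch_size", type=int, default=64)  # assumed default
args = parser.parse_args()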
Example #3
def init():
    global model
    # retrieve the path to the model file using the model name
    model_path = Model.get_model_path('sklearn_mnist')
    model = joblib.load(model_path)
Example #4
TENANT_ID = os.environ.get('TENANT_ID')
APP_ID = os.environ.get('APP_ID')
APP_SECRET = os.environ.get('APP_SECRET')
MODEL_PATH = os.environ.get('MODEL_PATH')
MODEL_NAME = os.environ.get('MODEL_NAME')
WORKSPACE_NAME = os.environ.get('WORKSPACE_NAME')
SUBSCRIPTION_ID = os.environ.get('SUBSCRIPTION_ID')
RESOURCE_GROUP = os.environ.get('RESOURCE_GROUP')

if not os.path.isfile(MODEL_PATH):
    print("The given model path %s is invalid" % MODEL_PATH)
    sys.exit(1)

SP_AUTH = ServicePrincipalAuthentication(tenant_id=TENANT_ID,
                                         service_principal_id=APP_ID,
                                         service_principal_password=APP_SECRET)

WORKSPACE = Workspace.get(WORKSPACE_NAME, SP_AUTH, SUBSCRIPTION_ID,
                          RESOURCE_GROUP)

try:
    MODEL = Model.register(model_path=MODEL_PATH,
                           model_name=MODEL_NAME,
                           description="Forecasting Model",
                           workspace=WORKSPACE)

    print("Model registered successfully. ID: " + MODEL.id)
except Exception as caught_error:
    print("Error while registering the model: " + str(caught_error))
    sys.exit(1)
Example #5
# Parameterize the metrics on which the models should be compared

# Add a golden data set on which all model performance can be evaluated

# Get the latest run_id
with open("aml_config/run_id.json") as f:
    config = json.load(f)

new_model_run_id = config["run_id"]
experiment_name = config["experiment_name"]
exp = Experiment(workspace=ws, name=experiment_name)

try:
    # Get the most recently registered model; we assume it is the model in production.
    # Download it and compare it with the newly trained model by running tests on the same data set.
    model_list = Model.list(ws)
    production_model = max(model_list, key=lambda m: m.created_time)
    production_model_run_id = production_model.tags.get('run_id')
    run_list = exp.get_runs()
    # production_model_run = next(filter(lambda x: x.id == production_model_run_id, run_list))

    # Get the run history for both production model and newly trained model and compare mse
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    production_model_mse = production_model_run.get_metrics().get('mse')
    new_model_mse = new_model_run.get_metrics().get('mse')
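The snippet ends after fetching both metrics; a hedged sketch of the promotion check that typically follows (the exit behavior is an assumption, and sys is assumed to be imported at the top of the script):

    # hypothetical continuation: promote only when the new model's MSE improves
    if new_model_mse < production_model_mse:
        print("New model performs better than the production model.")
    else:
        print("New model does not perform better; cancelling promotion.")
        sys.exit(0)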
Example #6
        memory_gb=1,
        tags={
            "data": "MNIST",
            "method": "pytorch"
        },
        description='Predict MNIST with pytorch')
    return aciconfig


def deploy(aciconfig, envfile, name, model):
    # configure the image
    image_config = ContainerImage.image_configuration(
        execution_script="./score.py", runtime="python", conda_file=envfile)

    service = Webservice.deploy_from_model(workspace=ws,
                                           name=name,
                                           deployment_config=aciconfig,
                                           models=[model],
                                           image_config=image_config)

    service.wait_for_deployment(show_output=True)

    print(service.scoring_uri)


if __name__ == '__main__':
    name = "pytorch-mnist-svc"
    model = Model(ws, 'pytorch')
    envfile = create_env()
    aci_config = create_config()
    deploy(aci_config, envfile, name, model)
Example #7
ws = Workspace.from_config()

# Get the latest model details

try:
    with open("aml_config/model.json") as f:
        config = json.load(f)
except FileNotFoundError:
    print('No new model to register, so there is no need to create a new scoring image')
    # raise Exception('No new model to register as production model performs better')
    sys.exit(0)

model_name = config['model_name']
model_version = config['model_version']

model_list = Model.list(workspace=ws)
model, = (m for m in model_list
          if m.version == model_version and m.name == model_name)
print('Model picked: {} \nModel Description: {} \nModel Version: {}'.format(
    model.name, model.description, model.version))

os.chdir('./devops/code/scoring')
image_name = "predmaintenance-model-score"

image_config = ContainerImage.image_configuration(
    execution_script="score.py",
    runtime="python-slim",
    conda_file="conda_dependencies.yml",
    description="Image with predictive maintenance model",
    tags={
        'area': "diabetes",
Example #8
#%%
#Initialize Workspace
from azureml.core import Workspace
ws = Workspace.from_config()  # from_config() reads the config file in the directory and loads the workspace details

print('workspace information:')
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')

#%% [markdown]
# ## Register Model

#%%
from azureml.core.model import Model
print(str(current_dir))
print("Model path : "+ str(cfg.MODEL_PATH))
converted_model = Model(ws, name=cfg.CONVERTED_MODEL_NAME)

print('model information:')
print(converted_model.name, converted_model.url, converted_model.version, converted_model.id, converted_model.created_time)


#%% [markdown]
# ## Build Container Image

#%%
from azureml.core.image import Image
from azureml.contrib.iot import IotContainerImage

print('Starting to create a container image ...')

# Change working directory to the main.py location
Example #9
# Define model and service names
service_name = 'object-recognition-service'
model_name = 'object-recognition-pipeline'

# Get run context
run = Run.get_context()
workspace = run.experiment.workspace

# Read accuracy
with open(accuracy_file) as f:
    accuracy = f.read()

# Register model if accuracy is higher or if test dataset has changed
new_model = False
try:
    model = Model(workspace, model_name)
    prev_accuracy = model.tags['accuracy']
    prev_test_dir = model.tags['test_data']
    # tag values are strings, so compare the accuracies numerically
    if prev_test_dir != test_dir or float(accuracy) > float(prev_accuracy):
        model = register_model(model_dir, model_name, accuracy, test_dir, workspace)
        new_model = True
except WebserviceException:
    print('Model does not exist yet')
    model = register_model(model_dir, model_name, accuracy, test_dir, workspace)
    new_model = True

# Deploy new webservice if new model was registered
if new_model:
    # Create inference config
    inference_config = InferenceConfig(
        source_directory = '.',
Example #10
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core.webservice import AciWebservice

ws = Workspace.from_config(path='./.azureml', _file_name='config.json')
model = Model(ws, name='iris_model', version=1)

env = Environment.from_conda_specification(
    name='sklearn-aml-env', file_path='./.azureml/sklearn-env-aml.yml')

inference_config = InferenceConfig(entry_script="./src/score.py",
                                   environment=env)

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1)

aci_service = Model.deploy(workspace=ws,
                           name='iris-model-service',
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config)

aci_service.wait_for_deployment(show_output=True)
print(aci_service.state)
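Once the service reports a healthy state, it can be smoke-tested over HTTP. A hedged sketch (the four feature values are placeholders for the iris model's inputs):

import json
import requests

payload = json.dumps({"data": [[5.1, 3.5, 1.4, 0.2]]})  # placeholder iris features
resp = requests.post(aci_service.scoring_uri, data=payload,
                     headers={"Content-Type": "application/json"})
print(resp.status_code, resp.json())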
Example #11
def load_and_register_model_from_local_file(workspace, local_model_path,
                                            azure_model_name):
    return Model.register(model_path=local_model_path,
                          model_name=azure_model_name,
                          workspace=workspace)
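A hedged usage example for the helper above; the workspace config, model file, and model name are illustrative:

from azureml.core import Workspace

ws = Workspace.from_config()
registered = load_and_register_model_from_local_file(
    ws, "outputs/model.pkl", "my-model")  # hypothetical path and name
print(registered.name, registered.version)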
Example #12
def init():
    global model
    model_path = Model.get_model_path('LookUpModel')
    model = joblib.load(model_path)
Example #13
from azureml.core.model import Model
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice, Webservice
from azureml.exceptions import WebserviceException
import json

# Initialize Workspace
ws = Workspace.from_config('aml_config')
print("--- WORKSPACE: {} - Initialized ---\n".format(ws.name))

# Register Model
model = Model.register(
    model_path="./DEPLOY/deploy_to_cloud/sklearn_regression_model.pkl",
    model_name="sklearn_regression_model.pkl",
    tags={
        'area': "diabetes",
        'type': "regression"
    },
    description="Ridge regression model to predict diabetes",
    workspace=ws)
print("--- MODEL: {} - Registered ---\n".format(model.name))

# Inference Configuration
inference_config = InferenceConfig(
    runtime="python",
    entry_script="./DEPLOY/deploy_to_cloud/score.py",
    conda_file="./DEPLOY/deploy_to_cloud/myenv.yml",
    extra_docker_file_steps="./DEPLOY/deploy_to_cloud/helloworld.txt")
print("--- IMAGE CONFIG - Created ---\n")

# Deploy as a Webservice
Example #14
from azureml.core import Workspace
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice  # noqa: E401
from azureml.core.webservice import LocalWebservice
from config_deepensemble_1 import CONFIG

from constants import REPO_DIR

from cgmml.common.model_utils import environment

if __name__ == "__main__":

    workspace = Workspace.from_config()
    model = Model(workspace, name=CONFIG.MODEL_NAME)

    cgm_env = environment.cgm_environment(workspace=workspace,
                                          curated_env_name="cgm-env",
                                          env_exist=True)

    inference_config_aci = InferenceConfig(
        environment=cgm_env,
        entry_script=str(REPO_DIR /
                         "cgmml/common/endpoints/entry_script_aci.py"),
    )

    if CONFIG.LOCALTEST:
        deployment_config = LocalWebservice.deploy_configuration(port=6789)
    else:
        deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                               memory_gb=4)
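    # The snippet stops after building the deployment config; a hedged
    # completion using the objects defined above (the service name is an
    # assumption):
    service = Model.deploy(workspace=workspace,
                           name="cgm-model-service",  # assumed service name
                           models=[model],
                           inference_config=inference_config_aci,
                           deployment_config=deployment_config)
    service.wait_for_deployment(show_output=True)
    print(service.state)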
Example #15
def trigger_training_job():

    # Define vars (change these values).
    # In a production situation, don't put secrets in source code; pass them in as secret variables,
    # see https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables
    workspace = sys.argv[1]
    subscription_id = sys.argv[2]
    resource_grp = sys.argv[3]

    domain = sys.argv[4]
    dbr_pat_token_raw = sys.argv[5]

    DBR_PAT_TOKEN = bytes(dbr_pat_token_raw, encoding='utf-8')  # encode the token as bytes (the b'...' form)
    notebookRemote = "/3_IncomeNotebookDevops"
    experiment_name = "experiment_model_release"
    model_name_run = datetime.datetime.now().strftime(
        "%Y%m%d%H%M%S"
    ) + "_dbrmod.mml"  # in case you want to change the name, keep the .mml extension
    model_name = "databricksmodel.mml"  # in case you want to change the name, keep the .mml extension
    db_compute_name = "dbr-amls-comp"

    #
    # Step 1: Run notebook using Databricks Compute in AML SDK
    #
    cli_auth = AzureCliAuthentication()

    ws = Workspace(workspace_name=workspace,
                   subscription_id=subscription_id,
                   resource_group=resource_grp,
                   auth=cli_auth)
    ws.get_details()

    #
    # Step 2: Create job and attach it to cluster
    #
    # In this step, secrets are added as parameters (spn_tenant, spn_clientid, spn_clientsecret).
    # Never do this in a production situation; use a secret scope backed by Key Vault instead.
    # See https://docs.azuredatabricks.net/user-guide/secrets/secret-scopes.html#azure-key-vault-backed-scopes
    response = requests.post(
        'https://%s/api/2.0/jobs/create' % domain,
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        json={
            "name": "Run AzureDevopsNotebook Job",
            "new_cluster": {
                "spark_version": "4.0.x-scala2.11",
                "node_type_id": "Standard_D3_v2",
                "spark_env_vars": {
                    'PYSPARK_PYTHON': '/databricks/python3/bin/python3',
                },
                "autoscale": {
                    "min_workers": 1,
                    "max_workers": 2
                }
            },
            "libraries": [{
                "pypi": {
                    "package": "azureml-sdk[databricks]"
                }
            }],
            "notebook_task": {
                "notebook_path":
                notebookRemote,
                "base_parameters": [{
                    "key": "subscription_id",
                    "value": subscription_id
                }, {
                    "key": "resource_group",
                    "value": resource_grp
                }, {
                    "key": "workspace_name",
                    "value": workspace
                }, {
                    "key": "model_name",
                    "value": model_name_run
                }]
            }
        })

    if response.status_code != 200:
        print("Error creating job: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(2)

    #
    # Step 3: Start job
    #
    databricks_job_id = response.json()['job_id']

    response = requests.post(
        'https://%s/api/2.0/jobs/run-now' % domain,
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        json={"job_id": +databricks_job_id})

    if response.status_code != 200:
        print("Error starting job run: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(3)

    print(response.json()['run_id'])

    #
    # Step 4: Wait until job is finished
    #
    databricks_run_id = response.json()['run_id']
    scriptRun = 1
    count = 0
    while scriptRun == 1:
        response = requests.get(
            'https://%s/api/2.0/jobs/runs/get?run_id=%s' %
            (domain, databricks_run_id),
            headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        )

        state = response.json()['state']
        life_cycle_state = state['life_cycle_state']
        print(state)

        if life_cycle_state in ["TERMINATED", "SKIPPED", "INTERNAL_ERROR"]:
            result_state = state['result_state']
            if result_state == "SUCCESS":
                print("run ok")
                scriptRun = 0
                # exit(0)
            else:
                exit(4)
        elif count > 180:
            print("time-out occurred after 90 minutes (180 polls x 30 s)")
            exit(5)
        else:
            count += 1
            time.sleep(30)  # wait 30 seconds before next status update

    #
    # Step 5: Retrieve model from dbfs
    #
    mdl, ext = model_name_run.split(".")
    model_zip_run = mdl + ".zip"

    response = requests.get(
        'https://%s/api/2.0/dbfs/read?path=/%s' % (domain, model_zip_run),
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN})
    if response.status_code != 200:
        print("Error copying dbfs results: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(1)

    model_output = base64.b64decode(response.json()['data'])

    # write the downloaded model into the deploy folder
    os.chdir("deploy")
    with open(model_zip_run, "wb") as outfile:
        outfile.write(model_output)
    print("Downloaded model {} to the deploy folder".format(model_zip_run))

    #
    # Step 6: Retrieve model metrics from dbfs
    #
    mdl, ext = model_name_run.split(".")
    model_metrics_json_run = mdl + "_metrics.json"

    response = requests.get(
        'https://%s/api/2.0/dbfs/read?path=/%s' %
        (domain, model_metrics_json_run),
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN})
    if response.status_code != 200:
        print("Error copying dbfs results: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(2)

    model_metrics_output = json.loads(base64.b64decode(
        response.json()['data']))

    #
    # Step 7: Put model and metrics to Azure ML Service
    #

    # start a training run by defining an experiment
    myexperiment = Experiment(ws, experiment_name)
    run = myexperiment.start_logging()
    run.upload_file("outputs/" + model_zip_run, model_zip_run)

    #run.log("pipeline_run", pipeline_run.id)
    run.log("au_roc", model_metrics_output["Area_Under_ROC"])
    run.log("au_prc", model_metrics_output["Area_Under_PR"])
    run.log("truePostive", model_metrics_output["True_Positives"])
    run.log("falsePostive", model_metrics_output["False_Positives"])
    run.log("trueNegative", model_metrics_output["True_Negatives"])
    run.log("falseNegative", model_metrics_output["False_Negatives"])

    run.complete()
    run_id = run.id
    print("run id:", run_id)

    # unzip file to model_name_run
    shutil.unpack_archive(model_zip_run, model_name_run)

    model = Model.register(
        model_path=model_name_run,  # this points to a local file
        model_name=model_name,  # this is the name the model is registered as
        tags={
            "area": "spar",
            "type": "regression",
            "run_id": run_id
        },
        description="Medium blog test model",
        workspace=ws,
    )
    print("Model registered: {} \nModel Description: {} \nModel Version: {}".
          format(model.name, model.description, model.version))

    # Step 8: Finally, write the registered model details to conf/model.json
    model_json = {}
    model_json["model_name"] = model.name
    model_json["model_version"] = model.version
    model_json["run_id"] = run_id
    model_json["model_name_run"] = model_name_run
    with open("../conf/model.json", "w") as outfile:
        json.dump(model_json, outfile)
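    # For reference, the conf/model.json written above takes this shape
    # (values illustrative, not real output):
    # {
    #     "model_name": "databricksmodel.mml",
    #     "model_version": 1,
    #     "run_id": "<azure-ml-run-id>",
    #     "model_name_run": "20200101120000_dbrmod.mml"
    # }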
Example #16
def init():
    global model
    # 'mlopsclassifier' is the name of the registered model that we want to deploy
    model_path = Model.get_model_path(model_name='mlopsclassifier')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)
Example #17
from azureml.core import Workspace
from azureml.core.model import Model

from lab4_mk1 import connectToWs

model_name = "poly-sorted-regression-2"
endpoint_name = "poly-sorted-regression-2-ep"

ws = connectToWs.getWorkspace()

# Locate the model in the workspace
model = Model(ws, name=model_name)

# Deploy the model as a real-time endpoint
service = Model.deploy(ws, endpoint_name, [model])

# Wait for the model deployment to complete
service.wait_for_deployment(show_output=True)
Example #18
import azureml.core
from azureml.core.authentication import ServicePrincipalAuthentication

ws = Workspace(workspace_name=workspace,
               subscription_id=subscription_id,
               resource_group=resource_grp)

ws.get_details()

# COMMAND ----------

# MAGIC %md ##### 1b. Retrieve best model from Azure ML Service

# COMMAND ----------

model = Model(ws, par_model_name)
model_list = Model.list(workspace=ws)
print("Model picked: {} \nModel Description: {} \nModel Version: {}".format(
    model.name, model.description, model.version))

# COMMAND ----------

# MAGIC %md ##### 1c. Create score file (script that will be used in endpoint to consume png) and conda env

# COMMAND ----------

#%%writefile score_deeplearning.py
score_deeplearning = """

import json
Example #19
File: score.py Project: dg1223/ML-pipeline
def init():
    global model
    model_path = Model.get_model_path(model_name='svm_mldevcredit_model.pkl')
    model = joblib.load(model_path)
Example #20
def _get_list_of_models(ws, name=None):
    return Model.list(workspace=ws, name=name)
Example #21
model_name = args.model_name
print('model_name:', model_name)
run.log('model_name', model_name)

datatrain_output = args.datatrain_output
print('datatrain_output:', datatrain_output)
run.log('datatrain_output', datatrain_output)

modelregistration_output = args.modelregistration_output
print('modelregistration_output:', modelregistration_output)
run.log('modelregistration_output', modelregistration_output)

is_directory = args.is_directory
print('is_directory:', is_directory)
run.log('is_directory', is_directory)

from azureml.core.model import Model

m = Model.register(model_name=model_name,
                   model_path=datatrain_output,
                   workspace=run.experiment.workspace)

m_serialized = m.serialize()
print('model_serialized2:', m_serialized)
run.log('model_serialized2', m_serialized)

import json
with open(modelregistration_output, 'w') as f:
    f.write(json.dumps(m_serialized))

run.complete()
Example #22
from azureml.core.webservice import Webservice
from azureml.core.image import ContainerImage
from azureml.core import Workspace
import score
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
import logging
from azureml.core.webservice import AciWebservice
logging.basicConfig(level=logging.DEBUG)
from azureml.core.model import Model
myws = Workspace.get(
    name='mnist1',
    subscription_id='bcbc4e01-e5d6-42b0-95af-06286341e6ca',
    resource_group='mnist3',
)
print(Model.get_model_path(model_name='sklearn_mnist_model.pkl'))
image_config = ContainerImage.image_configuration(execution_script="score.py",
                                                  runtime="python",
                                                  conda_file="myenv.yml")

model = Model.register(
    workspace=myws,
    model_path="./sklearn-mnist/outputs/sklearn_mnist_model.pkl",
    model_name="sklearn_mnist",
)
aciconfig = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags={
        "data": "MNIST",
        "method": "sklearn"
Example #23
File: __init__.py Project: zwd1990/mlflow
def deploy(
    model_uri,
    workspace,
    deployment_config=None,
    service_name=None,
    model_name=None,
    tags=None,
    mlflow_home=None,
    synchronous=True,
):
    """
    Register an MLflow model with Azure ML and deploy a webservice to Azure Container Instances (ACI)
    or Azure Kubernetes Service (AKS).

    The deployed service will contain a webserver that processes model queries.
    For information about the input data formats accepted by this webserver, see the
    :ref:`MLflow deployment tools documentation <azureml_deployment>`.

    :param model_uri: The location, in URI format, of the MLflow model used to build the Azure
                      ML deployment image. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param workspace: The AzureML workspace in which to deploy the service. This is a
                      `azureml.core.Workspace` object.
    :param deployment_config: The configuration for the Azure web service. This configuration
                              allows you to specify the resources the webservice will use and
                              the compute cluster it will be deployed in. If unspecified, the web
                              service will be deployed into an Azure Container Instance. This is a
                              `azureml.core.DeploymentConfig` object. For more information, see
                              `<https://docs.microsoft.com/python/api/azureml-core/
                              azureml.core.webservice.aks.aksservicedeploymentconfiguration>`_ and
                              `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml
                              .core.webservice.aci.aciservicedeploymentconfiguration>`_
    :param service_name: The name to assign the Azure Machine Learning webservice that will be
                         created. If unspecified, a unique name will be generated.
    :param model_name: The name to assign the Azure Model that will be created. If unspecified,
                       a unique model name will be generated.
    :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to
                 associate with the Azure Model and Deployment that will be created.
                 These tags are added to a set of default tags that include the model uri,
                 and more. For more information, see
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model(class)?view=azure-ml-py>`_.
    :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the
                        image will install MLflow from this directory. Otherwise, it will install
                        MLflow from pip.
    :param synchronous: If ``True``, this method blocks until the image creation procedure
                        terminates before returning. If ``False``, the method returns immediately,
                        but the returned image will not be available until the asynchronous
                        creation process completes. Use the
                        ``azureml.core.Webservice.wait_for_deployment()`` function to wait
                        for the deployment process to complete.
    :return: A tuple containing the following elements in order:
            - An ``azureml.core.webservice.Webservice`` object containing metadata for the
            new service.
            - An ``azureml.core.model.Model`` object containing metadata for the new model.

    .. code-block:: python
        :caption: Example

        import mlflow.azureml
        from azureml.core import Workspace
        from azureml.core.webservice import AciWebservice, Webservice

        # Load or create an Azure ML Workspace
        workspace_name = "<Name of your Azure ML workspace>"
        subscription_id = "<Your Azure subscription ID>"
        resource_group = "<Name of the Azure resource group in which to create Azure ML resources>"
        location = "<Name of the Azure location (region) in which to create Azure ML resources>"
        azure_workspace = Workspace.create(name=workspace_name,
                                           subscription_id=subscription_id,
                                           resource_group=resource_group,
                                           location=location,
                                           create_resource_group=True,
                                           exist_ok=True)

        # Create an Azure Container Instance webservice for an MLflow model
        azure_service, azure_model = mlflow.azureml.deploy(model_uri="<model_uri>",
                                                           service_name="<deployment-name>",
                                                           workspace=azure_workspace,
                                                           synchronous=True)
    """
    # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should
    # still be accessible for import from Python 2. Therefore, we will only import from the SDK
    # upon method invocation.
    # pylint: disable=import-error
    from azureml.core.model import Model as AzureModel, InferenceConfig
    from azureml.core import Environment as AzureEnvironment
    from azureml.core import VERSION as AZUREML_VERSION
    from azureml.core.webservice import AciWebservice

    absolute_model_path = _download_artifact_from_uri(model_uri)

    model_pyfunc_conf, model = _load_pyfunc_conf_with_model(
        model_path=absolute_model_path)
    model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None)
    run_id = None
    run_id_tag = None
    try:
        run_id = model.run_id
        run_id_tag = run_id
    except AttributeError:
        run_id = str(uuid.uuid4())
    if model_python_version is not None and StrictVersion(
            model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(
            message=
            ("Azure ML can only deploy models trained in Python 3 and above. See"
             " the following MLflow GitHub issue for a thorough explanation of this"
             " limitation and a workaround to enable support for deploying models"
             " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"
             ),
            error_code=INVALID_PARAMETER_VALUE,
        )

    tags = _build_tags(
        model_uri=model_uri,
        model_python_version=model_python_version,
        user_tags=tags,
        run_id=run_id_tag,
    )

    if service_name is None:
        service_name = _get_mlflow_azure_name(run_id)
    if model_name is None:
        model_name = _get_mlflow_azure_name(run_id)

    with TempDir(chdr=True) as tmp:
        model_directory_path = tmp.path("model")
        tmp_model_path = os.path.join(
            model_directory_path,
            _copy_file_or_tree(src=absolute_model_path,
                               dst=model_directory_path),
        )

        registered_model = AzureModel.register(workspace=workspace,
                                               model_path=tmp_model_path,
                                               model_name=model_name,
                                               tags=tags)

        _logger.info(
            "Registered an Azure Model with name: `%s` and version: `%s`",
            registered_model.name,
            registered_model.version,
        )

        # Create an execution script (entry point) for the image's model server. Azure ML requires
        # the container's execution script to be located in the current working directory during
        # image creation, so we create the execution script as a temporary file in the current
        # working directory.
        execution_script_path = tmp.path("execution_script.py")
        _create_execution_script(output_path=execution_script_path,
                                 azure_model=registered_model)

        environment = None
        if pyfunc.ENV in model_pyfunc_conf:
            environment = AzureEnvironment.from_conda_specification(
                _get_mlflow_azure_name(run_id),
                os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV]),
            )
        else:
            environment = AzureEnvironment(_get_mlflow_azure_name(run_id))

        if mlflow_home is not None:
            path = tmp.path("dist")
            _logger.info("Bulding temporary MLFlow wheel in %s", path)
            wheel = _create_mlflow_wheel(mlflow_home, path)
            whl_url = AzureEnvironment.add_private_pip_wheel(
                workspace=workspace, file_path=wheel, exist_ok=True)
            environment.python.conda_dependencies.add_pip_package(whl_url)
        else:
            environment.python.conda_dependencies.add_pip_package(
                "mlflow=={}".format(mlflow_version))

        # AzureML requires azureml-defaults to be installed to include
        # flask for the inference server.
        environment.python.conda_dependencies.add_pip_package(
            "azureml-defaults=={}".format(AZUREML_VERSION))

        inference_config = InferenceConfig(entry_script=execution_script_path,
                                           environment=environment)

        if deployment_config is not None:
            if deployment_config.tags is not None:
                # We want more narrowly-scoped tags to win on merge
                tags.update(deployment_config.tags)
            deployment_config.tags = tags
        else:
            deployment_config = AciWebservice.deploy_configuration(tags=tags)

        webservice = AzureModel.deploy(
            workspace=workspace,
            name=service_name,
            models=[registered_model],
            inference_config=inference_config,
            deployment_config=deployment_config,
        )
        _logger.info("Deploying an Azure Webservice with name: `%s`",
                     webservice.name)
        if synchronous:
            webservice.wait_for_deployment(show_output=True)
        return webservice, registered_model
Example #24
def main():
    e = Env()
    # Get Azure machine learning workspace
    ws = Workspace.get(name=e.workspace_name,
                       subscription_id=e.subscription_id,
                       resource_group=e.resource_group)
    print(f"get_workspace: {ws}")

    # Parameters
    sources_directory_train = e.sources_directory_train

    # model_names = ["nyc_energy_model", "diabetes_model"]
    model_names = get_model_names(
        os.path.join(sources_directory_train, "pipeline_config.json"))
    models = []
    for model_name in model_names:
        models.append(Model(ws, name=model_name))

    # Conda environment
    myenv = Environment.from_conda_specification(
        "myenv", os.path.join(sources_directory_train,
                              "conda_dependencies.yml"))
    # Enable Docker based environment
    myenv.docker.enabled = True

    # Deprecated: pass the model names string to score.py
    # score.py reads model names from pipeline_config.json directly.
    # model_names_str = ''
    # for name in model_names:
    #     model_names_str = model_names_str + name + ','
    # model_names_str = model_names_str[:-1]
    # myenv.environment_variables = {"MODEL_NAMES": model_names_str}

    inference_config = InferenceConfig(
        source_directory=sources_directory_train,
        entry_script="scoring/score.py",
        environment=myenv)

    deployment_config = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=2,
        tags={
            'area': "digits",
            'type': aci_service_name
        },
        description=aci_service_name)

    try:
        # Check whether the service already exists; if so, delete it first
        service = Webservice(ws, name=aci_service_name)
        if service:
            print("Found existing service %s; deleting it" % aci_service_name)
            service.delete()
    except WebserviceException as e:
        print(e)

    service = Model.deploy(ws, aci_service_name, models, inference_config,
                           deployment_config)

    service.wait_for_deployment(True)
    print(service.state)
Example #25
File: __init__.py Project: zwd1990/mlflow
def build_image(
    model_uri,
    workspace,
    image_name=None,
    model_name=None,
    mlflow_home=None,
    description=None,
    tags=None,
    synchronous=True,
):
    """
    Register an MLflow model with Azure ML and build an Azure ML ContainerImage for deployment.
    The resulting image can be deployed as a web service to Azure Container Instances (ACI) or
    Azure Kubernetes Service (AKS).

    The resulting Azure ML ContainerImage will contain a webserver that processes model queries.
    For information about the input data formats accepted by this webserver, see the
    :ref:`MLflow deployment tools documentation <azureml_deployment>`.

    :param model_uri: The location, in URI format, of the MLflow model used to build the Azure
                      ML deployment image. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.

    :param image_name: The name to assign the Azure Container Image that will be created. If
                       unspecified, a unique image name will be generated.
    :param model_name: The name to assign the Azure Model that will be created. If unspecified,
                       a unique model name will be generated.
    :param workspace: The AzureML workspace in which to build the image. This is a
                      `azureml.core.Workspace` object.
    :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the
                        image will install MLflow from this directory. Otherwise, it will install
                        MLflow from pip.
    :param description: A string description to associate with the Azure Container Image and the
                        Azure Model that will be created. For more information, see
                        `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                        azureml.core.image.container.containerimageconfig?view=azure-ml-py>`_ and
                        `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                        azureml.core.model.model?view=azure-ml-py#register>`_.
    :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to
                 associate with the Azure Container Image and the Azure Model that will be created.
                 These tags are added to a set of default tags that include the model uri,
                 and more. For more information, see
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                 azureml.core.image.container.containerimageconfig?view=azure-ml-py>`_ and
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                 azureml.core.model.model?view=azure-ml-py#register>`_.
    :param synchronous: If ``True``, this method blocks until the image creation procedure
                        terminates before returning. If ``False``, the method returns immediately,
                        but the returned image will not be available until the asynchronous
                        creation process completes. Use the
                        ``azureml.core.Image.wait_for_creation()`` function to wait for the creation
                        process to complete.
    :return: A tuple containing the following elements in order:
            - An ``azureml.core.image.ContainerImage`` object containing metadata for the new image.
            - An ``azureml.core.model.Model`` object containing metadata for the new model.

    .. code-block:: python
        :caption: Example

        import mlflow.azureml
        from azureml.core import Workspace
        from azureml.core.webservice import AciWebservice, Webservice

        # Load or create an Azure ML Workspace
        workspace_name = "<Name of your Azure ML workspace>"
        subscription_id = "<Your Azure subscription ID>"
        resource_group = "<Name of the Azure resource group in which to create Azure ML resources>"
        location = "<Name of the Azure location (region) in which to create Azure ML resources>"
        azure_workspace = Workspace.create(name=workspace_name,
                                           subscription_id=subscription_id,
                                           resource_group=resource_group,
                                           location=location,
                                           create_resource_group=True,
                                           exist_ok=True)

        # Build an Azure ML Container Image for an MLflow model
        azure_image, azure_model = mlflow.azureml.build_image(model_uri="<model_uri>",
                                                              workspace=azure_workspace,
                                                              synchronous=True)
        # If your image build failed, you can access build logs at the following URI:
        print("Access the following URI for build logs: {}".format(azure_image.image_build_log_uri))

        # Deploy the image to Azure Container Instances (ACI) for real-time serving
        webservice_deployment_config = AciWebservice.deploy_configuration()
        webservice = Webservice.deploy_from_image(
                            image=azure_image, workspace=azure_workspace, name="<deployment-name>")
        webservice.wait_for_deployment()
    """
    # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should
    # still be accessible for import from Python 2. Therefore, we will only import from the SDK
    # upon method invocation.
    # pylint: disable=import-error
    from azureml.core.image import ContainerImage
    from azureml.core.model import Model as AzureModel

    absolute_model_path = _download_artifact_from_uri(model_uri)

    model_pyfunc_conf, _ = _load_pyfunc_conf_with_model(
        model_path=absolute_model_path)
    model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None)
    if model_python_version is not None and StrictVersion(
            model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(
            message=
            ("Azure ML can only deploy models trained in Python 3 and above. See"
             " the following MLflow GitHub issue for a thorough explanation of this"
             " limitation and a workaround to enable support for deploying models"
             " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"
             ),
            error_code=INVALID_PARAMETER_VALUE,
        )

    tags = _build_tags(model_uri=model_uri,
                       model_python_version=model_python_version,
                       user_tags=tags)

    if image_name is None:
        image_name = _get_mlflow_azure_resource_name()
    if model_name is None:
        model_name = _get_mlflow_azure_resource_name()

    with TempDir(chdr=True) as tmp:
        model_directory_path = tmp.path("model")
        tmp_model_path = os.path.join(
            model_directory_path,
            _copy_file_or_tree(src=absolute_model_path,
                               dst=model_directory_path),
        )

        registered_model = AzureModel.register(
            workspace=workspace,
            model_path=tmp_model_path,
            model_name=model_name,
            tags=tags,
            description=description,
        )
        _logger.info(
            "Registered an Azure Model with name: `%s` and version: `%s`",
            registered_model.name,
            registered_model.version,
        )

        # Create an execution script (entry point) for the image's model server. Azure ML requires
        # the container's execution script to be located in the current working directory during
        # image creation, so we create the execution script as a temporary file in the current
        # working directory.
        execution_script_path = tmp.path("execution_script.py")
        _create_execution_script(output_path=execution_script_path,
                                 azure_model=registered_model)
        # Azure ML copies the execution script into the image's application root directory by
        # prepending "/var/azureml-app" to the specified script path. The script is then executed
        # by referencing its path relative to the "/var/azureml-app" directory. Unfortunately,
        # if the script path is an absolute path, Azure ML attempts to reference it directly,
        # resulting in a failure. To circumvent this problem, we provide Azure ML with the relative
        # script path. Because the execution script was created in the current working directory,
        # this relative path is the script path's base name.
        execution_script_path = os.path.basename(execution_script_path)

        if mlflow_home is not None:
            _logger.info(
                "Copying the specified mlflow_home directory: `%s` to a temporary location for"
                " container creation",
                mlflow_home,
            )
            mlflow_home = os.path.join(
                tmp.path(),
                _copy_project(src_path=mlflow_home, dst_path=tmp.path()))
            image_file_dependencies = [mlflow_home]
        else:
            image_file_dependencies = None
        dockerfile_path = tmp.path("Dockerfile")
        _create_dockerfile(output_path=dockerfile_path,
                           mlflow_path=mlflow_home)

        conda_env_path = None
        if pyfunc.ENV in model_pyfunc_conf:
            conda_env_path = os.path.join(tmp_model_path,
                                          model_pyfunc_conf[pyfunc.ENV])

        image_configuration = ContainerImage.image_configuration(
            execution_script=execution_script_path,
            runtime="python",
            docker_file=dockerfile_path,
            dependencies=image_file_dependencies,
            conda_file=conda_env_path,
            description=description,
            tags=tags,
        )
        image = ContainerImage.create(
            workspace=workspace,
            name=image_name,
            image_config=image_configuration,
            models=[registered_model],
        )
        _logger.info(
            "Building an Azure Container Image with name: `%s` and version: `%s`",
            image.name,
            image.version,
        )
        if synchronous:
            image.wait_for_creation(show_output=True)
        return image, registered_model
Example #26
def init():
    global model_path
    model_path = Model.get_model_path(model_name='Connect4ONNX')
Example #27
def init():
    global model
    model_path = Model.get_model_path('pytorch-hymenoptera')
    model = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.eval()
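A hedged inference sketch to accompany the PyTorch init() above; the (1, 3, 224, 224) input shape is an assumption about the hymenoptera model:

import torch

def run(input_tensor):
    # input_tensor: assumed shape (1, 3, 224, 224), preprocessed like the training data
    with torch.no_grad():
        output = model(input_tensor)
    return torch.argmax(output, dim=1).tolist()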
Example #28
def init():
    global model
    # retrieve the path to the model file using the model name
    model_path = Model.get_model_path('mlp_classifier')
    model = joblib.load(model_path)
Example #29
def init():
    global model
    global vec
    model_path = Model.get_model_path("trump-tweet-classification")
    model = joblib.load(model_path + '/trump-tweet-classification.pkl')
    vec = joblib.load(model_path + '/vec.pkl')
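A hedged run() sketch to accompany the init() above, reusing the loaded vectorizer; the "text" payload field is an assumption:

import json

def run(raw_data):
    texts = json.loads(raw_data)["text"]  # assumed payload field
    features = vec.transform(texts)       # vectorize with the loaded vec
    return model.predict(features).tolist()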
Example #30
def init():
    global model
    model_path = Model.get_model_path(model_name="AutoML3722eb63518")
    model = joblib.load(model_path)
Example #31
print("Azure ML SDK Version: ", azureml.core.VERSION)

ws = Workspace.from_config()
print("Resource group: ", ws.resource_group)
print("Location: ", ws.location)
print("Workspace name: ", ws.name)

from azureml.core.webservice import Webservice

for web_svc in Webservice.list(ws):
    print("Deleting web service", web_svc.name, "...")
    web_svc.delete()

from azureml.core import ComputeTarget

for target in ComputeTarget.list(ws):
    print("Deleting compute target", target.name, "...")
    target.delete()

from azureml.core import Image

for img in Image.list(ws):
    print("Deleting image", img.id, "...")
    img.delete()

from azureml.core.model import Model

for model in Model.list(ws):
    print("Deleting model", model.id, "...")
    model.delete()
Example #32
def init():
    global model
    # 'Model' is the name of the registered model that we want to deploy;
    # deserialize the model file back into a sklearn model
    model_path = Model.get_model_path(model_name='Model')
    model = joblib.load(model_path)
Example #33
def build_image(model_path, workspace, run_id=None, image_name=None, model_name=None,
                mlflow_home=None, description=None, tags=None, synchronous=True):
    """
    Register an MLflow model with Azure ML and build an Azure ML ContainerImage for deployment.
    The resulting image can be deployed as a web service to Azure Container Instances (ACI) or
    Azure Kubernetes Service (AKS).

    :param model_path: The path to the MLflow model for which the image will be built. If a run id
                       is specified, this should be a run-relative path. Otherwise, it should be
                       a local path.
    :param run_id: MLflow run ID.
    :param image_name: The name to assign the Azure Container Image that will be created. If
                       unspecified, a unique image name will be generated.
    :param model_name: The name to assign the Azure Model that will be created. If unspecified,
                       a unique model name will be generated.
    :param workspace: The AzureML workspace in which to build the image. This is a
                      `azureml.core.Workspace` object.
    :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the
                        image will install MLflow from this directory. Otherwise, it will install
                        MLflow from pip.
    :param description: A string description to associate with the Azure Container Image and the
                        Azure Model that will be created. For more information, see
                        `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                        azureml.core.image.container.containerimageconfig>`_ and
                        `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                        azureml.core.model.model?view=azure-ml-py#register>`_.
    :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to
                 associate with the Azure Container Image and the Azure Model that will be created.
                 These tags will be added to a set of default tags that include the model path,
                 the model run id (if specified), and more. For more information, see
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                 azureml.core.image.container.containerimageconfig>`_ and
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                 azureml.core.model.model?view=azure-ml-py#register>`_.
    :param synchronous: If `True`, this method will block until the image creation procedure
                        terminates before returning. If `False`, the method will return immediately,
                        but the returned image will not be available until the asynchronous
                        creation process completes. The `azureml.core.Image.wait_for_creation()`
                        function can be used to wait for the creation process to complete.
    :return: A tuple containing the following elements in order:
             - An `azureml.core.image.ContainerImage` object containing metadata for the new image.
             - An `azureml.core.model.Model` object containing metadata for the new model.

    >>> import mlflow.azureml
    >>> from azureml.core import Workspace
    >>> from azureml.core.webservice import AciWebservice, Webservice
    >>>
    >>> # Load or create an Azure ML Workspace
    >>> workspace_name = "<Name of your Azure ML workspace>"
    >>> subscription_id = "<Your Azure subscription ID>"
    >>> resource_group = "<Name of the Azure resource group in which to create Azure ML resources>"
    >>> location = "<Name of the Azure location (region) in which to create Azure ML resources>"
    >>> azure_workspace = Workspace.create(name=workspace_name,
    >>>                                    subscription_id=subscription_id,
    >>>                                    resource_group=resource_group,
    >>>                                    location=location,
    >>>                                    create_resource_group=True,
    >>>                                    exist_ok=True)
    >>>
    >>> # Build an Azure ML Container Image for an MLflow model
    >>> azure_image, azure_model = mlflow.azureml.build_image(
    >>>                                 model_path="<model_path>",
    >>>                                 workspace=azure_workspace,
    >>>                                 synchronous=True)
    >>> # If your image build failed, you can access build logs at the following URI:
    >>> print("Access the following URI for build logs: {}".format(azure_image.image_build_log_uri))
    >>>
    >>> # Deploy the image to Azure Container Instances (ACI) for real-time serving
    >>> webservice_deployment_config = AciWebservice.deploy_configuration()
    >>> webservice = Webservice.deploy_from_image(
    >>>                    image=azure_image, workspace=azure_workspace, name="<deployment-name>")
    >>> webservice.wait_for_deployment()
    """
    # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should
    # still be accessible for import from Python 2. Therefore, we will only import from the SDK
    # upon method invocation.
    # pylint: disable=import-error
    from azureml.core.image import ContainerImage
    from azureml.core.model import Model as AzureModel

    if run_id is not None:
        absolute_model_path = _get_model_log_dir(model_name=model_path, run_id=run_id)
    else:
        absolute_model_path = os.path.abspath(model_path)

    model_pyfunc_conf = _load_pyfunc_conf(model_path=absolute_model_path)
    model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None)
    if model_python_version is not None and\
            StrictVersion(model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(
                message=("Azure ML can only deploy models trained in Python 3 or above! Please see"
                         " the following MLflow GitHub issue for a thorough explanation of this"
                         " limitation and a workaround to enable support for deploying models"
                         " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"),
                error_code=INVALID_PARAMETER_VALUE)

    tags = _build_tags(relative_model_path=model_path, run_id=run_id,
                       model_python_version=model_python_version, user_tags=tags)

    if image_name is None:
        image_name = _get_mlflow_azure_resource_name()
    if model_name is None:
        model_name = _get_mlflow_azure_resource_name()

    with TempDir(chdr=True) as tmp:
        model_directory_path = tmp.path("model")
        tmp_model_path = os.path.join(
            model_directory_path,
            _copy_file_or_tree(src=absolute_model_path, dst=model_directory_path))

        registered_model = AzureModel.register(workspace=workspace, model_path=tmp_model_path,
                                               model_name=model_name, tags=tags,
                                               description=description)
        eprint("Registered an Azure Model with name: `{model_name}` and version:"
               " `{model_version}`".format(model_name=registered_model.name,
                                           model_version=registered_model.version))

        # Create an execution script (entry point) for the image's model server. Azure ML requires
        # the container's execution script to be located in the current working directory during
        # image creation, so we create the execution script as a temporary file in the current
        # working directory.
        execution_script_path = tmp.path("execution_script.py")
        _create_execution_script(output_path=execution_script_path, azure_model=registered_model)
        # Azure ML copies the execution script into the image's application root directory by
        # prepending "/var/azureml-app" to the specified script path. The script is then executed
        # by referencing its path relative to the "/var/azureml-app" directory. Unfortunately,
        # if the script path is an absolute path, Azure ML attempts to reference it directly,
        # resulting in a failure. To circumvent this problem, we provide Azure ML with the relative
        # script path. Because the execution script was created in the current working directory,
        # this relative path is the script path's base name.
        execution_script_path = os.path.basename(execution_script_path)

        if mlflow_home is not None:
            eprint("Copying the specified mlflow_home directory: `{mlflow_home}` to a temporary"
                   " location for container creation".format(mlflow_home=mlflow_home))
            mlflow_home = os.path.join(tmp.path(),
                                       _copy_project(src_path=mlflow_home, dst_path=tmp.path()))
            image_file_dependencies = [mlflow_home]
        else:
            image_file_dependencies = None
        dockerfile_path = tmp.path("Dockerfile")
        _create_dockerfile(output_path=dockerfile_path, mlflow_path=mlflow_home)

        conda_env_path = None
        if pyfunc.ENV in model_pyfunc_conf:
            conda_env_path = os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV])

        image_configuration = ContainerImage.image_configuration(
                execution_script=execution_script_path,
                runtime="python",
                docker_file=dockerfile_path,
                dependencies=image_file_dependencies,
                conda_file=conda_env_path,
                description=description,
                tags=tags,
        )
        image = ContainerImage.create(workspace=workspace,
                                      name=image_name,
                                      image_config=image_configuration,
                                      models=[registered_model])
        eprint("Building an Azure Container Image with name: `{image_name}` and version:"
               " `{image_version}`".format(
                   image_name=image.name,
                   image_version=image.version))
        if synchronous:
            image.wait_for_creation(show_output=True)
        return image, registered_model