def main():
    # get workspace
    ws = load_workspace()
    model = Model.register(ws,
                           model_name='pytorch_mnist',
                           model_path='model.pth')

    # create dep file
    myenv = CondaDependencies()
    myenv.add_pip_package('numpy')
    myenv.add_pip_package('torch')
    with open('pytorchmnist.yml', 'w') as f:
        print('Writing out {}'.format('pytorchmnist.yml'))
        f.write(myenv.serialize_to_string())
        print('Done!')

    # create image
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="pytorchmnist.yml",
        dependencies=['./models.py'])

    image = Image.create(ws, 'pytorchmnist', [model], image_config)
    image.wait_for_creation(show_output=True)

    # create service
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1, memory_gb=1, description='simple MNIST digit detection')
    service = Webservice.deploy_from_image(workspace=ws,
                                           image=image,
                                           name='pytorchmnist-svc',
                                           deployment_config=aciconfig)
    service.wait_for_deployment(show_output=True)
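    # Smoke-test sketch: the payload shape (a 'data' key holding one flattened
    # 28x28 image) is an assumption, as is an `import json` at module level.
    print('Scoring URI: {}'.format(service.scoring_uri))
    sample = json.dumps({'data': [[0.0] * 784]})  # hypothetical all-zero digit
    print(service.run(input_data=sample))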
Example #2
def build_container():

    cd = CondaDependencies.create(pip_packages=[
        'azureml-sdk==1.0.39', 'scikit-learn==0.21.1', 'joblib==0.13.2'
    ])

    cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')

    model = get_best_model(model_name)
    print('model', model)

    img_config = ContainerImage.image_configuration(
        execution_script='score.py',
        runtime='python',
        conda_file='myenv.yml',
        dependencies=['.'])

    image_name = model_name.replace("_", "").lower()

    print("Image name:", image_name)

    image = Image.create(name=image_name,
                         models=[model],
                         image_config=img_config,
                         workspace=ws)

    image.wait_for_creation(show_output=True)

    if image.creation_state != 'Succeeded':
        raise Exception(f'Image creation status: {image.creation_state}')

    print('{}(v.{} [{}]) stored at {} with build log {}'.format(
        image.name, image.version, image.creation_state, image.image_location,
        image.image_build_log_uri))
Example #3
def run():
    print("entered run")
    variables_received = "sub_id: {}, rg: {}, work_name: {}, state: {}, author: {}, model_name: {}" \
                            .format(resolve_sub_id(),
                                    resolve_rg(),
                                    resolve_workspace_name(),
                                    resolve_state(),
                                    resolve_author(),
                                    resolve_model_name())
    print(variables_received)

    az_ws = Workspace(resolve_sub_id(), resolve_rg(), resolve_workspace_name())
    print("initialized workspace")
    #Get & Download model
    model = Model(az_ws,
                  name=resolve_model_name(),
                  tags={
                      "state": resolve_state(),
                      "created_by": resolve_author()
                  })
    print("initialized model")
    model.download(target_dir="./assets/")
    print("downloaded model assets")
    # TODO: remove this workaround once the ML SDK drops model files into a consistent location (they currently land in /assets/dacrook/)
    for dir_p, _, f_n in walk("./assets"):
        for f in f_n:
            abs_path = os.path.abspath(os.path.join(dir_p, f))
            shutil.move(abs_path, "./assets/" + f)

    #Configure Image
    my_env = CondaDependencies.create(conda_packages=["numpy", "scikit-learn"])
    with open("myenv.yml", "w") as f:
        f.write(my_env.serialize_to_string())
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="myenv.yml",
        dependencies=["assets", "inference_code"],
        tags={
            "state": resolve_state(),
            "created_by": resolve_author()
        })
    print("configured image")
    #TODO: use this once model is dropped to a consistent location
    #    image = Image.create(workspace = az_ws, name=resolve_image_name(), models=[model], image_config = image_config)
    image = Image.create(workspace=az_ws,
                         name=resolve_image_name(),
                         models=[model],
                         image_config=image_config)
    image.wait_for_creation()
    print("created image")
    if image.creation_state != "Succeeded":
        raise Exception("Failed to create image.")
    print("image location: {}".format(image.image_location))
    artifacts = {"image_location": image.image_location}
    if not os.path.exists("/artifacts/"):
        os.makedirs("/artifacts/")
    with open("/artifacts/artifacts.json", "w") as outjson:
        json.dump(artifacts, outjson)
Example #4
def container_img(ws, model, score_script, env_file):
    image_config = ContainerImage.image_configuration(
        execution_script=score_script, runtime="python", conda_file=env_file)
    image = Image.create(name="TeamOmega",
                         models=[model],
                         image_config=image_config,
                         workspace=ws)
    image.wait_for_creation(show_output=True)
    return image
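# Example usage (a sketch; the script and conda file names are assumptions):
# image = container_img(ws, model, score_script="score.py", env_file="myenv.yml")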
Example #5
def build_image():
    """Build the docker image to hold the model."""
    load_dotenv(find_dotenv())

    chdir("deploy")
    ws = Workspace(
        workspace_name=getenv("AML_WORKSPACE_NAME"),
        subscription_id=getenv("AML_SUBSCRIPTION_ID"),
        resource_group=getenv("AML_RESOURCE_GROUP"),
    )
    model = Model(ws, getenv("AML_MODEL_NAME"))

    image_config = ContainerImage.image_configuration(
        runtime="python",
        execution_script="score.py",
        conda_file="container_conda_env.yml")

    image = Image.create(name=getenv("AML_IMAGE_NAME"),
                         models=[model],
                         image_config=image_config,
                         workspace=ws)

    image.wait_for_creation(show_output=True)
Example #6
def amls_model_to_image(amls_config, workspace, model):
    """
    Deploy a published AMLS model as docker image in AMLS' ACR.

    :param amls_config: dict of AMLS deployment settings ('tags', 'description', and optional 'docker_file')
    :param workspace: azureml.core.Workspace in which to create the image
    :param model: registered azureml.core.model.Model to package
    :return: the created azureml.core.image.Image
    """

    script = "score.py"
    conda_file = "conda_dependencies.yml"
    save_conda_dependencies(amls_config, conda_file)
    if amls_config['docker_file']:
        docker_file = amls_config['docker_file']
    else:
        docker_file = None

    image_config = ContainerImage.image_configuration(
        runtime="python",
        execution_script=script,
        conda_file=conda_file,
        tags=amls_config['tags'],
        description=amls_config['description'],
        docker_file=docker_file)
    logger.info(f"Deploying image.")
    image = Image.create(
        name='image',
        # this is the model object
        models=[model],
        image_config=image_config,
        workspace=workspace)
    image.wait_for_creation(show_output=True)
    image.update_creation_state()

    return image
Example #7
print('src directory: {}'.format(os.getcwd()))
#Set image configuration based on dependencies and AI Camera hardware
image_config = IotContainerImage.image_configuration(
    architecture="arm32v7",
    execution_script="main.py",
    dependencies=[
        "camera.py", "iot.py", "ipcprovider.py", "utility.py",
        "frame_iterators.py", "azureStorage.py"
    ],
    docker_file="Dockerfile",
    tags=cfg.IMAGE_TAGS,
    description=cfg.IMAGE_DESCRIPTION)
#create image on AML Workspace to be loaded onto device
image = Image.create(
    name=cfg.IMAGE_NAME,
    # this is the model object
    models=[converted_model],
    image_config=image_config,
    workspace=ws)

image.wait_for_creation(show_output=True)

# Change working directory back to workspace root.
ChangeDir(current_dir)
print('current directory: {}'.format(os.getcwd()))

#%% [markdown]
# ## Write .ENV File

#%%
# Get your container details; these parameters of the container are written to the env_file below
model = Model(ws, name=MODEL_NAME, version=MODEL_VERSION)
os.chdir("./code/scoring")

image_config = ContainerImage.image_configuration(
    execution_script="score.py",
    runtime="python",
    conda_file="conda_dependencies.yml",
    description="Image with ridge regression model",
    tags={
        "area": "diabetes",
        "type": "regression"
    },
)

image = Image.create(name=IMAGE_NAME,
                     models=[model],
                     image_config=image_config,
                     workspace=ws)

image.wait_for_creation(show_output=True)

if image.creation_state != "Succeeded":
    raise Exception("Image creation status: {image.creation_state}")

print("{}(v.{} [{}]) stored at {} with build log {}".format(
    image.name,
    image.version,
    image.creation_state,
    image.image_location,
    image.image_build_log_uri,
))
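# Write the container details gathered above to the .ENV file referenced in the
# markdown cell. This is a sketch: ENV_FILE_PATH and the variable names below
# are assumptions, not part of the original pipeline.
ENV_FILE_PATH = "../../.env"

with open(ENV_FILE_PATH, "w") as env_file:
    env_file.write("IMAGE_NAME={}\n".format(image.name))
    env_file.write("IMAGE_VERSION={}\n".format(image.version))
    env_file.write("IMAGE_LOCATION={}\n".format(image.image_location))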
Example #9
# MAGIC %md ## Deploy the model to "dev" using [Azure Container Instances (ACI)](https://docs.microsoft.com/en-us/azure/container-instances/)
# MAGIC 
# MAGIC The [ACI platform](https://docs.microsoft.com/en-us/azure/container-instances/) is the recommended environment for staging and developmental model deployments.

# COMMAND ----------

# MAGIC %md ### Create an ACI webservice deployment using the model's Container Image
# MAGIC 
# MAGIC Using the Azure ML SDK, deploy the Container Image for the trained MLflow model to ACI.

# COMMAND ----------

from azureml.core.webservice import AciWebservice, Webservice
from azureml.core.image import Image

model_image = Image(workspace, id=model_image_id)

dev_webservice_name = "wine-quality-aci"
dev_webservice_deployment_config = AciWebservice.deploy_configuration()
dev_webservice = Webservice.deploy_from_image(name=dev_webservice_name,
                                              image=model_image,
                                              deployment_config=dev_webservice_deployment_config,
                                              workspace=workspace,
                                              deployment_target=None,
                                              overwrite=True)

dev_webservice.wait_for_deployment()

# COMMAND ----------

# MAGIC %md ## Query the deployed model in "dev"

# COMMAND ----------
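
# MAGIC %md A minimal sketch of querying the dev webservice over REST. The sample payload below (its feature columns and values) is an assumption, not the actual wine-quality schema.

# COMMAND ----------

import json
import requests

# Pandas-split style payload; replace the columns and values with the real feature schema.
sample_input = {
    "columns": ["fixed acidity", "volatile acidity", "alcohol"],
    "data": [[7.4, 0.70, 9.4]],
}

response = requests.post(
    url=dev_webservice.scoring_uri,
    data=json.dumps(sample_input),
    headers={"Content-Type": "application/json"},
)
print(response.json())

# COMMAND ----------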

# MAGIC %md ### Load dataset
Example #10
from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies.create(conda_packages=['scikit-learn', 'joblib'])

with open("myenv.yml", "w") as f:
    f.write(myenv.serialize_to_string())

from azureml.core.image import Image
from azureml.core.image import ContainerImage
# configure the image
image_config = ContainerImage.image_configuration(execution_script="score.py",
                                                  runtime="python",
                                                  conda_file="myenv.yml")
image = Image.create(
    name="fdc-oneclasssvm",
    # this is the model object
    models=[model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(show_output=True)

# Create Container Instance

from azureml.core.webservice import AciWebservice

aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={
                                                   "data": "fdc",
                                                   "method": WhatModel
                                               },
                                               description='fdc')
image_config = ContainerImage.image_configuration(
    runtime="python",
    execution_script="score.py",
    conda_file="myenv.yml",
    tags={
        "data": "meteosalut",
        "method": "knn"
    },
    description="Image test knn sur donnees meteo")

# os.chdir(old_wd)

image = Image.create(
    name="myimage1",
    # This is the model object. Note that you can pass 0..n models via this list
    # parameter if your scoring script references multiple models, or none at all.
    models=[model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(show_output=True)

#Create a container configuration file
from azureml.core.webservice import AciWebservice

aciconfig = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags={
        "data": "meteo",
        "method": "knn"
    },
Example #12
    description="Ridge regression model to predict diabetes")

regression_models = ws.models(tag="regression")
for m in regression_models:
    print("Name:", m.name, "\tVersion:", m.version, "\tDescription:",
          m.description, m.tags)

model = regression_models[-1]
print(model.description)

from azureml.core.image import Image

image = Image.create(name="myimage",
                     workspace=ws,
                     models=[model],
                     runtime="python",
                     execution_script="score-2.py",
                     conda_file="myenv.yml",
                     tags=["diabetes", "regression"],
                     description="Image with ridge regression model")

image.wait_for_creation(show_output=True)

for i in ws.images(tag="diabetes"):
    print('{}(v.{} [{}]) stored at {} with build log {}'.format(
        i.name, i.version, i.creation_state, i.image_location,
        i.image_build_log_uri))

aciconfig = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags=['regression', 'diabetes'],
def test_scoring_image_present(get_ws_config):
    image_list = Image.list(ws, model_name="model.pkl")
    assert len(image_list) > 0, "No image deployed with model.pkl"
Example #14
import os, json, datetime, sys
from operator import attrgetter
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core.image import Image
from azureml.core.webservice import Webservice
from azureml.core.webservice import AciWebservice
from azureml.core.authentication import AzureCliAuthentication

cli_auth = AzureCliAuthentication()
# Get workspace
ws = Workspace.from_config(auth=cli_auth)

# Get the Image to deploy details
image = Image(workspace=ws, name="myonnxmodelimage")
print(image)

aciconfig = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags={
        "area": "MNIST",
        "type": "DNN"
    },
    description="Description",
)

aci_service_name = "aciwebservice"

service = Webservice.deploy_from_image(deployment_config=aciconfig,
                                       image=image,
                                       name=aci_service_name,
                                       workspace=ws)
Example #15
# Get workspace
ws = Workspace.from_config(auth=cli_auth)

# Get the Image to deploy details
try:
    with open("aml_config/image.json") as f:
        config = json.load(f)
except:
    print("No new model, thus no deployment on ACI")
    # raise Exception('No new model to register as production model perform better')
    sys.exit(0)

image_name = config["image_name"]
image_version = config["image_version"]

images = Image.list(workspace=ws)
image, = (m for m in images
          if m.version == image_version and m.name == image_name)
print(
    "From image.json, Image used to deploy webservice on ACI: {}\nImage Version: {}\nImage Location = {}"
    .format(image.name, image.version, image.image_location))

# image = max(images, key=attrgetter('version'))
# print('From Max Version, Image used to deploy webservice on ACI: {}\nImage Version: {}\nImage Location = {}'.format(image.name, image.version, image.image_location))

# Check if AKS already Available
try:
    with open("aml_config/aks_webservice.json") as f:
        config = json.load(f)
    aks_name = config["aks_name"]
    aks_service_name = config["aks_service_name"]
# COMMAND ----------

from azureml.core.image import Image, ContainerImage

image_config = ContainerImage.image_configuration(
    runtime="python",
    execution_script="score.py",
    conda_file="myenv.yml",
    dependencies=["myconfig.json"],
    tags={'area': "Credit scoring", 'type': "classification"},
    description="Credit Scoring model image")

image = Image.create(name = "credit-scoring-" + notebook_username,
                     # this is the model object. note you can pass in 0-n models via this list-type parameter
                     # in case you need to reference multiple models, or none at all, in your scoring script.
                     models = [model],
                     image_config = image_config, 
                     workspace = ws)

# COMMAND ----------

image.wait_for_creation(show_output=True)

# COMMAND ----------

# MAGIC %md List images by tag and find out the detailed build log for debugging.

# COMMAND ----------

for i in Image.list(workspace=ws, tags=["area"]):
    print('{}(v.{} [{}]) stored at {} with build log {}'.format(
        i.name, i.version, i.creation_state, i.image_location,
        i.image_build_log_uri))
Example #17
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region,
                      exist_ok=True)

print("Workspace Provisioning complete.")

# Step 2 - Build the ContainerImage for the IoT Edge Module
###########################################################
from azureml.core.image import ContainerImage, Image

runtime = "python"
driver_file = "iot_score.py"
conda_file = "myenv.yml"

image_config = ContainerImage.image_configuration(execution_script=driver_file,
                                                  runtime=runtime,
                                                  conda_file=conda_file)

model = Model.register(model_path="model.pkl",
                       model_name="iot_model.pkl",
                       workspace=ws)

image = Image.create(
    name="iotimage",
    # this is the model object
    models=[model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(show_output=True)
# COMMAND ----------

aks_target.wait_for_completion(show_output=True)

# COMMAND ----------

# MAGIC %md ### Deploy the model's image to the specified AKS cluster

# COMMAND ----------

from azureml.core.webservice import Webservice, AksWebservice
from azureml.core.image import Image

# Get Model
model_image = Image(aksml_workspace, id=model_image_id)

# Get Webservice
prod_webservice_name = "drinks-quality-aks"
try:
    prod_webservice = Webservice(aksml_workspace, prod_webservice_name)
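# A sketch of the fallback branch, assuming the webservice does not exist yet and
# that `aks_target` is the AKS cluster attached earlier in the notebook.
except Exception:
    prod_webservice = Webservice.deploy_from_image(
        workspace=aksml_workspace,
        name=prod_webservice_name,
        image=model_image,
        deployment_config=AksWebservice.deploy_configuration(),
        deployment_target=aks_target)
    prod_webservice.wait_for_deployment(show_output=True)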

# Note that the following command can take a few minutes. An image can contain multiple models.

# In[ ]:

from azureml.core.image import Image, ContainerImage

image_config = ContainerImage.image_configuration(
    runtime="python",
    execution_script="score.py",
    conda_file="conda_dependencies.yml")

image = Image.create(
    name="ftkimage1",
    # this is the model object
    models=[model],
    image_config=image_config,
    workspace=ws)

# Monitor image creation.

# In[ ]:

image.wait_for_creation(show_output=True)

# List images and find out the detailed build log for debugging.

# In[ ]:

for i in Image.list(workspace=ws):
    print('{}(v.{} [{}]) stored at {} with build log {}'.format(
        i.name, i.version, i.creation_state, i.image_location,
        i.image_build_log_uri))
Example #20
os.chdir("./scripts/scoring")
image_name = "arima-forecast-score"

image_config = ContainerImage.image_configuration(
    execution_script="score.py",
    runtime="python-slim",
    conda_file="conda_dependencies.yml",
    description="Image with robberies arima forecasting model",
    tags={
        "area": "robberies",
        "type": "forecasting"
    },
)

image = Image.create(name=image_name,
                     models=[model],
                     image_config=image_config,
                     workspace=ws)

image.wait_for_creation(show_output=True)
os.chdir("../..")

if image.creation_state != "Succeeded":
    raise Exception("Image creation status: {image.creation_state}")

print("{}(v.{} [{}]) stored at {} with build log {}".format(
    image.name,
    image.version,
    image.creation_state,
    image.image_location,
    image.image_build_log_uri,
))
Example #21
myenv = CondaDependencies.create(
    pip_packages=ast.literal_eval(config['docker']['pip_packages']),
    conda_packages=ast.literal_eval(config['train']['conda_packages']))
myenv.add_pip_package("pynacl==1.2.1")

# CREATE CONDA ENVIRONMENT FILE
with open(config['docker']['conda_env_file'], "w") as f:
    f.write(myenv.serialize_to_string())

# Create docker image
from azureml.core.image import Image, ContainerImage

image_config = ContainerImage.image_configuration(
    runtime="python",
    execution_script=config['docker']['path_scoring_script'],
    conda_file=config['docker']['conda_env_file'],
    tags={
        'area': "meter_classification",
        'type': "meter_classification"
    },
    description="Image with re-trained vgg model")

image = Image.create(
    name=config['docker']['docker_image_name'],
    # this is the model object
    models=[model],
    image_config=image_config,
    workspace=ws)

image.wait_for_creation(show_output=True)