def build_container():
    cd = CondaDependencies.create(pip_packages=[
        'azureml-sdk==1.0.39',
        'scikit-learn==0.21.1',
        'joblib==0.13.2'
    ])
    cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')

    model = get_best_model(model_name)
    print('model', model)

    img_config = ContainerImage.image_configuration(
        execution_script='score.py',
        runtime='python',
        conda_file='myenv.yml',
        dependencies=['.'])

    image_name = model_name.replace("_", "").lower()
    print("Image name:", image_name)

    image = Image.create(name=image_name,
                         models=[model],
                         image_config=img_config,
                         workspace=ws)
    image.wait_for_creation(show_output=True)

    if image.creation_state != 'Succeeded':
        raise Exception('Image creation status: {}'.format(image.creation_state))

    print('{}(v.{} [{}]) stored at {} with build log {}'.format(
        image.name, image.version, image.creation_state,
        image.image_location, image.image_build_log_uri))
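# Hedged sketch (not taken from any of the snippets in this collection) of the minimal
# contract the execution_script referenced everywhere here ("score.py") has to satisfy:
# a global init() that loads the model once, and run() that handles each scoring request.
# The registered model name and the joblib/ridge details are illustrative assumptions
# matching the scikit-learn examples below.
import json

import joblib
import numpy as np
from azureml.core.model import Model


def init():
    # Called once when the container starts; cache the deserialized model globally.
    global model
    model_path = Model.get_model_path("sklearn_model")  # hypothetical registered name
    model = joblib.load(model_path)


def run(raw_data):
    # Called per request; raw_data is the JSON payload posted to the web service.
    data = np.array(json.loads(raw_data)["data"])
    predictions = model.predict(data)
    return predictions.tolist()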
def main():
    # get workspace
    ws = load_workspace()
    model = Model.register(ws,
                           model_name='pytorch_mnist',
                           model_path='model.pth')

    # create dep file
    myenv = CondaDependencies()
    myenv.add_pip_package('numpy')
    myenv.add_pip_package('torch')
    with open('pytorchmnist.yml', 'w') as f:
        print('Writing out {}'.format('pytorchmnist.yml'))
        f.write(myenv.serialize_to_string())
        print('Done!')

    # create image
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="pytorchmnist.yml",
        dependencies=['./models.py'])
    image = Image.create(ws, 'pytorchmnist', [model], image_config)
    image.wait_for_creation(show_output=True)

    # create service
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        description='simple MNIST digit detection')
    service = Webservice.deploy_from_image(workspace=ws,
                                           image=image,
                                           name='pytorchmnist-svc',
                                           deployment_config=aciconfig)
    service.wait_for_deployment(show_output=True)
def run():
    print("entered run")
    variables_received = "sub_id: {}, rg: {}, work_name: {}, state: {}, author: {}, model_name: {}" \
        .format(resolve_sub_id(),
                resolve_rg(),
                resolve_workspace_name(),
                resolve_state(),
                resolve_author(),
                resolve_model_name())
    print(variables_received)

    az_ws = Workspace(resolve_sub_id(), resolve_rg(), resolve_workspace_name())
    print("initialized workspace")

    # Get & download model
    model = Model(az_ws,
                  name=resolve_model_name(),
                  tags={"state": resolve_state(), "created_by": resolve_author()})
    print("initialized model")
    model.download(target_dir="./assets/")
    print("downloaded model assets")

    # TODO: remove workaround for ml sdk dropping assets into /assets/dacrook folder
    # when files are dropped to a consistent location
    for dir_p, _, f_n in walk("./assets"):
        for f in f_n:
            abs_path = os.path.abspath(os.path.join(dir_p, f))
            shutil.move(abs_path, "./assets/" + f)

    # Configure image
    my_env = CondaDependencies.create(conda_packages=["numpy", "scikit-learn"])
    with open("myenv.yml", "w") as f:
        f.write(my_env.serialize_to_string())
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="myenv.yml",
        dependencies=["assets", "inference_code"],
        tags={"state": resolve_state(), "created_by": resolve_author()})
    print("configured image")

    # TODO: use this once model is dropped to a consistent location
    # image = Image.create(workspace=az_ws, name=resolve_image_name(), models=[model], image_config=image_config)
    image = Image.create(workspace=az_ws,
                         name=resolve_image_name(),
                         models=[model],
                         image_config=image_config)
    image.wait_for_creation()
    print("created image")
    if image.creation_state != "Succeeded":
        raise Exception("Failed to create image.")
    print("image location: {}".format(image.image_location))

    artifacts = {"image_location": image.image_location}
    if not os.path.exists("/artifacts/"):
        os.makedirs("/artifacts/")
    with open("/artifacts/artifacts.json", "w") as outjson:
        json.dump(artifacts, outjson)
def container_img(ws, model, score_script, env_file):
    image_config = ContainerImage.image_configuration(
        execution_script=score_script,
        runtime="python",
        conda_file=env_file)
    image = Image.create(name="TeamOmega",
                         models=[model],
                         image_config=image_config,
                         workspace=ws)
    image.wait_for_creation(show_output=True)
    return image
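# Hedged follow-up sketch (not part of the original snippet): the image returned by
# container_img() can be deployed as an ACI web service with Webservice.deploy_from_image,
# mirroring the pytorchmnist example earlier in this collection. The service name
# "teamomega-svc" and the CPU/memory sizing are illustrative assumptions.
from azureml.core.webservice import AciWebservice, Webservice

aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
image = container_img(ws, model, "score.py", "myenv.yml")
service = Webservice.deploy_from_image(workspace=ws,
                                       image=image,
                                       name="teamomega-svc",  # hypothetical service name
                                       deployment_config=aci_config)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)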
def build_image():
    """Build the docker image to hold the model."""
    load_dotenv(find_dotenv())
    chdir("deploy")
    ws = Workspace(
        workspace_name=getenv("AML_WORKSPACE_NAME"),
        subscription_id=getenv("AML_SUBSCRIPTION_ID"),
        resource_group=getenv("AML_RESOURCE_GROUP"),
    )
    model = Model(ws, getenv("AML_MODEL_NAME"))
    image_config = ContainerImage.image_configuration(
        runtime="python",
        execution_script="score.py",
        conda_file="container_conda_env.yml")
    image = Image.create(name=getenv("AML_IMAGE_NAME"),
                         models=[model],
                         image_config=image_config,
                         workspace=ws)
    image.wait_for_creation(show_output=True)
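# Hedged sketch of the .env file that build_image() expects load_dotenv(find_dotenv()) to
# pick up. The variable names come from the getenv() calls above; every value below is a
# placeholder, not from the original source.
#
#   AML_WORKSPACE_NAME=my-aml-workspace
#   AML_SUBSCRIPTION_ID=<subscription-guid>
#   AML_RESOURCE_GROUP=my-resource-group
#   AML_MODEL_NAME=my-registered-model
#   AML_IMAGE_NAME=my-model-image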
def amls_model_to_image(amls_config, workspace, model):
    """
    Deploy a published AMLS model as a docker image in AMLS' ACR.

    :param amls_config:
    :param workspace:
    :param model:
    :return:
    """
    script = "score.py"
    conda_file = "conda_dependencies.yml"
    save_conda_dependencies(amls_config, conda_file)

    if amls_config['docker_file']:
        docker_file = amls_config['docker_file']
    else:
        docker_file = None

    image_config = ContainerImage.image_configuration(
        runtime="python",
        execution_script=script,
        conda_file=conda_file,
        tags=amls_config['tags'],
        description=amls_config['description'],
        docker_file=docker_file)

    logger.info("Deploying image.")
    image = Image.create(
        name='image',
        # this is the model object
        models=[model],
        image_config=image_config,
        workspace=workspace)
    image.wait_for_creation(show_output=True)
    image.update_creation_state()
    return image
os.chdir("./scripts/scoring") image_name = "arima-forecast-score" image_config = ContainerImage.image_configuration( execution_script="score.py", runtime="python-slim", conda_file="conda_dependencies.yml", description="Image with robberies arima forecasting model", tags={ "area": "robberies", "type": "forecasting" }, ) image = Image.create(name=image_name, models=[model], image_config=image_config, workspace=ws) image.wait_for_creation(show_output=True) os.chdir("../..") if image.creation_state != "Succeeded": raise Exception("Image creation status: {image.creation_state}") print("{}(v.{} [{}]) stored at {} with build log {}".format( image.name, image.version, image.creation_state, image.image_location, image.image_build_log_uri, ))
print('src directory: {}'.format(os.getcwd()))

# Set image configuration based on dependencies and AI Camera hardware
image_config = IotContainerImage.image_configuration(
    architecture="arm32v7",
    execution_script="main.py",
    dependencies=[
        "camera.py",
        "iot.py",
        "ipcprovider.py",
        "utility.py",
        "frame_iterators.py",
        "azureStorage.py"
    ],
    docker_file="Dockerfile",
    tags=cfg.IMAGE_TAGS,
    description=cfg.IMAGE_DESCRIPTION)

# Create image on the AML workspace to be loaded onto the device
image = Image.create(
    name=cfg.IMAGE_NAME,
    # this is the model object
    models=[converted_model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(show_output=True)

# Change working directory back to workspace root.
ChangeDir(current_dir)
print('current directory: {}'.format(os.getcwd()))

#%% [markdown]
# ## Write .ENV File

#%%
# Getting your container details; prepares all parameters of the container to be written to env_file below
model = Model(ws, name=MODEL_NAME, version=MODEL_VERSION)

os.chdir("./code/scoring")
image_config = ContainerImage.image_configuration(
    execution_script="score.py",
    runtime="python",
    conda_file="conda_dependencies.yml",
    description="Image with ridge regression model",
    tags={
        "area": "diabetes",
        "type": "regression"
    },
)
image = Image.create(name=IMAGE_NAME,
                     models=[model],
                     image_config=image_config,
                     workspace=ws)
image.wait_for_creation(show_output=True)

if image.creation_state != "Succeeded":
    raise Exception("Image creation status: {}".format(image.creation_state))

print("{}(v.{} [{}]) stored at {} with build log {}".format(
    image.name,
    image.version,
    image.creation_state,
    image.image_location,
    image.image_build_log_uri,
))
from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies.create(conda_packages=['scikit-learn', 'joblib'])
with open("myenv.yml", "w") as f:
    f.write(myenv.serialize_to_string())

from azureml.core.image import Image
from azureml.core.image import ContainerImage

# configure the image
image_config = ContainerImage.image_configuration(execution_script="score.py",
                                                  runtime="python",
                                                  conda_file="myenv.yml")

image = Image.create(
    name="fdc-oneclasssvm",
    # this is the model object
    models=[model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(show_output=True)

# Create Container Instance
from azureml.core.webservice import AciWebservice

aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={
                                                   "data": "fdc",
                                                   "method": WhatModel
                                               },
                                               description='fdc')
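# Hedged continuation (not in the original snippet): with image and aciconfig in hand, the
# usual next step is Webservice.deploy_from_image, as the pytorchmnist example above does.
# The service name "fdc-oneclasssvm-svc" is an illustrative assumption.
from azureml.core.webservice import Webservice

service = Webservice.deploy_from_image(workspace=ws,
                                       image=image,
                                       name="fdc-oneclasssvm-svc",  # hypothetical name
                                       deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)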
image_config = ContainerImage.image_configuration(
    runtime="python",
    execution_script="score.py",
    conda_file="myenv.yml",
    tags={
        "data": "meteosalut",
        "method": "knn"
    },
    description="Image test knn sur donnees meteo")
# os.chdir(old_wd)

image = Image.create(
    name="myimage1",
    # this is the model object. Note you can pass in 0-n models via this list-type parameter
    # in case you need to reference multiple models, or none at all, in your scoring script.
    models=[model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(True)

# Create a container configuration file
from azureml.core.webservice import AciWebservice

aciconfig = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags={
        "data": "meteo",
        "method": "knn"
    },
description="Ridge regression model to predict diabetes") regression_models = ws.models(tag="regression") for m in regression_models: print("Name:", m.name, "\tVersion:", m.version, "\tDescription:", m.description, m.tags) model = regression_models[-1] print(model.description) from azureml.core.image import Image image = Image.create(name="myimage", workspace=ws, models=[model], runtime="python", execution_script="score-2.py", conda_file="myenv.yml", tags=["diabetes", "regression"], description="Image with ridge regression model") image.wait_for_creation(show_output=True) for i in ws.images(tag="diabetes"): print('{}(v.{} [{}]) stored at {} with build log {}'.format( i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri)) aciconfig = AciWebservice.deploy_configuration( cpu_cores=1, memory_gb=1, tags=['regression', 'diabetes'],
    subscription_id=subscription_id,
    resource_group=resource_group,
    location=workspace_region,
    exist_ok=True)
print("Workspace Provisioning complete.")

# Step 2 - Build the ContainerImage for the IoT Edge Module
###########################################################
from azureml.core.image import ContainerImage, Image

runtime = "python"
driver_file = "iot_score.py"
conda_file = "myenv.yml"

image_config = ContainerImage.image_configuration(execution_script=driver_file,
                                                  runtime=runtime,
                                                  conda_file=conda_file)

model = Model.register(model_path="model.pkl",
                       model_name="iot_model.pkl",
                       workspace=ws)

image = Image.create(
    name="iotimage",
    # this is the model object
    models=[model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(show_output=True)
myenv = CondaDependencies.create(
    pip_packages=ast.literal_eval(config['docker']['pip_packages']),
    conda_packages=ast.literal_eval(config['train']['conda_packages']))
myenv.add_pip_package("pynacl==1.2.1")

# CREATE CONDA ENVIRONMENT FILE
with open(config['docker']['conda_env_file'], "w") as f:
    f.write(myenv.serialize_to_string())

# Create docker image
from azureml.core.image import Image, ContainerImage

image_config = ContainerImage.image_configuration(
    runtime="python",
    execution_script=config['docker']['path_scoring_script'],
    conda_file=config['docker']['conda_env_file'],
    tags={
        'area': "meter_classification",
        'type': "meter_classification"
    },
    description="Image with re-trained vgg model")

image = Image.create(
    name=config['docker']['docker_image_name'],
    # this is the model object
    models=[model],
    image_config=image_config,
    workspace=ws)
image.wait_for_creation(show_output=True)
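# Hedged sketch (not from the original source) of the config file the snippet above assumes,
# loaded with configparser. The pip_packages / conda_packages values are stored as Python
# list literals, which is why the snippet parses them with ast.literal_eval. The file name
# and all values below are illustrative placeholders derived from the keys used above.
import configparser

config = configparser.ConfigParser()
config.read("config.ini")  # hypothetical file name
# Example contents:
# [docker]
# pip_packages = ["azureml-sdk", "pillow"]
# conda_env_file = scoring_env.yml
# path_scoring_script = score.py
# docker_image_name = meter-classification
#
# [train]
# conda_packages = ["numpy", "scikit-learn"]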