def _predict_remotely(self, ws, experiment, predict_data, model_id, threshold):
    input_payload = predict_data.to_json(orient='split', index=False)

    remote_run = AutoMLRun(experiment=experiment, run_id=model_id)
    model_name = remote_run.properties['model_name']

    aci_service_name = self._aci_service_name(model_name)
    aci_service = AciWebservice(ws, aci_service_name)

    input_payload = json.loads(input_payload)
    # For a classification model, probabilities are returned by switching
    # the scoring method to 'predict_proba'.
    method = 'predict'
    if threshold is not None:
        method = 'predict_proba'
    input_payload = {
        'method': method,
        'data': input_payload['data']
    }
    input_payload = json.dumps(input_payload)
    try:
        response = aci_service.run(input_data=input_payload)
        print(response)
    except Exception as e:
        print('err log', aci_service.get_logs())
        raise e

    results_proba = None
    proba_classes = None
    return json.loads(response)['result'], results_proba, proba_classes
def main():
    ws = Workspace.from_config()

    # Pin the environment dependencies.
    conda = CondaDependencies()
    conda.add_conda_package("python==3.5")
    conda.add_pip_package("h5py==2.8.0")
    conda.add_pip_package("html5lib==1.0.1")
    conda.add_pip_package("keras==2.2.0")
    conda.add_pip_package("Keras-Applications==1.0.2")
    conda.add_pip_package("Keras-Preprocessing==1.0.1")
    conda.add_pip_package("matplotlib==2.2.2")
    conda.add_pip_package("numpy==1.14.5")
    conda.add_pip_package("opencv-python==3.3.0.9")
    conda.add_pip_package("pandas==0.23.3")
    conda.add_pip_package("Pillow==5.2.0")
    conda.add_pip_package("requests==2.19.1")
    conda.add_pip_package("scikit-image==0.14.0")
    conda.add_pip_package("scikit-learn==0.19.2")
    conda.add_pip_package("scipy==1.1.0")
    conda.add_pip_package("sklearn==0.0")
    conda.add_pip_package("tensorflow==1.9.0")
    conda.add_pip_package("urllib3==1.23")
    conda.add_pip_package("azureml-sdk")

    with open("environment.yml", "w") as f:
        f.write(conda.serialize_to_string())
    with open("environment.yml", "r") as f:
        print(f.read())

    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="environment.yml",
        docker_file="Dockerfile",
        dependencies=DEPENDENCIES)

    # Workspace.webservices is a property, not callable; use Webservice.list
    # to filter by compute type.
    webservices = Webservice.list(ws, compute_type='ACI')

    image = ContainerImage.create(name="ai-bootcamp",
                                  models=[],
                                  image_config=image_config,
                                  workspace=ws)
    image.wait_for_creation(show_output=True)

    # Point the first existing ACI webservice at the newly built image.
    service_name = webservices[0].name
    aciwebservice = AciWebservice(ws, service_name)
    aciwebservice.update(image=image)
def _predict_remotely(self, predict_data, model_id, predict_proba):
    from azureml.core.webservice import AciWebservice
    from azureml.train.automl.run import AutoMLRun
    import numpy as np

    ws, experiment = self._get_experiment()

    remote_run = AutoMLRun(experiment=experiment, run_id=model_id)
    model_features, target_categories = self._get_remote_model_features(remote_run)

    if model_id.startswith("AutoML_"):
        model_name = remote_run.properties['model_name']
    else:
        model_name = model_id

    if model_features:
        predict_data = predict_data[model_features]

    input_payload = predict_data.to_json(orient='split', index=False)

    aci_service_name = self._aci_service_name(model_name)
    aci_service = AciWebservice(ws, aci_service_name)

    input_payload = json.loads(input_payload)
    # For a classification model, probabilities are returned by switching
    # the scoring method to 'predict_proba'.
    method = 'predict'
    if predict_proba:
        method = 'predict_proba'
    input_payload = {
        'data': {'data': input_payload['data'], 'method': method}
    }
    input_payload = json.dumps(input_payload)
    try:
        response = aci_service.run(input_data=input_payload)
    except Exception as e:
        log_file = 'automl_errors.log'
        fsclient.write_text_file(log_file, aci_service.get_logs(), mode="a")
        raise AzureException(
            "Prediction service error. Please redeploy the model. "
            "Log saved to file '%s'. Details: %s" % (log_file, str(e)))

    response = json.loads(response)
    if "error" in response or 'result' not in response:
        raise AzureException(
            'Prediction service returned an error: %s' % response.get('error'))

    results = response['result']
    results_proba = None
    proba_classes = None
    if predict_proba:
        results_proba = np.array(results)
        proba_classes = response['proba_classes']

    return results, results_proba, proba_classes, target_categories
def __deploy_model(self):
    service_name = self.__args.service_name

    model = Model(self.__ws, self.__args.model_name)
    explainer_model = Model(self.__ws, self.__args.explainer_model_name)

    myenv = Environment.from_conda_specification(
        name=self.__config.get('DEPLOY', 'ENV_NAME'),
        file_path=self.__config.get('DEPLOY', 'ENV_FILE_PATH'))
    inference_config = InferenceConfig(
        entry_script=self.__config.get('DEPLOY', 'SCORE_PATH'),
        environment=myenv,
        source_directory=self.__config.get('DEPLOY', 'DEPENDENCIES_DIRECTORY'))

    if not self.__args.update_deployment:
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=self.__config.getint('DEPLOY', 'ACI_CPU'),
            memory_gb=self.__config.getint('DEPLOY', 'ACI_MEM'),
            collect_model_data=True,
            enable_app_insights=True)
        service = Model.deploy(self.__ws, service_name,
                               [model, explainer_model],
                               inference_config, deployment_config)
    else:
        service = AciWebservice(self.__ws, service_name)
        service.update(models=[model, explainer_model],
                       inference_config=inference_config)

    service.wait_for_deployment(show_output=True)
    print(service.state)
    print(service.get_logs())
def call_web_service(e, service_name, body):
    aml_workspace = Workspace.get(name=e.workspace_name,
                                  subscription_id=e.subscription_id,
                                  resource_group=e.resource_group)
    print("Fetching service")
    headers = {'content-encoding': 'gzip'}
    service = AciWebservice(aml_workspace, service_name)
    if service.auth_enabled:
        service_keys = service.get_keys()
        headers['Authorization'] = 'Bearer ' + service_keys[0]
    print("Testing service")
    print(". url: %s" % service.scoring_uri)
    output = call_web_app(service.scoring_uri, headers, body)
    return output
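# `call_web_app` is referenced above but not defined in this section. A
# minimal sketch of what such a helper might look like, assuming the body is
# a pre-compressed JSON payload; the implementation is an assumption, not
# part of the source:
import requests

def call_web_app(url, headers, body=None):
    # POST the raw body to the scoring URI and fail loudly on non-2xx replies.
    response = requests.post(url, data=body, headers=headers)
    response.raise_for_status()
    return response.json()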
def deploy(self):
    myenv = CondaDependencies()
    myenv.add_pip_package("azureml-sdk")
    myenv.add_pip_package("joblib")
    myenv.add_pip_package("tensorflow")
    myenv.add_pip_package("Pillow")
    myenv.add_pip_package("azureml-dataprep[pandas,fuse]>=1.1.14")

    with open("diagnoz_env.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    huml_env = Environment.from_conda_specification(
        name="diagnoz_env",
        file_path="diagnoz_env.yml")

    inference_config = InferenceConfig(entry_script="score.py",
                                       source_directory='.',
                                       environment=huml_env)
    print("deployment files:")
    for root, dir_, files in os.walk(os.getcwd()):
        print("dir_", dir_)
        for filename in files:
            print("filename:", filename)

    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        tags={"data": "cancer-data", "method": "tensorflow"},
        description='Predicting cancer with tensorflow')

    # Delete any existing service with the same name before redeploying.
    try:
        AciWebservice(self.ws, self.config.DEPLOY_SERVICE_NAME).delete()
        print("webservice deleted")
    except WebserviceException:
        pass

    model = self.ws.models[self.config.MODEL_NAME]

    service = Model.deploy(workspace=self.ws,
                           name=self.config.DEPLOY_SERVICE_NAME,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig)
    service.wait_for_deployment(show_output=True)
    print("deployment succeeded")
def deploy_webservice_from_image(amls_config, workspace, image):
    """
    Deploy an AMLS docker image as a webservice in ACI.

    :param amls_config: dict with at least 'name', 'tags' and 'description' keys
    :param workspace: the AMLS Workspace to deploy into
    :param image: the ContainerImage to deploy
    :return: the deployed Webservice
    """
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        tags=amls_config['tags'],
        description=amls_config['description'])
    try:
        Webservice(workspace=workspace, name=amls_config['name']).delete()
        logger.info(f"Deleted existing webservice {amls_config['name']}")
    except WebserviceException:
        # No existing service to delete
        pass
    logger.info(f"Creating webservice {amls_config['name']}")
    service = Webservice.deploy_from_image(deployment_config=aciconfig,
                                           image=image,
                                           name=amls_config['name'],
                                           workspace=workspace)
    service.wait_for_deployment(show_output=True)
    return service
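# A minimal sketch of calling deploy_webservice_from_image; the config values
# below are illustrative placeholders, not from the source:
amls_config = {
    'name': 'my-aci-service',
    'tags': {'stage': 'dev'},
    'description': 'Example ACI deployment',
}
service = deploy_webservice_from_image(amls_config, workspace, image)
print(service.scoring_uri)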
def main(): """ Deploy model to your service """ work_space = Workspace.from_config() environment = Environment("keras-service-environment") environment.python.conda_dependencies = CondaDependencies.create( python_version="3.7.7", pip_packages=["azureml-defaults", "numpy", "tensorflow==2.3.1"], ) model = Model(work_space, "keras_mnist") model_list = model.list(work_space) validation_accuracy = [] version = [] for i in model_list: validation_accuracy.append(float(i.properties["val_accuracy"])) version.append(i.version) model = Model(work_space, "keras_mnist", version=version[np.argmax(validation_accuracy)]) service_name = "keras-mnist-service" inference_config = InferenceConfig(entry_script="score_keras.py", environment=environment) aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1) service = Model.deploy( workspace=work_space, name=service_name, models=[model], inference_config=inference_config, deployment_config=aci_config, overwrite=True, ) service.wait_for_deployment(show_output=True) print(service.get_logs())
def main():
    # get workspace
    ws = load_workspace()

    # register the trained model
    model = Model.register(ws,
                           model_name='pytorch_mnist',
                           model_path='model.pth')

    # create dependencies file
    myenv = CondaDependencies()
    myenv.add_pip_package('numpy')
    myenv.add_pip_package('torch')
    with open('pytorchmnist.yml', 'w') as f:
        print('Writing out {}'.format('pytorchmnist.yml'))
        f.write(myenv.serialize_to_string())
        print('Done!')

    # create image
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="pytorchmnist.yml",
        dependencies=['./models.py'])
    image = Image.create(ws, 'pytorchmnist', [model], image_config)
    image.wait_for_creation(show_output=True)

    # create service
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        description='simple MNIST digit detection')
    service = Webservice.deploy_from_image(workspace=ws,
                                           image=image,
                                           name='pytorchmnist-svc',
                                           deployment_config=aciconfig)
    service.wait_for_deployment(show_output=True)
def main():
    print('Loading workspace configuration...')
    ws = Workspace.from_config()

    print('Getting model...')
    model = Model(ws, 'yolov3-tf')

    print("Configuring deployment objects...")
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=2,
        memory_gb=2,
        tags={"data": "yolov3 tensorflow only"},
        description='yolov3 and tensorflow',
        dns_name_label='ceibatest')
    inference_config = InferenceConfig(entry_script="score.py",
                                       source_directory="../azure",
                                       conda_file='conda-cpu.yml',
                                       runtime='python')

    print("Deploying...")
    service = Model.deploy(workspace=ws,
                           name='yolov3-tf-deploy',
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig,
                           overwrite=True)
    service.wait_for_deployment(show_output=True)
    url = service.scoring_uri
    print(url)
def test_deployed_aci_service(data):
    webservice = AciWebservice(ws, endpointId)
    headers = {'Content-Type': 'application/json'}
    headers['Authorization'] = 'Bearer ' + webservice.get_keys()[0]
    test_sample = json.dumps(data)
    response = requests.post(webservice.scoring_uri,
                             data=test_sample,
                             headers=headers)
    if response.status_code != 200:
        raise Exception(
            'The service returned a non-success status code: {}'.format(
                response.status_code))
    # The test expects the service to echo the submitted payload back.
    if response.json() != test_sample:
        raise Exception('The scoring result is incorrect: {}'.format(
            response.json()))
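# A minimal sketch of exercising the test above; `ws` and `endpointId` are
# module-level globals in the source, and the payload shape here is an
# assumption for illustration only:
sample = {"data": [[0.1, 0.2, 0.3, 0.4]]}
test_deployed_aci_service(sample)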
def deploy(self):
    # Delete any existing service with the same name before redeploying.
    try:
        AciWebservice(self.ws, self.DEPLOY_SERVICE_NAME).delete()
        print("webservice deleted")
    except WebserviceException:
        pass

    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("joblib")
    conda_dep.add_pip_package("torch")
    conda_dep.add_pip_package("torchvision")
    conda_dep.add_pip_package("azureml-sdk")
    conda_dep.add_pip_package("azure-storage-blob")
    conda_dep.add_pip_package("PyYAML")
    conda_dep.add_pip_package("scikit-learn")
    conda_dep.add_pip_package("matplotlib")
    conda_dep.add_pip_package("opencensus-ext-azure")

    shoes_designer_env_file = "shoes_designer_env.yml"
    with open(shoes_designer_env_file, "w") as f:
        f.write(conda_dep.serialize_to_string())

    shoes_designer_env = Environment.from_conda_specification(
        name="shoes_designer_env",
        file_path=shoes_designer_env_file)

    inference_config = InferenceConfig(entry_script="score.py",
                                       environment=shoes_designer_env)

    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=2,
        tags={"method": "torch"},
        description='Generate shoes with torch')

    model = self.ws.models[self.MODEL_NAME]

    service = Model.deploy(workspace=self.ws,
                           name=self.DEPLOY_SERVICE_NAME,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig,
                           overwrite=True)
    service.wait_for_deployment(show_output=True)
    print("deployment succeeded")
    return service
def deploy_aci(workspace, model_azure, endpoint_name, inference_config):
    deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                           memory_gb=1,
                                                           auth_enabled=True)
    service = Model.deploy(workspace, endpoint_name, [model_azure],
                           inference_config, deployment_config,
                           overwrite=True)
    service.wait_for_deployment(show_output=True)
    print(f"Endpoint : {endpoint_name} was successfully deployed to ACI")
    print(f"Endpoint : {service.scoring_uri} created")
    return service
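# A minimal sketch of calling deploy_aci; the entry script, environment file
# and model name below are illustrative assumptions, not from the source:
from azureml.core import Environment, Workspace
from azureml.core.model import InferenceConfig, Model

workspace = Workspace.from_config()
model_azure = Model(workspace, 'my-model')
inference_config = InferenceConfig(
    entry_script='score.py',
    environment=Environment.from_conda_specification('my-env', 'env.yml'))
service = deploy_aci(workspace, model_azure, 'my-endpoint', inference_config)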
def deploy_service(aml_interface):
    inference_config = get_inference_config(aml_interface)
    deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                           memory_gb=1)
    model = aml_interface.workspace.models.get(MODEL_NAME)
    service = Model.deploy(aml_interface.workspace,
                           DEPLOYMENT_SERVICE_NAME,
                           [model],
                           inference_config,
                           deployment_config)
    service.wait_for_deployment(show_output=True)
    print(service.scoring_uri)
def call_web_service(e, service_type, service_name):
    aml_workspace = Workspace.get(name=e.workspace_name,
                                  subscription_id=e.subscription_id,
                                  resource_group=e.resource_group)
    print("Fetching service")
    headers = {}
    if service_type == "ACI":
        service = AciWebservice(aml_workspace, service_name)
    else:
        service = AksWebservice(aml_workspace, service_name)
    if service.auth_enabled:
        service_keys = service.get_keys()
        headers['Authorization'] = 'Bearer ' + service_keys[0]
    print("Testing service")
    print(". url: %s" % service.scoring_uri)
    output = call_web_app(service.scoring_uri, headers)
    return output
def deploy(self, model_id, locally):
    if locally:
        self.ctx.log('Local deployment step is not required for Azure.')
        return {'model_id': model_id}

    ws = AzureProject(self.ctx)._get_ws()
    experiment_name = self.ctx.config.get('experiment/name', None)
    if experiment_name is None:
        raise AzureException('Please specify Experiment name.')

    iteration, run_id = self._get_iteration(model_id)
    experiment = Experiment(ws, experiment_name)
    experiment_run = AutoMLRun(experiment=experiment, run_id=run_id)
    model_run = AutoMLRun(experiment=experiment, run_id=model_id)
    model_name = model_run.properties['model_name']

    self.ctx.log('Registering model: %s' % model_name)
    description = '%s-%s' % (model_name, iteration)
    model = experiment_run.register_model(model_name=model_name,
                                          iteration=iteration,
                                          description=description,
                                          tags=None)

    script_file_name = '.azureml/score_script.py'
    model_run.download_file('outputs/scoring_file_v_1_0_0.py',
                            script_file_name)

    # Deploy the ACI service
    aci_service_name = self._aci_service_name(model_name)
    self.ctx.log('Deploying AciWebservice %s ...' % aci_service_name)

    inference_config = InferenceConfig(environment=model_run.get_environment(),
                                       entry_script=script_file_name)
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=2,
        tags={'type': "inference-%s" % aci_service_name},
        description="inference-%s" % aci_service_name)

    # Remove any existing service under the same name.
    try:
        Webservice(ws, aci_service_name).delete()
        self.ctx.log('Removing existing service with the same name...')
    except WebserviceException:
        pass

    aci_service = Model.deploy(ws, aci_service_name, [model],
                               inference_config, aciconfig)
    aci_service.wait_for_deployment(True)
    self.ctx.log('%s state %s' % (aci_service_name, str(aci_service.state)))

    return {'model_id': model_id, 'aci_service_name': aci_service_name}
def deployModelAsWebService(
        ws,
        model_folder_path="models",
        model_name="component_compliance",
        scoring_script_filename="scoring_service.py",
        conda_packages=['numpy', 'pandas'],
        pip_packages=['azureml-sdk', 'onnxruntime'],
        conda_file="dependencies.yml",
        runtime="python",
        cpu_cores=1,
        memory_gb=1,
        tags={'name': 'scoring'},
        description='Compliance classification web service.',
        service_name="complianceservice"):
    # For model_path we supply the name of the outputs folder without a
    # trailing slash; this ensures both the model and the custom estimators
    # get uploaded.
    print("Registering and uploading model...")
    registered_model = Model.register(model_path=model_folder_path,
                                      model_name=model_name,
                                      workspace=ws)

    # create a Conda dependencies environment file
    print("Creating conda dependencies file locally...")
    from azureml.core.conda_dependencies import CondaDependencies
    mycondaenv = CondaDependencies.create(conda_packages=conda_packages,
                                          pip_packages=pip_packages)
    with open(conda_file, "w") as f:
        f.write(mycondaenv.serialize_to_string())

    # create container image configuration
    print("Creating container image configuration...")
    from azureml.core.image import ContainerImage
    image_config = ContainerImage.image_configuration(
        execution_script=scoring_script_filename,
        runtime=runtime,
        conda_file=conda_file)

    # create ACI configuration
    print("Creating ACI configuration...")
    from azureml.core.webservice import AciWebservice, Webservice
    aci_config = AciWebservice.deploy_configuration(cpu_cores=cpu_cores,
                                                    memory_gb=memory_gb,
                                                    tags=tags,
                                                    description=description)

    # deploy the webservice to ACI
    print("Deploying webservice to ACI...")
    webservice = Webservice.deploy_from_model(workspace=ws,
                                              name=service_name,
                                              deployment_config=aci_config,
                                              models=[registered_model],
                                              image_config=image_config)
    webservice.wait_for_deployment(show_output=True)

    return webservice
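# A minimal sketch of calling deployModelAsWebService; only the workspace is
# required, everything else falls back to the keyword defaults above:
ws = Workspace.from_config()
webservice = deployModelAsWebService(ws, service_name="complianceservice")
print(webservice.scoring_uri)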
def run(model_path, model_name):
    auth_args = {
        'tenant_id': os.environ['TENANT_ID'],
        'service_principal_id': os.environ['SERVICE_PRINCIPAL_ID'],
        'service_principal_password': os.environ['SERVICE_PRINCIPAL_PASSWORD']
    }
    ws_args = {
        'auth': ServicePrincipalAuthentication(**auth_args),
        'subscription_id': os.environ['SUBSCRIPTION_ID'],
        'resource_group': os.environ['RESOURCE_GROUP']
    }
    ws = Workspace.get(os.environ['WORKSPACE_NAME'], **ws_args)
    print(ws.get_details())

    print('\nSaving model {} to {}'.format(model_path, model_name))
    model = Model.register(ws, model_name=model_name, model_path=model_path)
    print('Done!')

    print('Checking for existing service {}'.format(model_name))
    if model_name in ws.webservices:
        print('Found it!\nRemoving existing service...')
        ws.webservices[model_name].delete()
        print('Done!')
    else:
        print('Not found, creating new one!')

    # image configuration
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="environment.yml")

    # deployment configuration
    aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                                   memory_gb=1,
                                                   description=model_name)

    # deploy
    service = Webservice.deploy_from_model(workspace=ws,
                                           name=model_name,
                                           models=[model],
                                           image_config=image_config,
                                           deployment_config=aciconfig)
    service.wait_for_deployment(show_output=True)

    # print logs
    print(service.get_logs())
    print('Done!')
def deploy_to_aci(inference_env_name, cpu_cores: int, memory_gb: int,
                  model_name, model_version, deployment_name):
    try:
        inference_config = _create_inference_config(inference_env_name)
    except Exception as e:
        raise e
    try:
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=cpu_cores, memory_gb=memory_gb)
        _start_deploy_model(inference_config, deployment_config,
                            model_name, model_version, deployment_name)
    except Exception as e:
        print("failed to deploy")
        raise e
def create_config():
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        tags={"data": "MNIST", "method": "pytorch"},
        description='Predict MNIST with pytorch')
    return aciconfig
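# A minimal sketch of feeding the config above into Model.deploy; the
# workspace, model and inference_config names are illustrative assumptions:
aciconfig = create_config()
service = Model.deploy(workspace=ws,
                       name='pytorch-mnist-svc',
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)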
def deploy_to_ACI(workspace, service_name, models, inference_config,
                  cpu_cores=1, memory_gb=1, overwrite=True):
    services = workspace.webservices
    if service_name in services and overwrite:
        print('found existing service named {}, deleting it now...'.format(service_name))
        services[service_name].delete()
    deployment_config = AciWebservice.deploy_configuration(cpu_cores=cpu_cores,
                                                           memory_gb=memory_gb)
    service = Model.deploy(workspace,
                           service_name,
                           models=models,
                           inference_config=inference_config,
                           deployment_config=deployment_config)
    service.wait_for_deployment(show_output=True)
    print(service.state)
    return service
def _deploy_remotely(self, model_id, model_run, ws, experiment):
    from azureml.core.model import Model
    from azureml.core.model import InferenceConfig
    from azureml.core.webservice import Webservice
    from azureml.core.webservice import AciWebservice
    from azureml.exceptions import WebserviceException
    from azureml.train.automl.run import AutoMLRun

    # ws, experiment = self._get_experiment()
    iteration, run_id = self._get_iteration(model_id)

    experiment_run = AutoMLRun(experiment=experiment, run_id=run_id)
    model_name = model_run.properties['model_name']

    self.ctx.log('Registering model: %s' % model_id)
    description = '%s-%s' % (model_name, iteration)
    model = experiment_run.register_model(model_name=model_name,
                                          iteration=iteration,
                                          description=description,
                                          tags=None)

    script_file_name = '.azureml/score_script.py'
    model_run.download_file('outputs/scoring_file_v_1_0_0.py',
                            script_file_name)
    self._edit_score_script(script_file_name)

    # Deploy the ACI service
    aci_service_name = self._aci_service_name(model_name)
    self.ctx.log('Deploying AciWebservice %s ...' % aci_service_name)

    inference_config = InferenceConfig(environment=model_run.get_environment(),
                                       entry_script=script_file_name)
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=2,
        tags={'type': "inference-%s" % aci_service_name},
        description="inference-%s" % aci_service_name)

    # Remove any existing service under the same name.
    try:
        Webservice(ws, aci_service_name).delete()
        self.ctx.log('Removing existing service with the same name...')
    except WebserviceException:
        pass

    aci_service = Model.deploy(ws, aci_service_name, [model],
                               inference_config, aciconfig)
    aci_service.wait_for_deployment(True)
    self.ctx.log('%s state %s' % (aci_service_name, str(aci_service.state)))

    return {'model_id': model_id, 'aci_service_name': aci_service_name}
def deploy(aml_interface, inference_config, service_name):
    aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
    model = aml_interface.workspace.models.get(MODEL_NAME)
    service = Model.deploy(aml_interface.workspace,
                           name=service_name,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aci_config)
    service.wait_for_deployment(show_output=True)
    print(service.scoring_uri)
def create_deployment_config_file(cpu_cores=1, memory_gb=1, tags=None,
                                  description=''):
    # Honor the arguments instead of hard-coding values; fall back to the
    # Iris defaults when none are given.
    if tags is None:
        tags = {"data": "Iris", "method": "sklearn_SVM"}
    if not description:
        description = 'Predict Iris with sklearn'
    return AciWebservice.deploy_configuration(
        cpu_cores=cpu_cores,
        memory_gb=memory_gb,
        tags=tags,
        description=description)
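# A minimal sketch of overriding the defaults above; values are illustrative:
config = create_deployment_config_file(cpu_cores=2,
                                       memory_gb=4,
                                       tags={"data": "Iris"},
                                       description='Larger Iris deployment')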
def deploy_image():
    ws = get_workspace()
    azure_image = get_image()
    aci_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                    memory_gb=1,
                                                    tags={'method': 'sklearn'},
                                                    description='Worst model',
                                                    location=LOCATION)
    webservice = Webservice.deploy_from_image(image=azure_image,
                                              workspace=ws,
                                              name=MODEL_NAME,
                                              deployment_config=aci_config)
    webservice.wait_for_deployment(show_output=True)
def deploy(local, aks, aci, num_cores, mem_gb, compute_name):
    # Get the workspace
    ws = Workspace.from_config()

    # Create inference configuration based on the environment definition
    # and the entry script
    # yolo = Environment.from_conda_specification(name="env", file_path="yolo.yml")
    yolo = Environment.from_pip_requirements(
        name="yolo", file_path="./deployed_requirements.txt")
    # yolo.save_to_directory('')
    yolo.register(workspace=ws)
    inference_config = InferenceConfig(entry_script="azure.py",
                                       environment=yolo,
                                       source_directory="yolov5")

    # Retrieve registered model
    model = Model(ws, id="lpr:1")

    deploy_target = None
    if local:
        # Create a local deployment, using port 8890 for the web service endpoint
        deployment_config = LocalWebservice.deploy_configuration(port=8890)
    elif aks:
        # Create an AKS deployment
        deployment_config = AksWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb,
            compute_target_name=compute_name)
        deploy_target = ComputeTarget(workspace=ws, name=compute_name)
        # if deploy_target.get_status() != "Succeeded":
        #     print(f"Deploy Target: {deploy_target.get_status()}")
        #     deploy_target.wait_for_completion(show_output=True)
    elif aci:
        # Create an ACI deployment; ACI needs no compute target
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb)
    else:
        raise NotImplementedError("Choose deploy target please")

    # Deploy the service
    print("Deploying:")
    service = Model.deploy(workspace=ws,
                           name="lpr",
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config,
                           overwrite=True,
                           deployment_target=deploy_target)

    # Wait for the deployment to complete
    service.wait_for_deployment(True)

    # Display the port that the web service is available on
    if local:
        print(service.port)
def deploy_to_aci(model_image, workspace, dev_webservice_name):
    from azureml.core.webservice import AciWebservice, Webservice

    # Deploy a model image to ACI
    print("Deploying to ACI...")
    # make sure dev_webservice_name is unique and doesn't already exist,
    # otherwise it needs to be replaced
    dev_webservice_deployment_config = AciWebservice.deploy_configuration()
    dev_webservice = Webservice.deploy_from_image(
        name=dev_webservice_name,
        image=model_image,
        deployment_config=dev_webservice_deployment_config,
        workspace=workspace)
    dev_webservice.wait_for_deployment()
    print("Deployment to ACI successfully completed")
    return dev_webservice
def deploy():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", required=True)
    parser.add_argument("--model_path", required=True)
    args = parser.parse_args()
    print(f"model_name : {args.model_name}")
    print(f"model_path: {args.model_path}")

    run = Run.get_context()
    ws = run.experiment.workspace

    model = Model.register(workspace=ws,
                           model_path=args.model_path,
                           model_name=args.model_name)
    print("Registered version {0} of model {1}".format(model.version, model.name))

    inference_config = InferenceConfig(
        entry_script='score.py',
        runtime='python',
        conda_file='conda.yml',
        extra_docker_file_steps='extra_docker_steps',
        source_directory='server_files/')
    deployment_config = AciWebservice.deploy_configuration(cpu_cores=0.1,
                                                           memory_gb=0.5,
                                                           auth_enabled=True)
    try:
        service = AciWebservice(ws, "testscorescriptauto")
        service.update(models=[model])
        print("EXISTING ENDPOINT FOUND: MODEL UPDATED")
    except Exception:
        Model.deploy(ws, "testscorescriptauto", [model],
                     inference_config, deployment_config)
        print("NO EXISTING ENDPOINT FOUND: DEPLOYED MODEL TO NEW ENDPOINT")
def main():
    # get access to workspace
    try:
        ws = Workspace.from_config()
        print(ws.name, ws.location, ws.resource_group, sep='\t')
        print('Library configuration succeeded')
    except Exception:
        print('Workspace not found')
        return

    # get model
    model = Model(ws, 'absa')

    # deploy model
    pip = [
        "azureml-defaults",
        "azureml-monitoring",
        "git+https://github.com/NervanaSystems/nlp-architect.git@absa",
        "spacy==2.1.4"
    ]
    myenv = CondaDependencies.create(pip_packages=pip)
    with open("absaenv.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    deploy_env = Environment.from_conda_specification('absa_env', "absaenv.yml")
    deploy_env.environment_variables = {'NLP_ARCHITECT_BE': 'CPU'}
    inference_config = InferenceConfig(environment=deploy_env,
                                       entry_script="score.py")
    deploy_config = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        description='Aspect-Based Sentiment Analysis - Intel')
    print('Initiating deployment')
    deployment = Model.deploy(ws,
                              'absa-svc',
                              models=[model],
                              inference_config=inference_config,
                              deployment_config=deploy_config,
                              overwrite=True)
    deployment.wait_for_deployment(show_output=True)

    print('Getting Logs')
    deployment.get_logs()
    print('Done!')
def deploy_container_instance(workspace, endpoint_name, inference_config, model_azure):
    # Remove any existing service under the same name.
    try:
        Webservice(workspace, endpoint_name).delete()
    except WebserviceException:
        pass

    deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                           memory_gb=1)
    service = Model.deploy(workspace, endpoint_name, [model_azure],
                           inference_config, deployment_config)
    service.wait_for_deployment(show_output=True)
    print('The API {} was generated in state {}'.format(service.scoring_uri,
                                                        service.state))
    return service.scoring_uri