def _predict_remotely(self, ws, experiment, predict_data, model_id, threshold):
    """Score ``predict_data`` against the ACI web service deployed for a model.

    Args:
        ws: Azure ML Workspace hosting the ACI service.
        experiment: Azure ML Experiment the AutoML run belongs to.
        predict_data: pandas DataFrame of rows to score.
        model_id: AutoML run id used to resolve the deployed model name.
        threshold: when not None, class probabilities are requested from the
            service so the caller can apply the threshold itself.

    Returns:
        Tuple ``(results, results_proba, proba_classes)``. The last two are
        None unless probabilities were requested.
    """
    remote_run = AutoMLRun(experiment=experiment, run_id=model_id)
    model_name = remote_run.properties['model_name']
    aci_service_name = self._aci_service_name(model_name)
    aci_service = AciWebservice(ws, aci_service_name)

    # Ask the scoring script for probabilities only when a threshold will
    # be applied; plain class predictions otherwise.
    method = 'predict_proba' if threshold is not None else 'predict'

    payload = json.loads(predict_data.to_json(orient='split', index=False))
    input_payload = json.dumps({'method': method, 'data': payload['data']})

    try:
        response = aci_service.run(input_data=input_payload)
        print(response)
    except Exception as e:
        print('err log', aci_service.get_logs())
        raise e

    response = json.loads(response)
    results = response['result']

    # FIX: the original always returned (result, None, None), silently
    # discarding the probabilities it had just requested. Surface them the
    # same way the other _predict_remotely variant in this file does.
    results_proba = None
    proba_classes = None
    if threshold is not None:
        results_proba = results
        # .get(): not every scoring script returns 'proba_classes' —
        # TODO confirm against the deployed score.py.
        proba_classes = response.get('proba_classes')

    return results, results_proba, proba_classes
def __deploy_model(self):
    """Create or update the ACI web service serving the model + explainer pair.

    A brand-new service is deployed unless the ``update_deployment`` flag was
    passed, in which case the existing service is updated in place. Blocks
    until deployment completes, then echoes the service state and logs.
    """
    cfg = self.__config
    service_name = self.__args.service_name
    models = [
        Model(self.__ws, self.__args.model_name),
        Model(self.__ws, self.__args.explainer_model_name),
    ]

    conda_env = Environment.from_conda_specification(
        name=cfg.get('DEPLOY', 'ENV_NAME'),
        file_path=cfg.get('DEPLOY', 'ENV_FILE_PATH'))
    inference_config = InferenceConfig(
        entry_script=cfg.get('DEPLOY', 'SCORE_PATH'),
        environment=conda_env,
        source_directory=cfg.get('DEPLOY', 'DEPENDENCIES_DIRECTORY'))

    if self.__args.update_deployment:
        # Refresh the models/config of an already-running service.
        service = AciWebservice(self.__ws, service_name)
        service.update(models=models, inference_config=inference_config)
    else:
        # Fresh deployment with model-data collection and App Insights on.
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=cfg.getint('DEPLOY', 'ACI_CPU'),
            memory_gb=cfg.getint('DEPLOY', 'ACI_MEM'),
            collect_model_data=True,
            enable_app_insights=True)
        service = Model.deploy(self.__ws, service_name, models,
                               inference_config, deployment_config)

    service.wait_for_deployment(show_output=True)
    print(service.state)
    print(service.get_logs())
def _predict_remotely(self, predict_data, model_id, predict_proba):
    """Score ``predict_data`` on the ACI web service deployed for ``model_id``.

    Args:
        predict_data: pandas DataFrame of rows to score.
        model_id: AutoML run id ("AutoML_...") or a plain model name.
        predict_proba: when True, request class probabilities from the service.

    Returns:
        Tuple ``(results, results_proba, proba_classes, target_categories)``.
        ``results_proba``/``proba_classes`` are None unless ``predict_proba``.

    Raises:
        AzureException: when the service call fails or the response carries an
            error; the service logs are appended to 'automl_errors.log'.
    """
    from azureml.core.webservice import AciWebservice
    from azureml.train.automl.run import AutoMLRun
    import numpy as np

    ws, experiment = self._get_experiment()
    remote_run = AutoMLRun(experiment=experiment, run_id=model_id)
    model_features, target_categories = self._get_remote_model_features(remote_run)

    # AutoML runs store the registered model name in the run properties;
    # anything else is assumed to already be a model name.
    if model_id.startswith("AutoML_"):
        model_name = remote_run.properties['model_name']
    else:
        model_name = model_id

    # Restrict to the feature columns the model was trained on.
    if model_features:
        predict_data = predict_data[model_features]

    aci_service_name = self._aci_service_name(model_name)
    aci_service = AciWebservice(ws, aci_service_name)

    method = 'predict_proba' if predict_proba else 'predict'
    payload = json.loads(predict_data.to_json(orient='split', index=False))
    input_payload = json.dumps({'data': {'data': payload['data'], 'method': method}})

    try:
        response = aci_service.run(input_data=input_payload)
    except Exception as e:
        # Persist the service logs so the failure can be diagnosed later.
        log_file = 'automl_errors.log'
        fsclient.write_text_file(log_file, aci_service.get_logs(), mode="a")
        raise AzureException("Prediction service error. Please redeploy the model. Log saved to file '%s'. Details: %s"%(log_file, str(e)))

    response = json.loads(response)
    if "error" in response or 'result' not in response:
        raise AzureException('Prediction service return error: %s'%response.get('error'))

    results = response['result']
    results_proba = None
    proba_classes = None
    if predict_proba:
        results_proba = np.array(results)
        proba_classes = response['proba_classes']

    return results, results_proba, proba_classes, target_categories
# Deploying dev web service from image dev_service = Webservice.deploy_from_image(workspace=ws, name=aci_settings["name"], image=image, deployment_config=aci_config) # Show output of the deployment on stdout dev_service.wait_for_deployment(show_output=True) print("State of Service: {}".format(dev_service.state)) # Checking status of web service print("Checking status of ACI Dev Deployment") if dev_service.state != "Healthy": raise Exception( "Dev Deployment on ACI failed with the following status: {} and logs: \n{}" .format(dev_service.state, dev_service.get_logs())) # Testing ACI web service print("Testing ACI web service") test_sample = test_functions.get_test_data_sample() print("Test Sample: ", test_sample) test_sample_encoded = bytes(test_sample, encoding='utf8') try: prediction = dev_service.run(input_data=test_sample) print(prediction) except Exception as e: result = str(e) logs = dev_service.get_logs() dev_service.delete() raise Exception( "ACI Dev web service is not working as expected: \n{} \nLogs: \n{}".
# Inspect an existing ACI web service and prepare a sample image for a
# prediction call. NOTE(review): this reads like notebook cells flattened
# into a script — several lines below (delete, get_logs) look like
# illustrative tips that would not normally run unconditionally; confirm
# intended execution order before running as-is.
ws = Workspace.from_config()

from azureml.core.webservice import AciWebservice

service_name = 'aci-hymenoptera'
service = AciWebservice(workspace=ws, name=service_name)
# NOTE(review): this prints the bound method object, not the serialized
# service — presumably service.serialize() was intended; verify.
print(service.serialize)
print(service.state)

# If your deployment fails for any reason and you need to redeploy, make sure to delete the service before you do so: service.delete()
# NOTE(review): delete() runs unconditionally here, yet get_logs() and
# scoring_uri are used on the service afterwards — confirm this is intended.
service.delete()

# Tip: If something goes wrong with the deployment, the first thing to look at is the logs from the service by running the following command: service.get_logs()
service.get_logs()

# Get the web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
print(service.scoring_uri)

# Test the web service
# Finally, let's test our deployed web service. We will send the data as a JSON string to the web service hosted in ACI and use the SDK's run API to invoke the service. Here we will take an image from our validation data to predict on.
import os, json
from PIL import Image
import matplotlib.pyplot as plt

plt.imshow(Image.open('test_img.jpg'))
plt.show(block=False)
# Deploying dev web service from image dev_service = Webservice.deploy_from_image(workspace=ws, name=aci_settings["name"], image=image, deployment_config=aci_config) # Show output of the deployment on stdout dev_service.wait_for_deployment(show_output=True) print("State of Service: {}".format(dev_service.state)) # Checking status of web service print("Checking status of ACI Dev Deployment") if dev_service.state != "Healthy": raise Exception( "Dev Deployment on ACI failed with the following status: {} and logs: \n{}".format( dev_service.state, dev_service.get_logs() ) ) # Testing ACI web service print("Testing ACI web service") test_sample = test_functions.get_test_data_sample() print("Test Sample: ", test_sample) test_sample_encoded = bytes(test_sample, encoding='utf8') try: prediction = dev_service.run(input_data=test_sample) print(prediction) except Exception as e: result = str(e) logs = dev_service.get_logs() dev_service.delete()