Example no. 1
from azureml.core.compute import AksCompute, ComputeTarget

def attachAksComputeToWorkspace(ws, resource_group, cluster_name, name_for_compute_in_amls, is_dev=False):
    if is_dev:
        attach_config = AksCompute.attach_configuration(resource_group=resource_group,
                                                        cluster_name=cluster_name,
                                                        cluster_purpose=AksCompute.ClusterPurpose.DEV_TEST)
        print(attach_config)
    else:
        # Attach the cluster to your workspace. For clusters with fewer than 12 virtual CPUs, use the DEV_TEST branch above instead.

        attach_config = AksCompute.attach_configuration(resource_group=resource_group,
                                                        cluster_name=cluster_name)

    aks_target = ComputeTarget.attach(ws, name_for_compute_in_amls, attach_config)
    print("AKS Compute Attached to Workspace")
    return aks_target
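A hypothetical call, for illustration only (the workspace handle and all names below are placeholders, not values from the original source):

from azureml.core import Workspace

ws = Workspace.from_config()  # assumes a local config.json describing the workspace
aks_target = attachAksComputeToWorkspace(ws, "my-rg", "my-aks-cluster",
                                         "my-aks-compute", is_dev=True)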
Example no. 2
from azureml.core.compute import AksCompute, ComputeTarget

def attach_cluster():
    # Set the resource group that contains the AKS cluster and the cluster name
    resource_group = RESOURCE_GROUP
    cluster_name = AKS_CLUSTER_NAME

    # Attach the cluster to your workspace. DEV_TEST is required for clusters with fewer than 12 virtual CPUs:
    attach_config = AksCompute.attach_configuration(
        resource_group=resource_group,
        cluster_name=cluster_name,
        cluster_purpose=AksCompute.ClusterPurpose.DEV_TEST)
    ws = get_workspace()
    aks_target = ComputeTarget.attach(ws, AKS_NAME, attach_config)
    return aks_target
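get_workspace() and the upper-case constants are defined elsewhere in the source module; a minimal stand-in, consistent with the config-file pattern in Example no. 8, might look like this (all values are placeholders):

from azureml.core import Workspace

RESOURCE_GROUP = "<resource-group>"      # placeholder
AKS_CLUSTER_NAME = "<aks-cluster-name>"  # placeholder
AKS_NAME = "<compute-name>"              # placeholder

def get_workspace():
    # Assumes a config.json written by Workspace.write_config() is available
    return Workspace.from_config()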
Example no. 3
import os

from azureml.core.compute import AksCompute, ComputeTarget
from azureml.exceptions import ComputeTargetException

def attach_aks_clust(parameters, ws):
    compute_type = parameters.get("compute_type", "")
    resource_grp = os.environ.get("INPUT_RESOURCE_GRP", default=None)
    if resource_grp is None:
        return
    if compute_type == 'akscluster':
        compute_name = parameters.get("name", None)
        try:
            attach_config = AksCompute.attach_configuration(
                resource_group=resource_grp, cluster_name=compute_name)
            deployment_target = ComputeTarget.attach(ws, compute_name,
                                                     attach_config)
            deployment_target.wait_for_completion(show_output=True)
            print(
                "::debug::Successfully attached the given cluster with workspace"
            )
            return 'attached'
        except ComputeTargetException:
            print(
                "::debug::Could not find existing compute target with provided name inside given resource group"
            )
            return
Example no. 4
from azureml.core.compute import AksCompute, ComputeTarget

def attachExistingCluster(workspace, cluster_name, resource_group,
                          compute_name, dev_cluster):
    '''
        Attach an existing AKS cluster, unless there is an existing AMLS compute with the same 
        name already attached to the AMLS workspace. 


        PARAMS: 
            workspace        : azureml.core.Workspace   : Existing AMLS Workspace
            cluster_name     : String                   : Name of an existing AKS cluster 
            resource_group   : String                   : Name of the Azure Resource group existing cluster is in
            compute_name     : String                   : Name of the AMLS compute to create/locate.
            dev_cluster      : bool                     : Flag indicating if this is a development cluster. A development
                                                          cluster generally has fewer vCPUs, based on node count, than allowed
                                                          for a production deployment.

        RETURNS: 
            azureml.core.compute.AksCompute

    '''
    print("Attaching existing AKS compute.....")

    purpose = _getClusterPurpose(dev_cluster)
    aks_target = _getExistingCompute(workspace, compute_name)

    if aks_target is None:
        attach_config = AksCompute.attach_configuration(
            resource_group=resource_group,
            cluster_name=cluster_name,
            cluster_purpose=purpose)

        if attach_config:
            aks_target = ComputeTarget.attach(workspace, compute_name,
                                              attach_config)

    return aks_target
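The helpers _getClusterPurpose and _getExistingCompute are not included in this excerpt. Plausible minimal implementations, consistent with the ClusterPurpose values and lookup patterns used in the other examples, might be:

from azureml.core.compute import AksCompute, ComputeTarget
from azureml.exceptions import ComputeTargetException

def _getClusterPurpose(dev_cluster):
    # DEV_TEST relaxes the 12-vCPU production minimum; FAST_PROD is the default.
    if dev_cluster:
        return AksCompute.ClusterPurpose.DEV_TEST
    return AksCompute.ClusterPurpose.FAST_PROD

def _getExistingCompute(workspace, compute_name):
    # Return the compute target if it is already attached to the workspace, else None.
    try:
        return ComputeTarget(workspace=workspace, name=compute_name)
    except ComputeTargetException:
        return None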
Example no. 5
# MAGIC %md
# MAGIC ## Connect to an existing AKS Cluster

# COMMAND ----------

from azureml.core.compute import AksCompute, ComputeTarget

# Get the resource group from https://portal.azure.com -> Find your resource group
resource_group = "<resource-group>"

# Give the cluster a local name
aks_cluster_name = "<aks_name>"

# Attach the cluster to your workspace
attach_config = AksCompute.attach_configuration(resource_group=resource_group,
                                                cluster_name=aks_cluster_name)
aks_target = ComputeTarget.attach(workspace,
                                  name=aks_cluster_name,
                                  attach_configuration=attach_config)

# Wait for the operation to complete
aks_target.wait_for_completion(True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)

# COMMAND ----------

# MAGIC %md
# MAGIC
# MAGIC With the AKS cluster created and attached, the image can now be deployed as a web service.
# MAGIC
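A minimal sketch of that next step, assuming an image object such as the model_image built in Example no. 6 (the service name is a placeholder):

from azureml.core.webservice import AksWebservice, Webservice

aks_config = AksWebservice.deploy_configuration()  # defaults; tune cpu_cores, memory_gb, etc.
service = Webservice.deploy_from_image(workspace=workspace,
                                       name="my-aks-service",  # placeholder
                                       image=model_image,      # assumed built earlier
                                       deployment_config=aks_config,
                                       deployment_target=aks_target)
service.wait_for_deployment(show_output=True)
print(service.state)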
Example no. 6
    },
    synchronous=False)

# COMMAND ----------

model_image.wait_for_creation(show_output=True)

# COMMAND ----------

from azureml.core.compute import AksCompute, ComputeTarget

# Get the resource id from https://portal.azure.com -> Find your resource group -> click on the Kubernetes service -> Properties
#resource_id = "/subscriptions/<your subscription id>/resourcegroups/<your resource group>/providers/Microsoft.ContainerService/managedClusters/<your aks service name>"
# Attach the cluster to your workgroup
attach_config = AksCompute.attach_configuration(
    resource_group="mthone-fe",  # name of the resource group in which the AKS service is deployed
    cluster_name="mthoneAKS")    # name of the AKS service
# 'mthone-ml-aks' is the name of the compute target in the Azure ML workspace
aks_target = ComputeTarget.attach(ws, 'mthone-ml-aks', attach_config)

# Wait for the operation to complete
aks_target.wait_for_completion(True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)

# COMMAND ----------

from azureml.core.webservice import Webservice, AksWebservice

Example no. 7
# Set configuration and service name
# In[78]:

aks_config = AksWebservice.deploy_configuration(
    autoscale_enabled=False,
    cpu_cores=0.1,
    description='Student admissions logistic regression model',
    memory_gb=0.2,
    num_replicas=1,
    tags={'project': project})

# ## 2.11 Use an existing AKS cluster as the deployment target

# In[79]:

attach_config = AksCompute.attach_configuration(
    cluster_name=project,
    cluster_purpose='DevTest',  # allows 1 node
    resource_group=project)

# The cell below attaches the existing Kubernetes cluster as a compute target in the Azure ML workspace. It may take about 5 minutes.

# In[80]:

aks_target = ComputeTarget.attach(
    attach_configuration=attach_config,
    name=project,  # limit of 16 characters
    workspace=ws)
aks_target.wait_for_completion(True)

# ## 2.12 Deploy the model and inference script to AKS

# Up to this point the compute target Kubernetes cluster is empty. The step below will create several resources on the cluster, including the pod serving the prediction endpoint.
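The deployment step itself is cut off in this excerpt; a minimal sketch of what it would contain, following the Model.deploy pattern in Example no. 10 (the model lookup, entry script, and conda file below are placeholder assumptions):

from azureml.core import Model
from azureml.core.model import InferenceConfig

# Placeholder inference configuration; score.py and environment.yml are assumed to exist.
inference_config = InferenceConfig(entry_script="score.py",
                                   runtime="python",
                                   conda_file="environment.yml")
service = Model.deploy(workspace=ws,
                       name=project,                      # reuse the notebook's project name
                       models=[Model(ws, name=project)],  # placeholder model lookup
                       inference_config=inference_config,
                       deployment_config=aks_config,      # defined in cell In[78] above
                       deployment_target=aks_target)
service.wait_for_deployment(show_output=True)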
Example no. 8
import sys
from azureml.core import Workspace
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.authentication import AzureCliAuthentication

# Set the resource group that contains the AKS cluster and the cluster name
resource_group = 'DogBreeds'
cluster_name = 'DogBreeds'

# Get a reference to the workspace
try:
    cli_auth = AzureCliAuthentication()
    ws = Workspace.from_config(auth=cli_auth)
except Exception as e:
    print("Workspace not accessible.")
    print(e)
    sys.exit(1)

# Attach the cluster to your workspace. If the cluster has fewer than 12 virtual CPUs then
# specify: cluster_purpose = AksCompute.ClusterPurpose.DEV_TEST
attach_config = AksCompute.attach_configuration(
    resource_group=resource_group,
    cluster_name=cluster_name,
    cluster_purpose=AksCompute.ClusterPurpose.DEV_TEST)
aks_target = ComputeTarget.attach(ws, 'dogbreeds-aks', attach_config)

aks_target.wait_for_completion(show_output=True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
Example no. 9
from azureml.core.compute import AksCompute, ComputeTarget

def attach_to_AKS(workspace, attachment_name, resource_id, cluster_purpose=None):
    attach_config = AksCompute.attach_configuration(resource_id=resource_id, cluster_purpose=cluster_purpose)
    aks_target = ComputeTarget.attach(workspace, attachment_name, attach_config)
    return aks_target
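A hypothetical call, using the resource_id format noted in the comment in Example no. 6 (every segment is a placeholder):

aks_target = attach_to_AKS(
    workspace=ws,
    attachment_name="my-aks-compute",
    resource_id="/subscriptions/<subscription-id>/resourcegroups/<resource-group>"
                "/providers/Microsoft.ContainerService/managedClusters/<aks-name>",
    cluster_purpose=AksCompute.ClusterPurpose.DEV_TEST)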
Example no. 10
import importlib
import json
import os
import sys
from json import JSONDecodeError

from adal.adal_error import AdalError
from azureml.contrib.functions import (package_blob, package_http,
                                       package_service_bus_queue)
from azureml.core import Model, Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.container_registry import ContainerRegistry
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice, AksWebservice
from azureml.exceptions import (AuthenticationException, ComputeTargetException,
                                ProjectSystemException, WebserviceException)
from msrest.exceptions import AuthenticationError

# validate_json, mask_parameter, get_resource_config, get_dataset, the *_schema
# objects and the AML*Exception classes come from this action's local utils
# module and are assumed to be importable alongside this script.


def main():
    # Loading input values
    print("::debug::Loading input values")
    model_name = os.environ.get("INPUT_MODEL_NAME", default=None)
    model_version = os.environ.get("INPUT_MODEL_VERSION", default=None)
    resource_grp = os.environ.get("INPUT_RESOURCE_GRP", default=None)
    # Casting input values
    print("::debug::Casting input values")
    try:
        model_version = int(model_version)
    except (TypeError, ValueError) as exception:
        print(f"::debug::Could not cast model version to int: {exception}")
        model_version = None

    # Loading azure credentials
    print("::debug::Loading azure credentials")
    azure_credentials = os.environ.get("INPUT_AZURE_CREDENTIALS", default="{}")
    try:
        azure_credentials = json.loads(azure_credentials)
    except JSONDecodeError:
        print("::error::Please paste output of `az ad sp create-for-rbac --name <your-sp-name> --role contributor --scopes /subscriptions/<your-subscriptionId>/resourceGroups/<your-rg> --sdk-auth` as value of secret variable: AZURE_CREDENTIALS")
        raise AMLConfigurationException("Incorrect or poorly formed output from azure credentials saved in AZURE_CREDENTIALS secret. See setup in https://github.com/Azure/aml-compute/blob/master/README.md")

    # Checking provided parameters
    print("::debug::Checking provided parameters")
    validate_json(
        data=azure_credentials,
        schema=azure_credentials_schema,
        input_name="AZURE_CREDENTIALS"
    )

    # Mask values
    print("::debug::Masking parameters")
    mask_parameter(parameter=azure_credentials.get("tenantId", ""))
    mask_parameter(parameter=azure_credentials.get("clientId", ""))
    mask_parameter(parameter=azure_credentials.get("clientSecret", ""))
    mask_parameter(parameter=azure_credentials.get("subscriptionId", ""))

    # Loading parameters file
    print("::debug::Loading parameters file")
    parameters_file = os.environ.get("INPUT_PARAMETERS_FILE", default="deploy.json")
    parameters_file_path = os.path.join(".cloud", ".azure", parameters_file)
    try:
        with open(parameters_file_path) as f:
            parameters = json.load(f)
    except FileNotFoundError:
        print(f"::debug::Could not find parameter file in {parameters_file_path}. Please provide a parameter file in your repository  if you do not want to use default settings (e.g. .cloud/.azure/deploy.json).")
        parameters = {}

    # Checking provided parameters
    print("::debug::Checking provided parameters")
    validate_json(
        data=parameters,
        schema=parameters_schema,
        input_name="PARAMETERS_FILE"
    )

    # Define target cloud
    if azure_credentials.get("resourceManagerEndpointUrl", "").startswith("https://management.usgovcloudapi.net"):
        cloud = "AzureUSGovernment"
    elif azure_credentials.get("resourceManagerEndpointUrl", "").startswith("https://management.chinacloudapi.cn"):
        cloud = "AzureChinaCloud"
    else:
        cloud = "AzureCloud"

    # Loading Workspace
    print("::debug::Loading AML Workspace")
    sp_auth = ServicePrincipalAuthentication(
        tenant_id=azure_credentials.get("tenantId", ""),
        service_principal_id=azure_credentials.get("clientId", ""),
        service_principal_password=azure_credentials.get("clientSecret", ""),
        cloud=cloud
    )
    config_file_path = os.environ.get("GITHUB_WORKSPACE", default=".cloud/.azure")
    config_file_name = "aml_arm_config.json"
    try:
        ws = Workspace.from_config(
            path=config_file_path,
            _file_name=config_file_name,
            auth=sp_auth
        )
    except AuthenticationException as exception:
        print(f"::error::Could not retrieve user token. Please paste output of `az ad sp create-for-rbac --name <your-sp-name> --role contributor --scopes /subscriptions/<your-subscriptionId>/resourceGroups/<your-rg> --sdk-auth` as value of secret variable: AZURE_CREDENTIALS: {exception}")
        raise
    except AuthenticationError as exception:
        print(f"::error::Microsoft REST Authentication Error: {exception}")
        raise
    except AdalError as exception:
        print(f"::error::Active Directory Authentication Library Error: {exception}")
        raise
    except ProjectSystemException as exception:
        print(f"::error::Workspace authorization failed: {exception}")
        raise

    # Loading model
    print("::debug::Loading model")
    try:
        model = Model(
            workspace=ws,
            name=model_name,
            version=model_version
        )
    except WebserviceException as exception:
        print(f"::error::Could not load model with provided details: {exception}")
        raise AMLConfigurationException(f"Could not load model with provided details: {exception}")

    # Creating inference config
    print("::debug::Creating inference config")
    if os.environ.get("CONTAINER_REGISTRY_ADRESS", None) is not None:
        container_registry = ContainerRegistry()
        container_registry.address = os.environ.get("CONTAINER_REGISTRY_ADRESS", None)
        container_registry.username = os.environ.get("CONTAINER_REGISTRY_USERNAME", None)
        container_registry.password = os.environ.get("CONTAINER_REGISTRY_PASSWORD", None)
    else:
        container_registry = None

    try:
        inference_config = InferenceConfig(
            entry_script=parameters.get("inference_entry_script", "score.py"),
            runtime=parameters.get("runtime", "python"),
            conda_file=parameters.get("conda_file", "environment.yml"),
            extra_docker_file_steps=parameters.get("extra_docker_file_steps", None),
            source_directory=parameters.get("inference_source_directory", "code/deploy/"),
            enable_gpu=parameters.get("enable_gpu", None),
            description=parameters.get("description", None),
            base_image=parameters.get("custom_base_image", None),
            base_image_registry=container_registry,
            cuda_version=parameters.get("cuda_version", None)
        )
    except WebserviceException as exception:
        print(f"::debug::Failed to create InferenceConfig. Trying to create no code deployment: {exception}")
        inference_config = None
    except TypeError as exception:
        print(f"::debug::Failed to create InferenceConfig. Trying to create no code deployment: {exception}")
        inference_config = None

    # Skip deployment if only Docker image should be created
    if not parameters.get("skip_deployment", False):
        # Default service name
        repository_name = os.environ.get("GITHUB_REPOSITORY").split("/")[-1]
        branch_name = os.environ.get("GITHUB_REF").split("/")[-1]
        default_service_name = f"{repository_name}-{branch_name}".lower().replace("_", "-")
        service_name = parameters.get("name", default_service_name)[:32]

        # Loading run config
        print("::debug::Loading run config")
        model_resource_config = model.resource_configuration
        cpu_cores = get_resource_config(
            config=parameters.get("cpu_cores", None),
            resource_config=model_resource_config,
            config_name="cpu"
        )
        memory_gb = get_resource_config(
            config=parameters.get("memory_gb", None),
            resource_config=model_resource_config,
            config_name="memory_in_gb"
        )
        gpu_cores = get_resource_config(
            config=parameters.get("gpu_cores", None),
            resource_config=model_resource_config,
            config_name="gpu"
        )

        # Profiling model
        print("::debug::Profiling model")
        if parameters.get("profiling_enabled", False):
            # Getting profiling dataset
            profiling_dataset = get_dataset(
                workspace=ws,
                name=parameters.get("profiling_dataset", None)
            )
            if profiling_dataset is None:
                profiling_dataset = model.sample_input_dataset

            # Profiling model
            try:
                model_profile = Model.profile(
                    workspace=ws,
                    profile_name=f"{service_name}-profile"[:32],
                    models=[model],
                    inference_config=inference_config,
                    input_dataset=profiling_dataset
                )
                model_profile.wait_for_completion(show_output=True)

                # Overwriting resource configuration
                cpu_cores = model_profile.recommended_cpu
                memory_gb = model_profile.recommended_memory

                # Setting output
                profiling_details = model_profile.get_details()
                print(f"::set-output name=profiling_details::{profiling_details}")
            except Exception as exception:
                print(f"::warning::Failed to profile model. Skipping profiling and moving on to deployment: {exception}")

        # Loading deployment target
        print("::debug::Loading deployment target")
        compute_name = parameters.get("deployment_compute_target", "")
        try:
            deployment_target = ComputeTarget(
                workspace=ws,
                name=compute_name
            )
        except (ComputeTargetException, TypeError):
            deployment_target = None
        if deployment_target is None:
            try:
                attach_config = AksCompute.attach_configuration(
                    resource_group=resource_grp, cluster_name=compute_name)
                deployment_target = ComputeTarget.attach(ws, compute_name, attach_config)
                deployment_target.wait_for_completion(show_output=True)
            except ComputeTargetException:
                deployment_target = None
        # Creating deployment config
        print("::debug::Creating deployment config")
        if isinstance(deployment_target, AksCompute):
            deployment_config = AksWebservice.deploy_configuration(
                autoscale_enabled=parameters.get("autoscale_enabled", None),
                autoscale_min_replicas=parameters.get("autoscale_min_replicas", None),
                autoscale_max_replicas=parameters.get("autoscale_max_replicas", None),
                autoscale_refresh_seconds=parameters.get("autoscale_refresh_seconds", None),
                autoscale_target_utilization=parameters.get("autoscale_target_utilization", None),
                collect_model_data=parameters.get("model_data_collection_enabled", None),
                auth_enabled=parameters.get("authentication_enabled", None),
                cpu_cores=cpu_cores,
                memory_gb=memory_gb,
                enable_app_insights=parameters.get("app_insights_enabled", None),
                scoring_timeout_ms=parameters.get("scoring_timeout_ms", None),
                replica_max_concurrent_requests=parameters.get("replica_max_concurrent_requests", None),
                max_request_wait_time=parameters.get("max_request_wait_time", None),
                num_replicas=parameters.get("num_replicas", None),
                primary_key=os.environ.get("PRIMARY_KEY", None),
                secondary_key=os.environ.get("SECONDARY_KEY", None),
                tags=parameters.get("tags", None),
                properties=parameters.get("properties", None),
                description=parameters.get("description", None),
                gpu_cores=gpu_cores,
                period_seconds=parameters.get("period_seconds", None),
                initial_delay_seconds=parameters.get("initial_delay_seconds", None),
                timeout_seconds=parameters.get("timeout_seconds", None),
                success_threshold=parameters.get("success_threshold", None),
                failure_threshold=parameters.get("failure_threshold", None),
                namespace=parameters.get("namespace", None),
                token_auth_enabled=parameters.get("token_auth_enabled", None)
            )
        else:
            deployment_config = AciWebservice.deploy_configuration(
                cpu_cores=cpu_cores,
                memory_gb=memory_gb,
                tags=parameters.get("tags", None),
                properties=parameters.get("properties", None),
                description=parameters.get("description", None),
                location=parameters.get("location", None),
                auth_enabled=parameters.get("authentication_enabled", None),
                ssl_enabled=parameters.get("ssl_enabled", None),
                enable_app_insights=parameters.get("app_insights_enabled", None),
                ssl_cert_pem_file=parameters.get("ssl_cert_pem_file", None),
                ssl_key_pem_file=parameters.get("ssl_key_pem_file", None),
                ssl_cname=parameters.get("ssl_cname", None),
                dns_name_label=parameters.get("dns_name_label", None),
                primary_key=os.environ.get("PRIMARY_KEY", None),
                secondary_key=os.environ.get("SECONDARY_KEY", None),
                collect_model_data=parameters.get("model_data_collection_enabled", None),
                cmk_vault_base_url=os.environ.get("CMK_VAULT_BASE_URL", None),
                cmk_key_name=os.environ.get("CMK_KEY_NAME", None),
                cmk_key_version=os.environ.get("CMK_KEY_VERSION", None)
            )

        # Deploying model
        print("::debug::Deploying model")
        service = None
        try:
            service = Model.deploy(
                workspace=ws,
                name=service_name,
                models=[model],
                inference_config=inference_config,
                deployment_config=deployment_config,
                deployment_target=deployment_target,
                overwrite=True
            )
            service.wait_for_deployment(show_output=True)
        except WebserviceException as exception:
            print(f"::error::Model deployment failed with exception: {exception}")
            # service is still None if Model.deploy itself raised before assignment
            service_logs = service.get_logs() if service else None
            raise AMLDeploymentException(f"Model deployment failed logs: {service_logs} \nexception: {exception}")

        # Checking status of service
        print("::debug::Checking status of service")
        if service.state != "Healthy":
            service_logs = service.get_logs()
            print(f"::error::Model deployment failed with state '{service.state}': {service_logs}")
            raise AMLDeploymentException(f"Model deployment failed with state '{service.state}': {service_logs}")

        if parameters.get("test_enabled", False):
            # Testing service
            print("::debug::Testing service")
            root = os.environ.get("GITHUB_WORKSPACE", default=None)
            test_file_path = parameters.get("test_file_path", "code/test/test.py")
            test_file_function_name = parameters.get("test_file_function_name", "main")

            print("::debug::Adding root to system path")
            sys.path.insert(1, f"{root}")

            print("::debug::Importing module")
            test_file_path = f"{test_file_path}.py" if not test_file_path.endswith(".py") else test_file_path
            try:
                test_spec = importlib.util.spec_from_file_location(
                    name="testmodule",
                    location=test_file_path
                )
                test_module = importlib.util.module_from_spec(spec=test_spec)
                test_spec.loader.exec_module(test_module)
                test_function = getattr(test_module, test_file_function_name, None)
            except ModuleNotFoundError as exception:
                print(f"::error::Could not load python script in your repository which defines theweb service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
                raise AMLConfigurationException(f"Could not load python script in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
            except FileNotFoundError as exception:
                print(f"::error::Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
                raise AMLConfigurationException(f"Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
            except AttributeError as exception:
                print(f"::error::Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
                raise AMLConfigurationException(f"Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")

            # Load experiment config
            print("::debug::Loading experiment config")
            try:
                test_function(service)
            except TypeError as exception:
                print(f"::error::Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
                raise AMLConfigurationException(f"Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
            except Exception as exception:
                print(f"::error::The webservice tests did not complete successfully: {exception}")
                raise AMLDeploymentException(f"The webservice tests did not complete successfully: {exception}")

        # Deleting service if desired
        if parameters.get("delete_service_after_deployment", False):
            service.delete()
        else:
            # Creating outputs
            print("::debug::Creating outputs")
            print(f"::set-output name=service_scoring_uri::{service.scoring_uri}")
            print(f"::set-output name=service_swagger_uri::{service.swagger_uri}")

    # Creating Docker image
    if parameters.get("create_image", None) is not None:
        try:
            # Packaging model
            if parameters.get("create_image", None) == "docker":
                package = Model.package(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False
                )
            if parameters.get("create_image", None) == "function_blob":
                package = package_blob(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False,
                    input_path=os.environ.get("FUNCTION_BLOB_INPUT"),
                    output_path=os.environ.get("FUNCTION_BLOB_OUTPUT")
                )
            if parameters.get("create_image", None) == "function_http":
                package = package_http(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False,
                    auth_level=os.environ.get("FUNCTION_HTTP_AUTH_LEVEL")
                )
            if parameters.get("create_image", None) == "function_service_bus_queue":
                package = package_service_bus_queue(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False,
                    input_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_INPUT"),
                    output_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_OUTPUT")
                )

            # Getting container registry details
            acr = package.get_container_registry()
            mask_parameter(parameter=acr.address)
            mask_parameter(parameter=acr.username)
            mask_parameter(parameter=acr.password)

            # Wait for completion and pull image
            package.wait_for_creation(show_output=True)

            # Creating additional outputs
            print("::debug::Creating outputs")
            print(f"::set-output name=acr_address::{acr.address}")
            print(f"::set-output name=acr_username::{acr.username}")
            print(f"::set-output name=acr_password::{acr.password}")
            print(f"::set-output name=package_location::{package.location}")
        except WebserviceException as exception:
            print(f"::error::Image creation failed with exception: {exception}")
            # package is still None if packaging raised before assignment
            package_logs = package.get_logs() if package else None
            raise AMLDeploymentException(f"Image creation failed with logs: {package_logs}")
    print("::debug::Successfully finished Azure Machine Learning Deploy Action")