import azureml.core
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core import Experiment
from azureml.core.webservice import Webservice
from azureml.core.image import ContainerImage
from azureml.core.webservice import AciWebservice
from azureml.core.conda_dependencies import CondaDependencies

ws = Workspace("c51cd33f-083e-4304-a95b-442a52dc4a2a", 
               "NetworkWatcherRG", "eSpaceAI", auth=None, _location=None, _disable_service_check=False)
location = 'southcentralus'

#Inputs for VM creation
#vm_size = input("Enter your vm_size : ")
#min_nodes = input("Enter your min_nodes : ")
#max_nodes = input("Enter your max_nodes : ")

#Inputs for VM creation
vm_size = "Standard_D2_v2"
min_nodes = 1
max_nodes = 4

## Existing or new workspace
try:
    ws = Workspace(subscription_id="24075937-2687-4457-bac6-ec16dec514c3",
                   resource_group="PMGDemo",
                   workspace_name="AutoML_demo1")

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

except Exception:
    print('Workspace not found. Creating a new Workspace.')

    # Placeholder names reused from the try block above
    subscription_id = "24075937-2687-4457-bac6-ec16dec514c3"
    myresourcegroup = "PMGDemo"
    myworkspace = "AutoML_demo1"

    ws = Workspace.create(name=myworkspace,
                          subscription_id=subscription_id,
                          resource_group=myresourcegroup,
                          create_resource_group=True,
                          location=location)
parser.add_argument("--loc", help="Location.", type=str)
parser.add_argument("--datafilepath", help="data file input name.", type=str)
args = parser.parse_args()

sub_id = args.sub
res_grp = args.basename + "-RG"
workspace_n = args.basename + "-AML-WS"
data_ref = args.datafilepath.split("/")[-1][:-4]
data_filepath = args.datafilepath
model_basename = "model-test"
model_pklname = "modknn"

# Configure Machine Learning resources
# help(Workspace)
ws = Workspace(subscription_id=sub_id,
               resource_group=res_grp,
               workspace_name=workspace_n)

# Configure a datastore
# Default datastore
def_data_store = ws.get_default_datastore()

# Configure the data reference
# Create a data source that can be referenced in a pipeline as an input or step input.
# In a pipeline, a data source is represented by a DataReference object.
from azureml.data.data_reference import DataReference
blob_input_data = DataReference(datastore=def_data_store,
                                data_reference_name=data_ref,
                                path_on_datastore=data_filepath)

# Intermediate data (or the output of a step) is represented by a PipelineData object.
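# A minimal sketch of declaring such an intermediate output, assuming a
# hypothetical name "processed_data" and the default datastore obtained above:
from azureml.pipeline.core import PipelineData

# A pipeline step would produce this object as output; a later step would consume it as input.
processed_data = PipelineData("processed_data", datastore=def_data_store)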
from azureml.core.compute import ComputeInstance, AmlCompute

# Get workspace
from azureml.core import Run, Workspace
from azureml.core.run import _OfflineRun

run = Run.get_context()
ws = None
if isinstance(run, _OfflineRun):
    ws = Workspace.from_config()
else:
    ws = run.experiment.workspace

# Use Managed Identity to authenticate
from azureml.core.authentication import MsiAuthentication
msi_auth = MsiAuthentication()

ws = Workspace(subscription_id=ws.subscription_id,
               resource_group=ws.resource_group,
               workspace_name=ws.name,
               auth=msi_auth)

# Loop through compute targets
for ct_name, ct in ws.compute_targets.items():
    if (isinstance(ct, ComputeInstance)
            and ct.get_status().state != "Stopped"):
        print(f"Stopping compute instance {ct.name}")
        try:
            ct.stop(wait_for_completion=False, show_output=False)
        except Exception as e:
            print(f"Failed to stop compute {ct.name} with error {e})")
    if isinstance(ct, AmlCompute):
        print(f"Scaling down cluster {ct.name}")
        try:
            ct.update(min_nodes=0)
        except Exception as e:
            print(f"Failed to scale down cluster {ct.name} with error {e}")
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication

## FILL IN YOUR OWN ACCESS TOKENS BELOW (BEST PRACTICE: MAKE USE OF DBUTILS SECRETS)
svc_pr = ServicePrincipalAuthentication(
    tenant_id=dbutils.secrets.get("fieldeng", "mthone_tenant_id"),
    service_principal_id=dbutils.secrets.get("fieldeng", "mthone_sp_id"),
    service_principal_password=dbutils.secrets.get("fieldeng", "mthone_sp_pw"))

# Authenticate with the service principal created above
# (interactive login is used by default if no auth object is passed)
ws = Workspace(
    subscription_id=dbutils.secrets.get(
        "fieldeng", "mthone_subscription_id"),  # Azure subscription ID
    resource_group="mthone-fe",  # Resource group in which the Azure ML workspace is deployed
    workspace_name="mthoneML",  # Name of the Azure ML workspace
    auth=svc_pr)
print("Found workspace {} at location {}".format(ws.name, ws.location))

# COMMAND ----------

import mlflow.azureml

# run_id_python = "runs:/e3f4485070bf433cab95836ed22d1fee/model"
run_id_python = modelURI

model_image, azure_model = mlflow.azureml.build_image(
    workspace=ws,
    model_uri=run_id_python)
Example #6
def setAutomatedMLWorkspace(create_workspace=False,
                            create_resource_group=False,
                            workspace_region=None,
                            *,
                            auth=None,
                            subscription_id,
                            resource_group,
                            workspace_name):
    """Set configuration file for AutomatedML actions with the EconML library. If
    ``create_workspace`` is set true, a new workspace is created
    for the user.

    Parameters
    ----------

    create_workspace: Boolean, optional, default False
       If set to true, a new workspace will be created if the specified
       workspace does not exist.

    create_resource_group: Boolean, optional, default False
       If set to true, a new resource_group will be created if the specified
       resource_group does not exist.

    workspace_region: String, optional
       Region of workspace, only necessary if create_workspace is set to true
       and a new workspace is being created.

    auth: azureml.core.authentication.AbstractAuthentication, optional
       If set, EconML will use the auth object for handling Azure
       authentication. Otherwise, EconML will use interactive authentication,
       opening an authentication portal in the browser.

    subscription_id: String, required
       Azure subscription ID for the subscription under which to run the models

    resource_group: String, required
       Name of resource group of workspace to be created or set.

    workspace_name: String, required
       Name of the workspace to be created or set.
    """
    try:
        ws = Workspace(subscription_id=subscription_id,
                       resource_group=resource_group,
                       workspace_name=workspace_name,
                       auth=auth)
        # write the details of the workspace to a configuration file to the notebook library
        ws.write_config()
        print("Workspace configuration has succeeded.")
    except ProjectSystemException:
        if (create_workspace):
            if (create_resource_group):
                print("Workspace not accessible. Creating a new workspace and \
                resource group.")
                ws = Workspace.create(
                    name=workspace_name,
                    subscription_id=subscription_id,
                    resource_group=resource_group,
                    location=workspace_region,
                    create_resource_group=create_resource_group,
                    sku='basic',
                    auth=auth,
                    exist_ok=True)
                ws.get_details()
            else:
                print("Workspace not accessible. Set \
                create_resource_group = True and run again to create a new \
                workspace and resource group.")
        else:
            print("Workspace not accessible. Set create_workspace = True \
            to create a new workspace.")
Example #7
import os
import sys
import json

from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication

svc_pr_password = os.environ.get("AZUREML_PASSWORD")
tenant = os.environ.get("TENANT_ID")
serviceprin = os.environ.get("APPID")
sub = os.environ.get("SUBSCRIPTION")
rg = os.environ.get("RESOURCE_GROUP")
wrkspc = os.environ.get("WORKSPACE_NAME")

svc_pr = ServicePrincipalAuthentication(
    tenant_id=tenant,
    service_principal_id=serviceprin,
    service_principal_password=svc_pr_password)


ws = Workspace(
    subscription_id=sub,
    resource_group=rg,
    workspace_name=wrkspc,
    auth=svc_pr
    )

# Get the latest evaluation result 
try:
    with open("./aml_config/run_id.json") as f:
        config = json.load(f)
    if not config["run_id"]:
        raise Exception('No new model to register as production model perform better')
except:
    print('No new model to register as production model perform better')
    #raise Exception('No new model to register as production model perform better')
    sys.exit(0)

run_id = config["run_id"]
Example #8
from azureml.telemetry import set_diagnostics_collection

set_diagnostics_collection(send_diagnostics=True)

from azureml.core import Workspace, Dataset

from azureml.core.authentication import ServicePrincipalAuthentication

svc_pr_password = "******"

svc_pr = ServicePrincipalAuthentication(
    tenant_id="72f988bf-86f1-41af-91ab-2d7cd011db47",
    service_principal_id="8a3ddafe-6dd6-48af-867e-d745232a1833",
    service_principal_password="******")

ws = Workspace(subscription_id="c46a9435-c957-4e6c-a0f4-b9a597984773",
               resource_group="mlops",
               workspace_name="gputraining",
               auth=svc_pr)

import tensorflow as tf
print(tf.__version__)

from azureml.core import Experiment

import os

script_folder = './tf-mnist'
os.makedirs(script_folder, exist_ok=True)

exp = Experiment(workspace=ws, name='tf-mnist')

import urllib.request

data_folder = os.path.join(os.getcwd(), 'data')
if AZURE_ML_CONF['subscription_id'] is None:
  raise KeyError("subscription_id, resource_group, and workspace_name must be filled out in notebook 99-Shared-Functions-and-Settings")

# COMMAND ----------

from azureml.core import Workspace
from azureml.core.authentication import InteractiveLoginAuthentication

# Please note - in other examples you will find online, you don't need the following lines with the up object.
# However, when working with multiple users on the same Databricks cluster, you should follow these steps to
# avoid interfering with other users' authentication.

# Note - it won't let you "log in" as another user; it'll just make you reauthenticate every time you interact with the AML Workspace.

up = InteractiveLoginAuthentication()
up.get_authentication_header()

ws = Workspace(**AZURE_ML_CONF, auth=up)

# In non-Databricks world, this would look like:
# ws = Workspace(**AZURE_ML_CONF)

# COMMAND ----------

# If you need to create a new Azure Machine Learning workspace, you can do that via the SDK.

# ws = Workspace.create(**AZURE_ML_CONF, location=AZURE_REGION, exist_ok=True)
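# For reference, a sketch of the same call with explicit placeholder arguments:

# ws = Workspace.create(name="<workspace-name>",
#                       subscription_id="<subscription-id>",
#                       resource_group="<resource-group>",
#                       create_resource_group=True,
#                       location=AZURE_REGION,
#                       exist_ok=True)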

# COMMAND ----------

Example #10
#### Initialize (existing) workspace and compute target ####
from azureml.core import Workspace
print("\nimported Workspace\n")
subscription_id = '65d5f74f-ace7-469d-9082-4393b3a7764b'
resource_group = 'ITAS-SCD'
workspace_name = 'ML-ITAS-SCD'
cpu_cluster_name = 'Run-Demos-Fast'  # This is also the compute name

ws = Workspace(subscription_id,
               resource_group,
               workspace_name)
print("\ninitialized Workspace\n")
#compute_target = ws.compute_targets[compute_name]

# #### Verify that cluster does not exist already ####
# from azureml.core.compute import ComputeTarget, AmlCompute
# from azureml.core.compute_target import ComputeTargetException

# try:
#     cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
#     print('Found existing cluster, use it.')
# except ComputeTargetException:
#     compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_F4S_V2',
#                                                            max_nodes=8)
#     cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

# #### Create a run configuration for the persistent compute target ####

# from azureml.core.runconfig import RunConfiguration
# from azureml.core.conda_dependencies import CondaDependencies
def start(config_file):

    print(config_file)
    configdata = ngccontent.get_config(config_file)
    subscription_id = configdata["azureml_user"]["subscription_id"]
    resource_group = configdata["azureml_user"]["resource_group"]
    workspace_name = configdata["azureml_user"]["workspace_name"]

    ws = Workspace(workspace_name=workspace_name,
                   subscription_id=subscription_id,
                   resource_group=resource_group)

    verify = f'''
    Subscription ID: {subscription_id}
    Resource Group: {resource_group}
    Workspace: {workspace_name}'''
    print(verify)

    ### vnet settings
    vnet_rg = ws.resource_group
    vnet_name = configdata["aml_compute"]["vnet_name"]
    subnet_name = configdata["aml_compute"]["subnet_name"]

    ### azure ml names
    ct_name = configdata["aml_compute"]["ct_name"]
    exp_name = configdata["aml_compute"]["exp_name"]

    ### trust but verify
    verify = f'''
    vNET RG: {vnet_rg}
    vNET name: {vnet_name}
    vNET subnet name: {subnet_name}
    Compute target: {ct_name}
    Experiment name: {exp_name}'''
    print(verify)

    if configdata["aml_compute"]["vm_name"] in configdata[
            "supported_vm_sizes"].keys():
        vm_name = configdata["aml_compute"]["vm_name"]
        gpus_per_node = configdata["supported_vm_sizes"][vm_name]

        print(
            "Setting up compute target {ct_name} with vm_size: {vm_name} with {gpus_per_node} GPUs"
            .format(ct_name=ct_name,
                    vm_name=vm_name,
                    gpus_per_node=gpus_per_node))

        if ct_name not in ws.compute_targets:
            config = AmlCompute.provisioning_configuration(
                vm_size=vm_name,
                min_nodes=configdata["aml_compute"]["min_nodes"],
                max_nodes=configdata["aml_compute"]["max_nodes"],
                vnet_resourcegroup_name=vnet_rg,
                vnet_name=vnet_name,
                subnet_name=subnet_name,
                idle_seconds_before_scaledown=configdata["aml_compute"]
                ["idle_seconds_before_scaledown"],
                remote_login_port_public_access='Enabled')
            ct = ComputeTarget.create(ws, ct_name, config)
            ct.wait_for_completion(show_output=True)
        else:
            print("Loading Pre-existing Compute Target {ct_name}".format(
                ct_name=ct_name))
            ct = ws.compute_targets[ct_name]
    else:
        print("Unsupported vm_size {vm_size}".format(vm_size=vm_name))
        print("The specified vm size must be one of ...")
        for azure_gpu_vm_size in configdata["supported_vm_sizes"].keys():
            print("... " + azure_gpu_vm_size)
        raise Exception(
            "{vm_size} does not support Pascal or above GPUs".format(
                vm_size=vm_name))

    environment_name = configdata["aml_compute"]["environment_name"]
    python_interpreter = configdata["aml_compute"]["python_interpreter"]
    conda_packages = configdata["aml_compute"]["conda_packages"]
    from azureml.core import ContainerRegistry

    if environment_name not in ws.environments:
        env = Environment(name=environment_name)
        env.docker.enabled = configdata["aml_compute"]["docker_enabled"]
        env.docker.base_image = None
        env.docker.base_dockerfile = "FROM {dockerfile}".format(
            dockerfile=configdata["ngc_content"]["base_dockerfile"])
        env.python.interpreter_path = python_interpreter
        env.python.user_managed_dependencies = True
        conda_dep = CondaDependencies()

        for conda_package in conda_packages:
            conda_dep.add_conda_package(conda_package)

        env.python.conda_dependencies = conda_dep
        env.register(workspace=ws)
    else:
        env = ws.environments[environment_name]

    amlcluster = Azuremlcomputecluster.AzureMLComputeCluster(
        workspace=ws,
        compute_target=ct,
        initial_node_count=1,
        experiment_name=configdata["aml_compute"]["exp_name"],
        environment_definition=env,
        use_gpu=True,
        n_gpus_per_node=1,
        jupyter=True,
        jupyter_port=configdata["aml_compute"]["jupyter_port"],
        dashboard_port=9001,
        scheduler_port=9002,
        scheduler_idle_timeout=1200,
        worker_death_timeout=30,
        additional_ports=[],
        datastores=[],
        telemetry_opt_out=True,
        asynchronous=False)

    print(amlcluster.jupyter_link)
    print('Exiting script')
Example #12
"""
Submit job the "old school" way, using the Python SDK.

For reference, see:
https://azure.github.io/azureml-web/docs/cheatsheet/script-run-config
"""

from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace
from azureml.core.runconfig import MpiConfiguration

ws = Workspace("48bbc269-ce89-4f6f-9a12-c6f91fcb772d", "aml1p-rg",
               "aml1p-ml-wus2")

env = Environment.from_conda_specification("hydra-pl", "environment.yml")
env.docker.enabled = True
env.docker.base_image = (
    "mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.2-cudnn8-ubuntu18.04")

# ==============================================================================
node_count = 2
gpus_per_node = -1
cluster = "gpu-nc24-lowpri"
# ==============================================================================

mpi_config = MpiConfiguration(process_count_per_node=1, node_count=node_count)

config = ScriptRunConfig(
    source_directory=".",
    script="train.py",
    compute_target=cluster,
    distributed_job_config=mpi_config)
Example #13
from azureml.core import Workspace, Datastore
from azureml.core.authentication import InteractiveLoginAuthentication
from azureml.data.data_reference import DataReference

from azureml.pipeline.steps import PythonScriptStep
from azureml.pipeline.core import PipelineData, Pipeline
import json
import os

with open('config.json', 'r') as jsonfile:
    ws_config = json.load(jsonfile)

interactive_auth = InteractiveLoginAuthentication(
    tenant_id=ws_config['tenantId'])

ws = Workspace(
    subscription_id=ws_config['subscription_id'],
    resource_group=ws_config['resource_group'],
    workspace_name=ws_config['workspace_name'],
    auth=interactive_auth,
)

datastore_name = 'shiftdatastore'  # Name of the datastore to workspace
container_name = os.getenv("BLOB_CONTAINER",
                           "news20container")  # Name of Azure blob container
account_name = os.getenv("BLOB_ACCOUNTNAME",
                         "shiftreference")  # Storage account name
account_key = os.getenv("AZURE_STORAGE_KEY")  # Storage account key

datastore = Datastore.get(ws, "shiftdatastore")

blob_datastore = Datastore.register_azure_blob_container(
    workspace=ws,
    datastore_name=datastore_name,
    container_name=container_name,
    account_name=account_name,
    account_key=account_key)
Example #14
import json
from datetime import datetime

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentMode, DeploymentProperties
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException


def main(resource, location, workspace, compute, clear_all):
    startTime = datetime.now()
    print('#########################')
    print('Resource Group: {}\nLocation: {}\nWorkspace: {}\nCompute: {}\nReset: {}'.format(resource, location, workspace, compute, clear_all))
    print('#########################')
    
    # LOGIN
    print('logging into Azure...')
    credentials = ServicePrincipalCredentials(
        client_id=AZURE_CLIENT_ID,
        secret=AZURE_CLIENT_SECRET,
        tenant=AZURE_TENANT_ID
    )

    client = ResourceManagementClient(credentials, AZURE_SUBSCRIPTION_ID)

    # DELETE PREVIOUS RG
    if clear_all:
        print('deleting {}...'.format(resource))
        delete_async_operation = client.resource_groups.delete(resource)
        delete_async_operation.wait()
        print('done!')

    # creating RG
    print('\nCreating Resource Group')
    print('-----------------------')
    resource_group_params = {'location':location}
    print('creating "{}" at "{}"'.format(resource, location))
    print(client.resource_groups.create_or_update(resource, resource_group_params))

    # preparing ARM template for Workspace
    with open('workspace.json', 'r') as f:
        template = json.load(f)

    # find api versions
    apiVersions = {}
    for item in template['resources']:
        apiVersions[item['type']] = item['apiVersion']
    apiVersions['Microsoft.MachineLearningServices/workspaces'] = template['variables']['mlservicesVersion']
    
    # clear workspace if it exists
    # loop to delete actual workspace first
    for item in client.resources.list_by_resource_group(resource):
        if item.type == 'Microsoft.MachineLearningServices/workspaces':
            print('\ndeleting workspace {} [{}]'.format(item.name, apiVersions[item.type]))
            result = client.resources.delete_by_id(item.id, apiVersions[item.type])
            result.wait()
            break
            
    # loop to clear related workspace resources
    for item in client.resources.list_by_resource_group(resource):
        if item.tags is not None and 'mlWorkspace' in item.tags and item.tags['mlWorkspace'] == workspace:
            print('\tremoving {} [{} => {}]'.format(item.name, item.type, apiVersions[item.type]))
            result = client.resources.delete_by_id(item.id, apiVersions[item.type])
            result.wait()

    # ARM Template Parameters
    parameters = {
        'workspaceName': { 'value': workspace },
        'location': { 'value': location }
    }

    print('\nCreating Workspace')
    print('-----------------------')
    result = client.deployments.create_or_update(
                resource,
                'ai_workspace_{}'.format(datetime.utcnow().strftime("-%H%M%S")),
                properties= DeploymentProperties (
                    mode=DeploymentMode.complete,
                    template=template,
                    parameters=parameters,
                )
            )
    result.wait()
    print('done!')

    print('\nCreating Workspace Compute Target')
    print('-----------------------')
    # creating compute targets
    print('Creating compute target {} in {}'.format(compute, workspace))
    subscription_id = AZURE_SUBSCRIPTION_ID
    resource_group = resource
    workspace_name = workspace

    # create workspace object
    ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)

    # create compute if it doesn't already exist
    try:
        compute = ComputeTarget(workspace=ws, name=compute)
        print('Found existing compute target')
    except ComputeTargetException:
        print('Creating a new compute target...')
        compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', min_nodes=1, max_nodes=6)
        compute = ComputeTarget.create(ws, compute, compute_config)
        compute.wait_for_completion(show_output=True)

    client.close()

    print('Done!\nTotal Time: {}'.format(datetime.now() - startTime))
Example #15
import time
import requests
from azureml.core import Workspace
from azureml.pipeline.core import PipelineRun
from azureml.pipeline.core import PublishedPipeline
from azureml.core.authentication import AzureCliAuthentication

cli_auth = AzureCliAuthentication()

##------------- Get Workspace

subscriptionId = "<your subscription id>"  # make this a parameter
resourceGroup = "<your resource group>"  # make this a parameter
workspaceName = "<your ml workspace name>"  # make this a parameter

ws = Workspace(subscriptionId, resourceGroup, workspaceName, auth=cli_auth)

##------------- Run Published pipeline using REST endpoint

aad_token = cli_auth.get_authentication_header()
published_pipeline_id = "ab0691a9-438f-416b-a146-5c7660d1be11"  # Replace this with the published pipeline id
published_pipeline = PublishedPipeline.get(ws, published_pipeline_id)
rest_endpoint = published_pipeline.endpoint
print("Rest endpoint: " + rest_endpoint)

response = requests.post(rest_endpoint,
                         headers=aad_token,
                         json={
                             "ExperimentName": "quality_prediction_gb",
                             "RunSource": "SDK",
                             "ParameterAssignments": {}  # pipeline parameters go here
                         })
Example #16
import pyodbc
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
#ws = Workspace.create(name='saab_ml_workspace', subscription_id='047ae087-7d35-4c57-8fe9-7a442cc9cf16', resource_group='Speech_Analytics')

svc_pr_password = "******"

svc_pr = ServicePrincipalAuthentication(
    tenant_id="93f33571-550f-43cf-b09f-cd331338d086",
    service_principal_id="0a89e69e-7aae-4a80-8b76-7dc9f65c3d16",
    service_principal_password=svc_pr_password)

ws = Workspace(subscription_id="047ae087-7d35-4c57-8fe9-7a442cc9cf16",
               resource_group="Speech_Analytics",
               workspace_name="saab_ml_workspace",
               auth=svc_pr)

client_name = {'clientname': 'SAAB'}
mysql = {
    'server': 'saab-server-resource.database.windows.net',
    'database': 'saab_dw_resource',
    'username': '******',
    'password': '******',
    'driver': '{ODBC Driver 17 for SQL Server}',
    'PORT': '1433'
}
client_details = {
    'Domain_ID': '101',
    'UserName': '******',
    'call_language1': 'en'
}
Example #17
                        dest="aml_resource_group",
                        type=str,
                        help="Resource group of Azure Machine Learning")
    parser.add_argument("--aml-subscription-id",
                        dest="aml_subscription_id",
                        type=str,
                        help="Subscription ID of Azure Machine Learning")
    parser.add_argument("--aml-workspace-name",
                        dest="aml_workspace_name",
                        type=str,
                        help="Name of Azure Machine Learning workspace")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    # Load arguments
    print("Loading arguments")
    args = parse_args()

    # Load workspace
    print("Load Workspace")
    interactive_auth = InteractiveLoginAuthentication()
    ws = Workspace(subscription_id=args.aml_subscription_id,
                   resource_group=args.aml_resource_group,
                   workspace_name=args.aml_workspace_name,
                   auth=interactive_auth)

    # Attach ADB as remote compute
    print("Attaching ADB as remote compute")
    main(args=args, workspace=ws)
Example #18
import sys
from argparse import ArgumentParser

from azureml.core import Workspace, Experiment, ScriptRunConfig
from azureml.core.compute import ComputeTarget
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('--subscription_id', help='the subscription id of aml')
    parser.add_argument('--resource_group', help='the resource group of aml')
    parser.add_argument('--workspace_name', help='the workspace name of aml')
    parser.add_argument('--compute_target',
                        help='the compute cluster name of aml')
    parser.add_argument('--docker_image', help='the docker image of job')
    parser.add_argument('--experiment_name', help='the experiment name')
    parser.add_argument('--script_dir', help='script directory')
    parser.add_argument('--script_name', help='script name')
    args = parser.parse_args()

    ws = Workspace(args.subscription_id, args.resource_group,
                   args.workspace_name)
    compute_target = ComputeTarget(workspace=ws, name=args.compute_target)
    experiment = Experiment(ws, args.experiment_name)
    run_config = RunConfiguration()
    run_config.environment.python.user_managed_dependencies = True
    run_config.environment.docker.enabled = True
    run_config.environment.docker.base_image = args.docker_image
    run_config.target = compute_target
    run_config.node_count = 1
    config = ScriptRunConfig(source_directory=args.script_dir,
                             script=args.script_name,
                             run_config=run_config)
    run = experiment.submit(config)
    print(run.get_details()["runId"])
    while True:
        line = sys.stdin.readline().rstrip()
Example #19
import requests
import time
import base64
import datetime
import shutil
import os, json
import azureml.core
from azureml.core import Workspace
from azureml.core.run import Run
from azureml.core.experiment import Experiment
from azureml.core.model import Model
from azureml.core.authentication import ServicePrincipalAuthentication

ws = Workspace(workspace_name=workspace,
               subscription_id=subscription_id,
               resource_group=resource_grp)

ws.get_details()

# COMMAND ----------

# MAGIC %md ##### 1b. Load models from disk where they were stored in previous notebook 1_DeepLearningCifar10NotebookExploration.py

# COMMAND ----------

import keras
from keras.models import load_model

#path= '/dbfs/CIFAR10/models/'
model2000path = path + par_model2000_name
Example #20
import azureml.core
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.runconfig import RunConfiguration
from azureml.core.model import Model
from operator import attrgetter

spAuth = ServicePrincipalAuthentication(tenant_id="TENANT_ID",
                                        service_principal_id="******",
                                        service_principal_password="******")

print("SDK Version: ", azureml.core.VERSION)

subscription_id = "SUBSCRIPTION_ID"
resource_group = "RESOURCE_GROUP_NAME"
workspace_name = "WORKSPACE_NAME"

ws = Workspace(auth=spAuth,
               subscription_id=subscription_id,
               resource_group=resource_group,
               workspace_name=workspace_name)
print("Loaded workspace: " + ws.name)

ready_models = Model.list(workspace=ws, tags=[['is_model_ready', 'true']])
model = max(
    [model for model in ready_models if model.name == "breast-cancer-model"],
    key=attrgetter('version'))

from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies()
myenv.add_pip_package("numpy")
myenv.add_pip_package("sklearn")
#myenv.add_pip_package("azureml-core")
with open("myenv.yml", "w") as f:
Example #21
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
from azureml.core.run import Run
from azureml.data.dataset_factory import TabularDatasetFactory
from azureml.core import Workspace, Dataset

## initialize your workspace information here

subscription_id = '10c5d508-c599-42ff-85c4-c15b92f298b5'
resource_group = 'nirmal-test'
workspace_name = 'AzureML_Nirmal_Test'
workspace = Workspace(subscription_id, resource_group, workspace_name)


def split_data(ds_name, test_size):

    dataset = Dataset.get_by_name(workspace, name=ds_name)
    df = dataset.to_pandas_dataframe()
    y = df.iloc[:, -1].values  # output variable
    X = df.iloc[:, :-1].values  # feature variables
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=test_size,
                                                        random_state=42)
    return X_train, X_test, y_train, y_test  # return the train and test split data

Example #22
print("azureml.core.VERSION", VERSION)

base_dir = "."

config_json = os.path.join(base_dir, "config.json")
with open(config_json, "r") as f:
    config = json.load(f)

auth = ServicePrincipalAuthentication(
    tenant_id=config["tenant_id"],
    service_principal_id=config["service_principal_id"],
    service_principal_password=config["service_principal_password"],
)

ws = Workspace(config["subscription_id"],
               config["resource_group"],
               config["workspace_name"],
               auth=auth)

print(ws.get_details())

keyvault = ws.get_default_keyvault()
keyvault.set_secret("tenantID", config["tenant_id"])
keyvault.set_secret("servicePrincipalId", config["service_principal_id"])
keyvault.set_secret("servicePrincipalPassword",
                    config["service_principal_password"])

# folder for scripts that need to be uploaded to Aml compute target
script_folder = "./scripts/"
if os.path.exists(script_folder):
    print("Deleting:", script_folder)
    shutil.rmtree(script_folder)
Example #23
# Get the latest evaluation result
try:
    with open("conf/image.json") as f:
        config = json.load(f)
    if not config["image_name"]:
        raise Exception('No new image created in build')
except Exception:
    print('No image to deploy')
    #raise Exception('No new model to register as the production model performs better')
    sys.exit(1)

# Start creating
# Point file to conf directory containing details for the aml service
cli_auth = AzureCliAuthentication()
ws = Workspace(workspace_name=workspace_name,
               subscription_id=subscription_id,
               resource_group=resource_grp,
               auth=cli_auth)

image_name = config["image_name"]
image_version = config["image_version"]
images = Image.list(workspace=ws)
image, = (m for m in images
          if m.version == image_version and m.name == image_name)
print('From image.json, Model used: {}\nModel Version {}'.format(
    config["model_name"], config["model_version"]))
print(
    'From image.json, Image used to deploy webservice on ACI: {}\nImage Version: {}\nImage Location = {}'
    .format(image.name, image.version, image.image_location))

try:
    service = AksWebservice(name=service_name, workspace=ws)
except Exception as e:
    print(e)
Example #24
from azureml.core import Workspace
from azureml.core import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice, Webservice

ws = Workspace(subscription_id="19f6dcec-26c8-4916-89b0-ac775bc5e6b8",
               resource_group="dev",
               workspace_name="climbing-hold-detection")

env = Environment.from_dockerfile(name="yolov5s", dockerfile="Dockerfile")

env.inferencing_stack_version = "latest"
env.python.user_managed_dependencies = True
env.python.interpreter_path = "/usr/bin/python3.7"

inference_config = InferenceConfig(
    environment=env,
    source_directory="./source_dir",
    entry_script="./entry_script.py",
)

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1)
deployment_config.auth_enabled = True

model = Model(workspace=ws, name="yolov5s", id="yolov5s_onnx:1")

service = Model.deploy(
    ws,
    "yolov5s-service",
    [model],
    inference_config,
    deployment_config)
Example #25
import azureml.core
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.run import Run
from azureml.core.experiment import Experiment

# Check core SDK version number
print("SDK version:", azureml.core.VERSION)

# COMMAND ----------

spn = ServicePrincipalAuthentication(par_spn_tenant, par_spn_clientid,
                                     par_spn_clientsecret)
ws = Workspace(auth=spn,
               workspace_name=par_workspace_name,
               subscription_id=par_subscription_id,
               resource_group=par_resource_group)

ws.get_details()

print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group,
      sep='\n')

# COMMAND ----------

import os
import urllib
import pprint
import argparse
import os

from azureml.core import Workspace
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

parser = argparse.ArgumentParser()
# Workspace identity arguments used below
parser.add_argument('--subscription_id', type=str, help='Azure subscription ID')
parser.add_argument('--resource_group', type=str, help='Resource group name')
parser.add_argument('--workspace', type=str, help='Azure ML workspace name')
parser.add_argument('--storage_container',
                    type=str,
                    default=os.getenv('STORAGE_CONTAINER', 'eventhubs'),
                    help='Storage container name')
parser.add_argument('--storage_key',
                    type=str,
                    default=os.getenv('STORAGE_KEY'),
                    help='Storage account key')
parser.add_argument('--storage_path',
                    type=str,
                    default=os.getenv('STORAGE_PATH'),
                    help='Path to Avro data in storage container')
args = parser.parse_args()

ws = Workspace(subscription_id=args.subscription_id,
               resource_group=args.resource_group,
               workspace_name=args.workspace)

# Choose a name for your CPU cluster
cpu_cluster_name = "cpu1"

# Verify that cluster does not exist already
try:
    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(
        vm_size='STANDARD_D2_V2',
        min_nodes=0,
        max_nodes=1,
    )
Example #27

def resolve_source_directory():
    return os.environ["SOURCE_DIR"]


def resolve_db_cluster_id():
    return os.environ["DB_CLUSTER_ID"]


my_env = CondaDependencies.create(conda_packages=resolve_dependencies())

with open("myenv.yml", "w") as f:
    f.write(my_env.serialize_to_string())

ws = Workspace(resolve_subscription_id(), resolve_rg(),
               resolve_ml_workspace_name())

config = DatabricksCompute.attach_configuration(
    resource_group=resolve_rg(),
    workspace_name=resolve_db_workspace_name(),
    access_token=resolve_db_access_token())
databricks_compute = ComputeTarget.attach(ws, resolve_compute_name(), config)
databricks_compute.wait_for_completion(True)

dbPythonInLocalMachineStep = DatabricksStep(
    name="DBPythonInLocalMachine",
    python_script_name=resolve_script_name(),
    source_directory=resolve_source_directory(),
    run_name='DB_Worst_Regression_Run',
    compute_target=databricks_compute,
    existing_cluster_id=resolve_db_cluster_id())
Example #28
    def __init__(self, request_id, input_container_sas, internal_datastore):
        try:
            aml_config = api_config.AML_CONFIG

            self.ws = Workspace(subscription_id=aml_config['subscription_id'],
                                resource_group=aml_config['resource_group'],
                                workspace_name=aml_config['workspace_name'],
                                auth=svc_pr)
            print('AMLCompute constructor, AML workspace obtained.')

            internal_dir, output_dir = self._get_data_references(
                request_id, internal_datastore)

            compute_target = self.ws.compute_targets[
                aml_config['aml_compute_name']]

            dependencies = CondaDependencies.create(pip_packages=[
                'tensorflow-gpu==1.9.0', 'pillow', 'numpy', 'azure',
                'azure-storage-blob', 'azureml-defaults'
            ])

            amlcompute_run_config = RunConfiguration(
                conda_dependencies=dependencies)
            amlcompute_run_config.environment.docker.enabled = True
            amlcompute_run_config.environment.docker.gpu_support = True
            amlcompute_run_config.environment.docker.base_image = DEFAULT_GPU_IMAGE
            amlcompute_run_config.environment.spark.precache_packages = False

            # default values are required and need to be literal values or data references as JSON
            param_job_id = PipelineParameter(name='param_job_id',
                                             default_value='default_job_id')

            param_begin_index = PipelineParameter(name='param_begin_index',
                                                  default_value=0)
            param_end_index = PipelineParameter(name='param_end_index',
                                                default_value=0)

            param_detection_threshold = PipelineParameter(
                name='param_detection_threshold', default_value=0.05)
            param_batch_size = PipelineParameter(name='param_batch_size',
                                                 default_value=8)

            batch_score_step = PythonScriptStep(
                aml_config['script_name'],
                source_directory=aml_config['source_dir'],
                name='batch_scoring',
                arguments=[
                    '--job_id',
                    param_job_id,
                    '--model_name',
                    aml_config['model_name'],
                    '--input_container_sas',
                    input_container_sas,
                    '--internal_dir',
                    internal_dir,
                    '--begin_index',
                    param_begin_index,  # inclusive
                    '--end_index',
                    param_end_index,  # exclusive
                    '--output_dir',
                    output_dir,
                    '--detection_threshold',
                    param_detection_threshold,
                    '--batch_size',
                    param_batch_size
                ],
                compute_target=compute_target,
                inputs=[internal_dir],
                outputs=[output_dir],
                runconfig=amlcompute_run_config)

            self.pipeline = Pipeline(workspace=self.ws,
                                     steps=[batch_score_step])
            self.aml_config = aml_config
            print('AMLCompute constructor all good.')
        except Exception as e:
            raise RuntimeError(
                'Error in setting up AML Compute resource: {}.'.format(str(e)))
Example #29
from azureml.core import Workspace, Dataset, Experiment
from xgboost import XGBClassifier

from prep import CategoricalEncoder

from sklearn.preprocessing import StandardScaler
from azureml.core.authentication import ServicePrincipalAuthentication

# replace with your credentials
svc_pr = ServicePrincipalAuthentication(
    tenant_id="replace with yours",
    service_principal_id="replace with yours",
    service_principal_password="******")

ws = Workspace(subscription_id="",
               resource_group="azureML",
               workspace_name="Chun2",
               auth=svc_pr)
experiment_name = 'BanckChurner-HPO2'
experiment = Experiment(ws, experiment_name)

datastore = ws.get_default_datastore()
dataset = Dataset.Tabular.from_delimited_files(datastore.path('UI/02-28-2021_030313_UTC/BankChurners.csv'))
ds = dataset.to_pandas_dataframe()
############
target = 'Attrition_Flag'

vars_num = [c for c in ds.columns if ds[c].dtypes!='O' and c!=target]

vars_cat = [c for c in ds.columns if ds[c].dtypes=='O' and c!=target]
##############
"""make a pipeline of the preprocessing steps that are applied to the data before training. 
Example #30
# HINT: https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace.workspace?view=azure-ml-py

# TODO: Import Workspace from azureml core
from azureml.core import Workspace

# TODO: Create a Workspace instance and the get method to get your workspace
ws = Workspace(subscription_id="ec4e8801...",
               resource_group="testVDI",
               workspace_name="claes")

# TODO: Use the write_config() method to get the workspace config on your local machine
ws.write_config()