Example #1
def get_auth():
    """Authentication to access workspace"""
    try:
        auth = AzureCliAuthentication()
        auth.get_authentication_header()
    except AuthenticationException:
        logger.info("Authentication Error Occurred")

    return auth
Example #2
def get_auth():
    logger = logging.getLogger(__name__)
    logger.debug("Trying to create Workspace with CLI Authentication")
    try:
        auth = AzureCliAuthentication()
        auth.get_authentication_header()
    except AuthenticationException:
        logger.debug("Trying to create Workspace with Interactive login")
        auth = InteractiveLoginAuthentication()
    return auth
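Most of the snippets in this collection share the pattern above: try AzureCliAuthentication first and fall back to InteractiveLoginAuthentication when no CLI session exists. As a minimal usage sketch (assuming get_auth() is defined as above and a config.json was written earlier, e.g. by ws.write_config(".azureml")):

from azureml.core import Workspace

auth = get_auth()
ws = Workspace.from_config(auth=auth)
print(ws.name, ws.resource_group, sep="\t")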
Example #3
def get_auth():
    '''
    Authentication to access workspace
    '''
    try:
        auth = AzureCliAuthentication()
        auth.get_authentication_header()
    except AuthenticationException:
        print("Authentication Error Occurred")

    return auth
Example #4
def get_auth():
    """
    Method to get the correct Azure ML Authentication type

    Always start with CLI Authentication and if it fails, fall back
    to interactive login
    """
    try:
        auth_type = AzureCliAuthentication()
        auth_type.get_authentication_header()
    except AuthenticationException:
        auth_type = InteractiveLoginAuthentication()
    return auth_type
Example #5
def main(name: str, subscription_id: str, resource_group: str, location: str):
    """Create Workspace on Azure ML Service

        - https://docs.microsoft.com/pt-br/azure/machine-learning/how-to-manage-workspace?tab=python&tabs=python#create-multi-tenant
    """
    print(location)
    click.secho("[ML SERVICE] - Creating Workspace...", fg="green")

    cli_auth = AzureCliAuthentication()
    ws = Workspace.create(
        name=name,
        subscription_id=subscription_id,
        resource_group=resource_group,
        create_resource_group=True,
        location=location,
        auth=cli_auth,
    )

    ws.write_config(".azureml")
    print(
        "\tWorkspace name: " + ws.name,
        "Azure region: " + ws.location,
        "Subscription id: " + ws.subscription_id,
        "Resource group: " + ws.resource_group,
        sep="\n\t",
    )
Example #6
def get_auth(env_path: str) -> AbstractAuthentication:
    """

    :param env_path:
    :return:
    """
    logger = logging.getLogger(__name__)
    if get_key(env_path, "password") != "YOUR_SERVICE_PRINCIPAL_PASSWORD":
        logger.debug("Trying to create Workspace with Service Principal")
        aml_sp_password = get_key(env_path, "password")
        aml_sp_tennant_id = get_key(env_path, "tenant_id")
        aml_sp_username = get_key(env_path, "username")
        auth = ServicePrincipalAuthentication(
            tenant_id=aml_sp_tennant_id,
            service_principal_id=aml_sp_username,
            service_principal_password=aml_sp_password,
        )
    else:
        logger.debug("Trying to create Workspace with CLI Authentication")
        try:
            auth = AzureCliAuthentication()
            auth.get_authentication_header()
        except AuthenticationException:
            logger.debug("Trying to create Workspace with Interactive login")
            auth = InteractiveLoginAuthentication()

    return auth
Example #7
def trigger_data_prep():

    # get the parameter values
    workspace = sys.argv[1]
    subscription_id = sys.argv[2]
    resource_grp = sys.argv[3]

    domain = sys.argv[4]
    DBR_PAT_TOKEN = bytes(sys.argv[5], encoding='utf-8')  # adding b'

    notebook = "lgb_eq_sec.py"
    experiment_name = "experiment_model_release"

    # Print AML Version
    print("Azure ML SDK Version: ", azureml.core.VERSION)

    # Point file to conf directory containing details for the aml service

    cli_auth = AzureCliAuthentication()

    ws = Workspace(workspace_name = workspace,
                   subscription_id = subscription_id,
                   resource_group = resource_grp,
                   auth=cli_auth)

    # Create a new experiment
    print("Starting to create new experiment")
    Experiment(workspace=ws, name=experiment_name)

    # Upload notebook to Databricks

    print("Upload notebook to databricks")
    upload_notebook(domain, DBR_PAT_TOKEN, notebook)
Example #8
def trigger_env_prep():

    # Define Vars < Change the vars>.
    # In a production situation, don't put secrets in source code, but as secret variables,
    # see https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables
    workspace = "<Name of your workspace>"
    subscription_id = "<Subscription id>"
    resource_grp = "<Name of your resource group where aml service is created>"

    domain = "westeurope.azuredatabricks.net"  # change location in case databricks instance is not in westeurope
    databricks_name = "<<Your Databricks Name>>"
    dbr_pat_token_raw = "<<your Databricks Personal Access Token>>"

    DBR_PAT_TOKEN = bytes(dbr_pat_token_raw, encoding='utf-8')  # adding b'
    databricks_grp = resource_grp
    dataset = "AdultCensusIncome.csv"
    notebook = "3_IncomeNotebookDevops.py"
    experiment_name = "experiment_model_release"
    db_compute_name = "dbr-amls-comp"

    # Print AML Version
    print("Azure ML SDK Version: ", azureml.core.VERSION)

    # Point file to conf directory containing details for the aml service

    cli_auth = AzureCliAuthentication()
    ws = Workspace(workspace_name=workspace,
                   subscription_id=subscription_id,
                   resource_group=resource_grp,
                   auth=cli_auth)

    print(ws.name,
          ws._workspace_name,
          ws.resource_group,
          ws.location,
          sep='\t')

    # Create a new experiment
    print("Starting to create new experiment")
    Experiment(workspace=ws, name=experiment_name)

    # Upload notebook to Databricks

    print("Upload notebook to databricks")
    upload_notebook(domain, DBR_PAT_TOKEN, notebook)

    print("Add databricks env to Azure ML Service Compute")
    # Create databricks workspace in AML SDK
    try:
        databricks_compute = DatabricksCompute(workspace=ws,
                                               name=db_compute_name)
        print('Compute target {} already exists'.format(db_compute_name))
    except ComputeTargetException:
        print('Compute not found; will use the parameters below to attach a new one')
        config = DatabricksCompute.attach_configuration(
            resource_group=databricks_grp,
            workspace_name=databricks_name,
            access_token=dbr_pat_token_raw)
        databricks_compute = ComputeTarget.attach(ws, db_compute_name, config)
        databricks_compute.wait_for_completion(True)
Example #9
def get_workspace():
    env = EnvironmentVariables()
    cli_auth = AzureCliAuthentication()
    workspace = Workspace(workspace_name=env.workspace_name,
                          subscription_id=env.subscription_id,
                          resource_group=env.resource_group,
                          auth=cli_auth)
    return workspace
Example #10
def get_auth():
    '''
        Retrieve the user authentication. If they aren't logged in, this will
        prompt the standard interactive login method.

        PARAMS: None

        RETURNS: Authentication object
    '''
    auth = None

    print("Get auth...")
    try:
        auth = AzureCliAuthentication()
        auth.get_authentication_header()
    except AuthenticationException:
        auth = InteractiveLoginAuthentication()

    return auth
Example #11
def register_model(model_name, run_id):
    """Register the model to the AML Workspace"""
    cli_auth = AzureCliAuthentication()

    experiment = Experiment.from_directory(".", auth=cli_auth)
    run = Run(experiment, run_id)

    run.register_model(model_name,
                       model_path='outputs/final_model.hdf5',
                       model_framework='TfKeras',
                       model_framework_version='1.13')
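A hedged sketch of how this helper might be driven from a release pipeline; RUN_ID and the model name are hypothetical, with the run id for instance populated from the ##vso[task.setvariable variable=run_id] logging command in the HyperDrive example further down:

import os

# Hypothetical wiring: a prior pipeline step exported the chosen run id,
# and we register the model that run produced. Names are placeholders.
if __name__ == "__main__":
    register_model(model_name="food_classifier", run_id=os.environ["RUN_ID"])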
Example #12
    def setUpClass(cls):
        subscription_id = os.getenv("SUBSCRIPTION_ID")
        resource_group = os.getenv("RESOURCE_GROUP")
        workspace_name = os.getenv("WORKSPACE_NAME")

        auth = AzureCliAuthentication()

        setAutomatedMLWorkspace(auth=auth,
                                subscription_id=subscription_id,
                                resource_group=resource_group,
                                workspace_name=workspace_name)
Example #13
def main(epochs, iterations, compute_target, concurrent_runs):
    cli_auth = AzureCliAuthentication()

    experiment = Experiment.from_directory(".", auth=cli_auth)
    ws = experiment.workspace

    cluster = ws.compute_targets[compute_target]
    food_data = ws.datastores['food_images']

    script_arguments = {"--data-dir": food_data.as_mount(), "--epochs": epochs}

    tf_est = TensorFlow(source_directory=".",
                        entry_script='code/train/train.py',
                        script_params=script_arguments,
                        compute_target=cluster,
                        conda_packages=['pillow', 'pandas'],
                        pip_packages=['click', 'seaborn'],
                        use_docker=True,
                        use_gpu=True,
                        framework_version='1.13')

    # Run on subset of food categories
    tf_est.run_config.arguments.extend(
        ['apple_pie', 'baby_back_ribs', 'baklava', 'beef_carpaccio'])

    param_sampler = RandomParameterSampling({
        '--minibatch-size':
        choice(16, 32, 64),
        '--learning-rate':
        loguniform(-9, -6),
        '--optimizer':
        choice('rmsprop', 'adagrad', 'adam')
    })

    # Create Early Termination Policy
    etpolicy = BanditPolicy(evaluation_interval=2, slack_factor=0.1)

    # Create HyperDrive Run Configuration
    hyper_drive_config = HyperDriveConfig(
        estimator=tf_est,
        hyperparameter_sampling=param_sampler,
        policy=etpolicy,
        primary_metric_name='acc',
        primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
        max_total_runs=iterations,
        max_concurrent_runs=concurrent_runs)

    # Submit the Hyperdrive Run
    print("Submitting Hyperdrive Run")
    hd_run = experiment.submit(hyper_drive_config)
    hd_run.wait_for_completion(raise_on_error=True, show_output=True)
    print("Finishing Run")
    best_run = hd_run.get_best_run_by_primary_metric()
    print(f'##vso[task.setvariable variable=run_id]{best_run.id}')
Example #14
	def __init__(self, ws_config_file):
		self.ws = Workspace.from_config(
			auth=AzureCliAuthentication(),
			path=os.path.join(
				os.path.dirname(os.path.realpath(__file__)),
				ws_config_file
			)
		)
		print(
			f"Found workspace {self.ws.name} \n\tat location {self.ws.location}\n\t with the id:{self.ws._workspace_id}")
		self.experiments = {}
Example #15
    def __init__(self, ws_config_file: str, subscription_id: str,
                 aml_resource_group: str, aml_workspace_name: str):
        """Initializes an AML workspace object"""
        auth = AzureCliAuthentication()
        if ws_config_file is not None:
            self.ws = Workspace.from_config(
                auth=auth,
                path=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  ws_config_file))
        else:
            self.ws = Workspace.get(aml_workspace_name, auth, subscription_id,
                                    aml_resource_group)

        _logger.info(f"Found workspace {self.ws.name}")
Example #16
def get_cli_specific_auth():
    """
    Returns the cli specific auth.
    For azure.cli, returns an object of azureml.core.authentication.AzureCliAuthentication
    For azureml._cli, returns an object of azureml.core.authentication.ArmTokenAuthentication
    :return:
    :rtype: azureml.core.authentication.AbstractAuthentication
    """
    if AZUREML_CLI_IN_USE:
        if AZUREML_ARM_ACCESS_TOKEN in os.environ:
            return ArmTokenAuthentication(os.environ[AZUREML_ARM_ACCESS_TOKEN])
        else:
            return InteractiveLoginAuthentication()

    # az cli case
    return AzureCliAuthentication()
Example #17
def trigger_data_prep():

    # Define Vars < Change the vars>. 
    # In a production situation, don't put secrets in source code, but as secret variables, 
    # see https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables
#    workspace="<Name of your workspace>"
#    subscription_id="<Subscription id>"
#    resource_grp="<Name of your resource group where aml service is created>"

#    domain = "westeurope.azuredatabricks.net" # change location in case databricks instance is not in westeurope
#    DBR_PAT_TOKEN = bytes("<<your Databricks Personal Access Token>>", encoding='utf-8') # adding b'

# inserted new code for my run 

    workspace="aiml-ws1"
    subscription_id="05c034fe-a6e2-42b7-bdfe-519a3b3a40cf"
    resource_grp="vikram-aiml"

    domain = "eastus.azuredatabricks.net" # change location in case databricks instance is not in westeurope
    DBR_PAT_TOKEN = bytes("dapi2dd4008fef79f1f64392ca27b7a2888e", encoding='utf-8') # adding b'


    dataset = "AdultCensusIncome.csv"
    notebook = "3_IncomeNotebookDevops.py"
    experiment_name = "experiment_model_release"

    # Print AML Version
    print("Azure ML SDK Version: ", azureml.core.VERSION)

    # Point file to conf directory containing details for the aml service

    cli_auth = AzureCliAuthentication()
    ws = Workspace(workspace_name = workspace,
                   subscription_id = subscription_id,
                   resource_group = resource_grp,
                   auth=cli_auth)

    print(ws.name, ws._workspace_name, ws.resource_group, ws.location, sep = '\t')

    # Create a new experiment
    print("Starting to create new experiment")
    Experiment(workspace=ws, name=experiment_name)

    # Upload notebook to Databricks

    print("Upload notebook to databricks")
    upload_notebook(domain, DBR_PAT_TOKEN, notebook)
Example #18
def test_response(service_name, data_path, auth_enabled):
    cli_auth = AzureCliAuthentication()
    ws = Workspace.from_config(auth=cli_auth)

    with open(data_path, "r") as file_obj:
        data = json.load(file_obj)

    service = ws.webservices[service_name]
    headers = {}
    if auth_enabled:
        auth_key = service.get_keys()[0]
        headers = {"Authorization": "Bearer {0}".format(auth_key)}

    response = requests.post(service.scoring_uri, json=data, headers=headers)

    response_data = json.loads(response.content)
    
    assert response.status_code == 200
Example #19
def workspace(*args, **kwargs):
    ws = None
    try:
        click.secho("[ML SERVICE] - Connection with workspace...",
                    fg="green")
        cli_auth = AzureCliAuthentication()
        ws = Workspace.from_config(path=os.path.join(
            ".azureml", "config.json"),
                                   auth=cli_auth)
        print(
            "\tWorkspace name: " + ws.name,
            "Azure region: " + ws.location,
            "Subscription id: " + ws.subscription_id,
            "Resource group: " + ws.resource_group,
            sep="\n\t",
        )
    except WorkspaceException:
        raise WorkspaceException("Failed connection with workspace")
    return f(*args, **kwargs, ws=ws)
Example #20
def get_auth(env_path):
    logger = logging.getLogger(__name__)
    if os.environ.get("AML_SP_PASSWORD", None):
        logger.debug("Trying to create Workspace with Service Principal")
        aml_sp_password = os.environ.get("AML_SP_PASSWORD")
        aml_sp_tennant_id = os.environ.get("AML_SP_TENNANT_ID")
        aml_sp_username = os.environ.get("AML_SP_USERNAME")
        auth = ServicePrincipalAuthentication(tenant_id=aml_sp_tennant_id,
                                              username=aml_sp_username,
                                              password=aml_sp_password)
    else:
        logger.debug("Trying to create Workspace with CLI Authentication")
        try:
            auth = AzureCliAuthentication()
            auth.get_authentication_header()
        except AuthenticationException:
            logger.debug("Trying to create Workspace with Interactive login")
            auth = InteractiveLoginAuthentication()
    return auth
Example #21
File: workspace.py Project: keshava/dlsi
def _get_auth():
    """Returns authentication to Azure Machine Learning workspace."""
    logger = logging.getLogger(__name__)
    if os.environ.get("AML_SP_PASSWORD", None):
        logger.debug("Trying to authenticate with Service Principal")
        aml_sp_password = os.environ.get("AML_SP_PASSWORD")
        aml_sp_tennant_id = os.environ.get("AML_SP_TENNANT_ID")
        aml_sp_username = os.environ.get("AML_SP_USERNAME")
        auth = ServicePrincipalAuthentication(aml_sp_tennant_id, aml_sp_username, aml_sp_password)
    else:
        logger.debug("Trying to authenticate with CLI Authentication")
        try:
            auth = AzureCliAuthentication()
            auth.get_authentication_header()
        except AuthenticationException:
            logger.debug("Trying to authenticate with Interactive login")
            auth = InteractiveLoginAuthentication()

    return auth
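For completeness, a hedged sketch of the environment this service principal branch expects; the values are placeholders and would normally arrive as secret pipeline variables rather than hard-coded strings:

import os

# Placeholder values only; note the env var is spelled 'TENNANT' in the code above.
os.environ["AML_SP_TENNANT_ID"] = "<tenant-id>"
os.environ["AML_SP_USERNAME"] = "<service-principal-app-id>"
os.environ["AML_SP_PASSWORD"] = "<service-principal-secret>"

auth = _get_auth()  # now takes the ServicePrincipalAuthentication branch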
Example #22
def trigger_env_prep():

    # Define Vars < Change the vars>.
    # In a production situation, don't put secrets in source code, but as secret variables,
    # see https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables
    workspace = sys.argv[1]
    subscription_id = sys.argv[2]
    resource_grp = sys.argv[3]

    domain = sys.argv[4]
    dbr_pat_token_raw = sys.argv[5]

    DBR_PAT_TOKEN = bytes(dbr_pat_token_raw, encoding='utf-8')  # adding b'
    dataset = "AdultCensusIncome.csv"
    notebook = "3_IncomeNotebookDevops.py"
    experiment_name = "experiment_model_release"

    # Print AML Version
    print("Azure ML SDK Version: ", azureml.core.VERSION)

    # Point file to conf directory containing details for the aml service

    cli_auth = AzureCliAuthentication()
    ws = Workspace(workspace_name=workspace,
                   subscription_id=subscription_id,
                   resource_group=resource_grp,
                   auth=cli_auth)

    print(ws.name,
          ws._workspace_name,
          ws.resource_group,
          ws.location,
          sep='\t')

    # Create a new experiment
    print("Starting to create new experiment")
    Experiment(workspace=ws, name=experiment_name)

    # Upload notebook to Databricks

    print("Upload notebook to databricks")
    upload_notebook(domain, DBR_PAT_TOKEN, notebook)
Example #23
def get_auth():
    """Get an auth object for use with Workspace objects."""
    if os.environ.get("AML_SP_PASSWORD", None):
        print("Trying to create Workspace with Service Principal")
        aml_sp_password = os.environ.get("AML_SP_PASSWORD")
        aml_sp_tenant_id = os.environ.get("AML_SP_TENANT_ID")
        aml_sp_username = os.environ.get("AML_SP_USERNAME")
        auth = ServicePrincipalAuthentication(
            tenant_id=aml_sp_tenant_id,
            service_principal_id=aml_sp_username,
            service_principal_password=aml_sp_password)
    else:
        print("Trying to create Workspace with CLI Authentication")
        try:
            auth = AzureCliAuthentication()
            auth.get_authentication_header()
        except AuthenticationException:
            print("Trying to create Workspace with Interactive login")
            auth = InteractiveLoginAuthentication()
    return auth
Example #24
    def open_azure_ml_workspace(self):
        logging.info(
            "connecting to workspace name:{0} subscription ID:{1}, resource group:{2}"
            .format(
                self.args.workspace_name,
                self.args.subscription_id,
                self.args.resource_group,
            ))

        if self.args.authentication == AUTH_CLI:
            logging.info("using Azure CLI authentication")
            auth = AzureCliAuthentication()
        elif self.args.authentication == AUTH_MI:
            logging.info("using managed identity authentication")
            auth = MsiAuthentication()
        elif self.args.authentication == AUTH_SP:
            logging.info(
                "using service principal authentication tenant ID:{0} service principal ID:{1}"
                .format(
                    self.args.tenant_id,
                    self.args.service_principal_id,
                ))
            auth = ServicePrincipalAuthentication(
                tenant_id=self.args.tenant_id,
                service_principal_id=self.args.service_principal_id,
                service_principal_password=self.args.service_principal_password,
            )
        else:
            ex = Exception('Invalid authentication type.')
            logging.error(ex)
            raise ex

        workspace = Workspace(
            subscription_id=self.args.subscription_id,
            resource_group=self.args.resource_group,
            workspace_name=self.args.workspace_name,
            auth=auth,
        )

        return workspace
Example #25
def get_auth(env_path):
    logger = logging.getLogger(__name__)
    if get_key(env_path, 'password') != "YOUR_SERVICE_PRINCIPAL_PASSWORD":
        logger.debug("Trying to create Workspace with Service Principal")
        aml_sp_password = get_key(env_path, 'password')
        aml_sp_tennant_id = get_key(env_path, 'tenant_id')
        aml_sp_username = get_key(env_path, 'username')
        auth = ServicePrincipalAuthentication(
            tenant_id=aml_sp_tennant_id,
            username=aml_sp_username,
            password=aml_sp_password
        )
    else:
        logger.debug("Trying to create Workspace with CLI Authentication")
        try:
            auth = AzureCliAuthentication()
            auth.get_authentication_header()
        except AuthenticationException:
            logger.debug("Trying to create Workspace with Interactive login")
            auth = InteractiveLoginAuthentication()

    return auth
Example #26
def trigger_training_job():

    # Define Vars < Change the vars>.
    # In a production situation, don't put secrets in source code, but as secret variables,
    # see https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables
    workspace = "<Name of your workspace>"
    subscription_id = "<Subscription id>"
    resource_grp = "<Name of your resource group where aml service is created>"

    domain = "westeurope.azuredatabricks.net"  # change location in case databricks instance is not in westeurope
    dbr_pat_token_raw = "<<your Databricks Personal Access Token>>"

    DBR_PAT_TOKEN = bytes(dbr_pat_token_raw, encoding='utf-8')  # adding b'
    notebookRemote = "/3_IncomeNotebookDevops"
    experiment_name = "experiment_model_release"
    model_name_run = datetime.datetime.now().strftime(
        "%Y%m%d%H%M%S"
    ) + "_dbrmod.mml"  # in case you want to change the name, keep the .mml extension
    model_name = "databricksmodel.mml"  # in case you want to change the name, keep the .mml extension
    db_compute_name = "dbr-amls-comp"

    #
    # Step 1: Run notebook using Databricks Compute in AML SDK
    #
    cli_auth = AzureCliAuthentication()

    ws = Workspace(workspace_name=workspace,
                   subscription_id=subscription_id,
                   resource_group=resource_grp,
                   auth=cli_auth)
    ws.get_details()

    #
    # Step 2: Create job and attach it to cluster
    #
    # In this step, secrets are added as parameters (spn_tenant, spn_clientid, spn_clientsecret)
    # Never do this in a production situation; use a secret scope backed by Key Vault instead
    # See https://docs.azuredatabricks.net/user-guide/secrets/secret-scopes.html#azure-key-vault-backed-scopes
    response = requests.post(
        'https://%s/api/2.0/jobs/create' % domain,
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        json={
            "name": "Run AzureDevopsNotebook Job",
            "new_cluster": {
                "spark_version": "4.0.x-scala2.11",
                "node_type_id": "Standard_D3_v2",
                "spark_env_vars": {
                    'PYSPARK_PYTHON': '/databricks/python3/bin/python3',
                },
                "autoscale": {
                    "min_workers": 1,
                    "max_workers": 2
                }
            },
            "libraries": [{
                "pypi": {
                    "package": "azureml-sdk[databricks]"
                }
            }],
            "notebook_task": {
                "notebook_path":
                notebookRemote,
                "base_parameters": [{
                    "key": "subscription_id",
                    "value": subscription_id
                }, {
                    "key": "resource_group",
                    "value": resource_grp
                }, {
                    "key": "workspace_name",
                    "value": workspace
                }, {
                    "key": "model_name",
                    "value": model_name_run
                }]
            }
        })

    if response.status_code != 200:
        print("Error launching cluster: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(2)

    #
    # Step 3: Start job
    #
    databricks_job_id = response.json()['job_id']

    response = requests.post(
        'https://%s/api/2.0/jobs/run-now' % domain,
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        json={"job_id": +databricks_job_id})

    if response.status_code != 200:
        print("Error launching cluster: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(3)

    print(response.json()['run_id'])

    #
    # Step 4: Wait until job is finished
    #
    databricks_run_id = response.json()['run_id']
    scriptRun = 1
    count = 0
    while scriptRun == 1:
        response = requests.get(
            'https://%s/api/2.0/jobs/runs/get?run_id=%s' %
            (domain, databricks_run_id),
            headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        )

        state = response.json()['state']
        life_cycle_state = state['life_cycle_state']
        print(state)

        if life_cycle_state in ["TERMINATED", "SKIPPED", "INTERNAL_ERROR"]:
            result_state = state['result_state']
            if result_state == "SUCCESS":
                print("run ok")
                scriptRun = 0
            #exit(0)
            else:
                exit(4)
        elif count > 180:
            print("time out occurred after 30 minutes")
            exit(5)
        else:
            count += 1
            time.sleep(30)  # wait 30 seconds before next status update

    #
    # Step 5: Retrieve model from dbfs
    #
    mdl, ext = model_name_run.split(".")
    model_zip_run = mdl + ".zip"

    response = requests.get(
        'https://%s/api/2.0/dbfs/read?path=/%s' % (domain, model_zip_run),
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN})
    if response.status_code != 200:
        print("Error copying dbfs results: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(1)

    model_output = base64.b64decode(response.json()['data'])

    # download model in deploy folder
    os.chdir("deploy")
    with open(model_zip_run, "wb") as outfile:
        outfile.write(model_output)
    print("Downloaded model {} to Project root directory".format(model_name))

    #
    # Step 6: Retrieve model metrics from dbfs
    #
    mdl, ext = model_name_run.split(".")
    model_metrics_json_run = mdl + "_metrics.json"

    response = requests.get(
        'https://%s/api/2.0/dbfs/read?path=/%s' %
        (domain, model_metrics_json_run),
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN})
    if response.status_code != 200:
        print("Error copying dbfs results: %s: %s" %
              (response.json()["error_code"], response.json()["message"]))
        exit(2)

    model_metrics_output = json.loads(base64.b64decode(
        response.json()['data']))

    #
    # Step 7: Put model and metrics to Azure ML Service
    #

    # start a training run by defining an experiment
    myexperiment = Experiment(ws, experiment_name)
    run = myexperiment.start_logging()
    run.upload_file("outputs/" + model_zip_run, model_zip_run)

    #run.log("pipeline_run", pipeline_run.id)
    run.log("au_roc", model_metrics_output["Area_Under_ROC"])
    run.log("au_prc", model_metrics_output["Area_Under_PR"])
    run.log("truePostive", model_metrics_output["True_Positives"])
    run.log("falsePostive", model_metrics_output["False_Positives"])
    run.log("trueNegative", model_metrics_output["True_Negatives"])
    run.log("falseNegative", model_metrics_output["False_Negatives"])

    run.complete()
    run_id = run.id
    print("run id:", run_id)

    # unzip file to model_name_run
    shutil.unpack_archive(model_zip_run, model_name_run)

    model = Model.register(
        model_path=model_name_run,  # this points to a local file
        model_name=model_name,  # this is the name the model is registered as
        tags={
            "area": "spar",
            "type": "regression",
            "run_id": run_id
        },
        description="Medium blog test model",
        workspace=ws,
    )
    print("Model registered: {} \nModel Description: {} \nModel Version: {}".
          format(model.name, model.description, model.version))

    # Step 8. Finally, writing the registered model details to conf/model.json
    model_json = {}
    model_json["model_name"] = model.name
    model_json["model_version"] = model.version
    model_json["run_id"] = run_id
    model_json["model_name_run"] = model_name_run
    with open("../conf/model.json", "w") as outfile:
        json.dump(model_json, outfile)
Example #27
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
import math

from azureml.core import Workspace
from azureml.core.authentication import AzureCliAuthentication
from azureml.core import Experiment
from azureml.opendatasets import Diabetes
from azureml.core import Run

#Logon to Azure

cli_auth = AzureCliAuthentication()

ws = Workspace(subscription_id="affeac10-5929-43aa-9abd-4ada85acb943",
               resource_group="mlexp",
               workspace_name="ml_workspace",
               auth=cli_auth)

# Create the workspace using the specified parameters
# ws = Workspace.create(name = "ml_workspace",
#                      subscription_id = "affeac10-5929-43aa-9abd-4ada85acb943",
#                      resource_group = 'mlexp',
#                      location = "eastus2",
#                      create_resource_group = False,
#                      sku = 'basic',
#                      exist_ok = True)
Example #28
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core import Workspace
from azureml.core.authentication import AzureCliAuthentication

# load Azure ML workspace
workspace = Workspace.from_config(auth=AzureCliAuthentication())

# Create compute target if not present
# Choose a name for your GPU cluster
gpu_cluster_name = "fullcomputegpu"

# Verify that cluster does not exist already
try:
    gpu_cluster = ComputeTarget(workspace=workspace, name=gpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(
        vm_size='Standard_NC12', max_nodes=8)
    gpu_cluster = ComputeTarget.create(workspace, gpu_cluster_name,
                                       compute_config)

gpu_cluster.wait_for_completion(show_output=True)
Example #29
"""
Helper to generate an AAD token
"""
from azureml.core.authentication import AzureCliAuthentication

print(
    AzureCliAuthentication().get_authentication_header().get("Authorization"))
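Since get_authentication_header() returns a ready-made {"Authorization": "Bearer ..."} dict, it can be passed straight to an HTTP client. A minimal sketch (assuming the requests package; the subscription id is a placeholder) that lists resource groups through the Azure Resource Manager REST API:

import requests
from azureml.core.authentication import AzureCliAuthentication

headers = AzureCliAuthentication().get_authentication_header()

# <subscription-id> is a placeholder; 2021-04-01 is a known ARM api-version.
resp = requests.get(
    "https://management.azure.com/subscriptions/<subscription-id>"
    "/resourcegroups?api-version=2021-04-01",
    headers=headers,
)
print(resp.status_code, resp.json())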
Example #30
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT

import sys
from typing import Tuple

import click
from azureml.core import (ComputeTarget, Dataset, Environment,
                          RunConfiguration, Workspace)
from azureml.core.authentication import AzureCliAuthentication
from azureml.core.experiment import Experiment
from azureml.pipeline.core import (Pipeline, PipelineData, PipelineParameter,
                                   PublishedPipeline)
from azureml.pipeline.steps import DatabricksStep, PythonScriptStep

CLI_AUTH = AzureCliAuthentication()
# noinspection PyTypeChecker
WS = Workspace.from_config(auth=CLI_AUTH)
RC = RunConfiguration()
RC.environment = Environment.get(WS, "lightgbm")


# noinspection PyTypeChecker
def create_databricks_step(
        input_dataset: Dataset, compute: ComputeTarget,
        debug_run: bool) -> Tuple[DatabricksStep, PipelineData]:
    output_data = PipelineData(name="ParquetFiles",
                               datastore=WS.get_default_datastore(),
                               is_directory=True)

    node_size = 'Standard_DS4_v2'