Example no. 1
def Prediction():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    location = request.json['location']
    file_name = request.json['file_name']
    target_var = request.json['target_var']
    best_model = request.json['best_model']
    Model_path = request.json['Model_path']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

    dataset_name = file_name
    # Get a dataset by name
    df = Dataset.get_by_name(workspace=ws, name=dataset_name)
    stock_dataset_df = df.to_pandas_dataframe()
    print('file successfully received.')
    X = df.drop_columns(columns=[target_var])
    y = df.keep_columns(columns=[target_var], validate=True)
    y_df = stock_dataset_df[target_var].values
    x_df = stock_dataset_df.drop([target_var], axis=1)
    print(y)

    #from azureml.core import Run
    #experiment=Experiment(ws, workspace_name)
    #from azureml.core.model import Model
    #model = Model(ws, name=Model_path)
    #model.download(exist_ok=True)
    from sklearn.externals import joblib
    cwd = 'D:\\DCSAIAUTOML\\BestModels\\Azure'
    model_path = os.path.join(cwd, Model_path, best_model, "outputs")
    #model_path1 = os.path.join(model_path, "outputs", "model.pkl")
    print(model_path)
    os.chdir(model_path)
    model = joblib.load('model.pkl')
    #best_run = Run(experiment=experiment, run_id='AutoML_74e9d9dc-f347-4392-b8bb-3edeb4a6afad_8')
    #fitted_model = Run(experiment=experiment, run_id='AutoML_74e9d9dc-f347-4392-b8bb-3edeb4a6afad_8')
    print(model)
    try:
        y_predict = model.predict(x_df)
        print(y_predict)
        #prediction_toJson = y_predict.to_json(orient='columns')
        #print(prediction_toJson)
        df = pd.DataFrame(y_predict)
        df.rename(columns={0: "Prediction"}, inplace=True)
        #stock_df = stock_dataset_df[['SepalLengthCm','SepalWidthCm','Species']]
        result = pd.concat([stock_dataset_df, df], axis=1)
        result.to_csv(
            'D:\\PredictionResult\\Azure\\prediction_azure_health.csv',
            index=False,
            date_format='%Y%m%d')
        result.head()
        prediction_toJson = result.to_json(orient='records')
        return prediction_toJson

    except Exception as e:
        error_statement = str(e)
        print("Error statement: ", error_statement)
        return error_statement
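# For reference, the Prediction handler above expects a JSON body with the keys it reads
# via request.json; a minimal illustrative payload (all values are placeholders, not
# taken from the original):
example_request_body = {
    "subscription_id": "<subscription-id>",
    "resource_group": "<resource-group>",
    "workspace_name": "<workspace-name>",
    "location": "<azure-region>",
    "file_name": "<registered-dataset-name>",
    "target_var": "<target-column-name>",
    "best_model": "<best-model-folder>",
    "Model_path": "<model-folder>",
}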
import json
import pickle
import numpy as np
import pandas as pd
from azureml.core.workspace import Workspace
import azureml.train.automl
from sklearn.externals import joblib
from azureml.core.model import Model

ws = Workspace.from_config('./config.json')

from azureml.core.webservice import Webservice, AciWebservice, AksWebservice
service = AciWebservice(ws, "sentiment-scorer-korean")
# service = AksWebservice(ws, "sentiment-scorer-korean-aks")

# input_sample = pd.DataFrame({'id': pd.Series(['6471903'], dtype='int64'), 'document': pd.Series(['진짜 별로다 헐 ㅡ'], dtype='object')})
from load_dataset import testdata as input_sample

import json
test = json.dumps({"data": input_sample.values.tolist()})
result = service.run(input_data=bytes(test, encoding="utf8"))

input_sample['predicted'] = list(json.loads(result).values())[0]
print(input_sample)
from azureml.core.model import Model as azModel
from azureml.core.workspace import Workspace
import os
import shutil
import argparse

try:
    sub_id = os.environ["SUBSCRIPTION_ID"]
    rg = os.environ["RESOURCE_GROUP"]
    ml_ws_name = os.environ["ML_WS_NAME"]
except Exception:
    parser = argparse.ArgumentParser()
    parser.add_argument("--sub_id")
    parser.add_argument("--rg")
    parser.add_argument("--ml_ws_name")
    args = parser.parse_args()
    sub_id = args.sub_id
    rg = args.rg
    ml_ws_name = args.ml_ws_name

az_ws = Workspace(sub_id, rg, ml_ws_name)

prefix = "./src/ai_acc_quality/ml/ml_assets/"

if not os.path.exists(prefix):
    os.makedirs(prefix)
else:
    shutil.rmtree(prefix)
    os.makedirs(prefix, exist_ok=True)

azml_model = azModel(az_ws, name="anomaly_enc_dec")
azml_model.download(target_dir=prefix)
Example no. 4
from azureml.core.workspace import Workspace
from azureml.core.compute import ComputeTarget, AksCompute
from azureml.exceptions import ComputeTargetException
from azureml.core.webservice import AksWebservice
from azureml.core.model import InferenceConfig, Model
from azureml.core.environment import Environment, DEFAULT_GPU_IMAGE

# Initialize a workspace
ws = Workspace.from_config(
    "C:/Users/Danilo.Bento/Icon Dropbox/DEVDATA/RO/DEVELOPMENT/SIB2/dev/.azureml/config.json"
)
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group,
      'Workspace connected',
      sep='\n')

# Choose a name for your cluster
aks_name = "SIB2-AKS-GPU"

# Check to see if the cluster already exists and create it if it doesn't exist
try:
    aks_target = ComputeTarget(workspace=ws, name=aks_name)
    print('Found existing compute target')
except ComputeTargetException:
    print('Creating a new compute target...')
    # Provision AKS cluster with GPU machine
    prov_config = AksCompute.provisioning_configuration(vm_size="Standard_NC6")

    # Create the cluster
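    # The example is cut off here; a minimal sketch of how the cluster creation
    # typically completes with this SDK (an assumed continuation, not the original code):
    aks_target = ComputeTarget.create(workspace=ws,
                                      name=aks_name,
                                      provisioning_configuration=prov_config)
    aks_target.wait_for_completion(show_output=True)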
Example no. 5
cli_auth = AzureCliAuthentication()

webservice_name = 'turbofan-rul'

################ Workspace ##############################
with open("./../config/aml_config.json") as f:
    config = json.load(f)

workspace_name = config["workspace_name"]
resource_group = config["resource_group"]
subscription_id = config["subscription_id"]
workspace_region = config["location"]

#Interactive Authentication
ws = Workspace(workspace_name=workspace_name,
               subscription_id=subscription_id,
               resource_group=resource_group,
               auth=cli_auth)

run = [x for x in ws.experiments['gbr-turbofan'].get_runs()][0]
run_metrics = run.get_metrics()

print(run)

model = run.register_model(model_name="turbofan-rul",
                           model_path="outputs/model.pkl",
                           tags={
                               "mae": run_metrics["mae"],
                               "python version": sys.version[0:6]
                           })

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
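# The call above is cut off; an assumed completion of the ACI deployment, where
# inference_config is a hypothetical azureml.core.model.InferenceConfig built from the
# project's scoring script and environment (cpu_cores/memory_gb values are illustrative):
deployment_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
service = Model.deploy(ws, webservice_name, [model], inference_config, deployment_config)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)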
def RunAutoML():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    file_name = request.json['file_name']
    location = request.json['location']
    target_var = request.json['target_var']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

    dataset_name = file_name

    # Get a dataset by name
    df = Dataset.get_by_name(workspace=ws, name=dataset_name)
    stock_dataset_df = df.to_pandas_dataframe()
    print('file successfully received.')
    stock_dataset_df.head()
    #stock_dataset_json = stock_dataset_df.to_json(orient='split')
    #print(stock_dataset_json)
    y_df = stock_dataset_df[target_var].values
    x_df = stock_dataset_df.drop([target_var], axis=1)
    print(y_df)
    ExperimentName = request.json['ExperimentName']
    tasks = request.json['tasks']
    iterations = request.json['iterations']
    n_cross_validations = request.json['n_cross_validations']
    iteration_timeout_minutes = request.json['iteration_timeout_minutes']
    primary_metric = request.json['primary_metric']
    max_concurrent_iterations = request.json['max_concurrent_iterations']
    best_model = request.json['best_model']

    #n_cross_validations = request.json['n_cross_validations']

    try:
        automl_settings = {
            "name": ExperimentName,
            "iteration_timeout_minutes": iteration_timeout_minutes,
            "iterations": iterations,
            "n_cross_validations": n_cross_validations,
            "primary_metric": primary_metric,
            "preprocess": True,
            "max_concurrent_iterations": max_concurrent_iterations,
            "verbosity": logging.INFO
        }

        automl_config = AutoMLConfig(
            task=tasks,
            debug_log='automl_errors.log',
            path=
            'D:\\Stock_Prediction\\AutoML_Azure\\python\\Flask_API_Azure\\log',
            #compute_target = 'Automlvm',
            X=x_df,
            y=y_df,
            **automl_settings,
        )

        experiment = Experiment(ws, ExperimentName)
        remote_run = experiment.submit(automl_config, show_output=True)
        best_run, fitted_model = remote_run.get_output()
        #print(best_run)
        print(best_run.get_file_names())
        #Register the model
        from datetime import date
        model = best_run.register_model(model_name=best_model +
                                        str(date.today()),
                                        model_path='outputs/model.pkl')
        print(model.name, model.id, model.version, sep='\t')
        children = list(remote_run.get_children())
        metricslist = {}
        for run in children:
            properties = run.get_properties()
            metrics = {
                k: v
                for k, v in run.get_metrics().items() if isinstance(v, float)
            }
            metricslist[int(properties['iteration'])] = metrics

        rundata = pd.DataFrame(metricslist).sort_index(axis=1)
        rundata.rename(columns={
            0: "one",
            1: "two",
            2: "three",
            3: "four",
            4: "five",
            5: "six",
            6: "seven",
            7: "eight",
            8: "nine",
            9: "ten",
        },
                       inplace=True)
        rundata_toJson = rundata.to_json(orient='columns')
        print(rundata_toJson)
        return rundata_toJson
    except Exception:

        return 'error'
Example no. 7
import numpy as np
from sklearn.metrics import mean_absolute_error
from azureml.train.automl.automlexplainer import retrieve_model_explanation
from azureml.core.model import Model
from azureml.core.image import ContainerImage
from azureml.core.image.image import Image
from azureml.core import Webservice
from azureml.core.webservice import AciWebservice

# try:
# setting the local env to handle missing packages
run_user_managed = RunConfiguration()
run_user_managed.environment.python.user_managed_dependencies = False

# Create workspace object for existing one and create an experiment
ws = Workspace.from_config('subscription.json')
print(ws.name, ws.location, ws.resource_group, ws.location, sep='\t')
experiment = Experiment(workspace=ws, name='experiment1')

# full path to training data,testing data
file_path1 = os.path.join(os.getcwd(), "cumodelwo2014.csv")
dflowtr = dprep.auto_read_file(path=file_path1)
file_path2 = os.path.join(os.getcwd(), "test2014.csv")
dflowte = dprep.auto_read_file(path=file_path2)

# Specifying x(causal) and y(response) attributes in training data
dflowtr_x = dflowtr.keep_columns([
    'cell-ID', 'Soil_Name', 'MEAN_Yld_V', 'COUNT_Yld', 'MEAN_Eleva',
    'RANGE_Elev', 'Crop-Type', 'V.A.T(F)', 'R.A.T(F)', 'M.A.T(F)',
    'V.PET(inch)', 'R.PET(inch)', 'M.PET(inch)', 'V.T.R(inch)', 'R.T.R(inch)',
    'M.T.R(inch)'
Example no. 8
import argparse
from pathlib import Path

from azureml.core.datastore import Datastore
from azureml.core.workspace import Workspace

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--subscription-id", type=str)
    parser.add_argument("--resource-group", type=str)
    parser.add_argument("--workspace-name", type=str)
    parser.add_argument("--datastore-name", type=str)
    parser.add_argument("--data-directory", type=str)
    parser.add_argument("--dataset-name", type=str)
    args = parser.parse_args()

    print(args.workspace_name)
    workspace = Workspace(
        subscription_id=args.subscription_id,
        resource_group=args.resource_group,
        workspace_name=args.workspace_name,
    )
    datastore = Datastore.get(workspace, args.datastore_name)
    local_path = Path(args.data_directory)
    for phase in ["train", "val"]:
        local_directory = str(local_path / phase)
        target_path = str(Path(args.dataset_name) / phase)
        datastore.upload(local_directory,
                         target_path=target_path,
                         show_progress=True)
Example no. 9
)
from azureml.pipeline.steps import PythonScriptStep, AutoMLStep
from azureml.train.automl import AutoMLConfig

print("This notebook was created using version 1.6.0 of the Azure ML SDK")
print(
    "You are currently using version",
    azureml.core.VERSION,
    "of the Azure ML SDK",
)

ws = Workspace(
    subscription_id="45b59352-da58-4e3a-beab-1a4518951e4e",
    resource_group="kk6gpv-rg",
    workspace_name="kk6gpv-aml",
    auth=ServicePrincipalAuthentication(
        tenant_id=os.environ["tenant_id"],
        service_principal_id=os.environ["sp_id"],
        service_principal_password=os.environ["sp_password"],
    ),
)
dstor = ws.get_default_datastore()

# cancel all pipeline schedules
print("Scheduled pipelines before:")
scheds = Schedule.list(ws)
print(scheds)
for sched in scheds:
    sched.disable()
    print(sched.id)

print("Scheduled pipelines after:")
Example no. 10
def RunAutoMLForecast():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    file_name = request.json['file_name']
    location = request.json['location']
    target_var = request.json['target_var']
    cluster_name = request.json['cluster_name']
    best_model = request.json['best_model']
    time_column_name = request.json['time_column_name']
    max_horizon = request.json['max_horizon']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')
    compute_target = AmlCompute(ws, cluster_name)
    print('Found existing AML compute context.')
    dataset_name = file_name
    time_column_name = time_column_name
    # Get a dataset by name
    dataset = Dataset.get_by_name(workspace=ws,
                                  name=dataset_name).with_timestamp_columns(
                                      fine_grain_timestamp=time_column_name)
    print(dataset)
    #df_ts = Dataset.Tabular.from_delimited_files(df_ts)
    dataset.to_pandas_dataframe().describe()
    dataset.take(3).to_pandas_dataframe()
    print(dataset)
    #y_df = df_ts[target_var].values
    #x_df = df_ts.drop([target_var], axis=1)
    print('file successfully received.')
    #stock_dataset_df.head()
    # create a new RunConfig object
    conda_run_config = RunConfiguration(framework="python")
    conda_run_config.environment.docker.enabled = True
    conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
    cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'],
                                  conda_packages=['numpy', 'py-xgboost<=0.80'])
    conda_run_config.environment.python.conda_dependencies = cd
    print('run config is ready')
    ExperimentName = request.json['ExperimentName']
    tasks = request.json['tasks']
    iterations = request.json['iterations']
    n_cross_validations = request.json['n_cross_validations']
    iteration_timeout_minutes = request.json['iteration_timeout_minutes']
    primary_metric = request.json['primary_metric']
    #max_concurrent_iterations = request.json['max_concurrent_iterations']

    automl_settings = {
        'time_column_name': time_column_name,
        'max_horizon': max_horizon,
        "iterations": iterations,
    }

    automl_config = AutoMLConfig(
        task=tasks,
        primary_metric=primary_metric,
        #blacklist_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'],
        experiment_timeout_minutes=iteration_timeout_minutes,
        training_data=dataset,
        label_column_name=target_var,
        compute_target=compute_target,
        enable_early_stopping=True,
        n_cross_validations=n_cross_validations,
        #verbosity=logging.INFO,
        **automl_settings)
    print("AutoML config created.")
    experiment = Experiment(ws, ExperimentName)
    remote_run = experiment.submit(automl_config, show_output=True)
    children = list(remote_run.get_children())
    metricslist = {}
    for run in children:
        properties = run.get_properties()
        metrics = {
            k: v
            for k, v in run.get_metrics().items() if isinstance(v, float)
        }
        metricslist[int(properties['iteration'])] = metrics

    rundata = pd.DataFrame(metricslist).sort_index(axis=1)
    rundata.rename(columns={
        0: "one",
        1: "two",
        2: "three",
        3: "four",
        4: "five",
        5: "six",
        6: "seven",
        7: "eight",
        8: "nine",
        9: "ten",
    },
                   inplace=True)
    iterations_toJson = rundata.to_json(orient='columns')
    print(iterations_toJson)
    best_run, fitted_model = remote_run.get_output()
    #best_run_toJson = best_run.get_metrics()
    #dict = {}
    #dict['iterations_toJson'] = iterations_toJson
    #dict['best_run_toJson'] = best_run_toJson
    #print(best_run.get_file_names())
    #Register the model
    #from datetime import date
    model = remote_run.register_model(model_name=best_model,
                                      description='AutoML Model')
    print(model.name, model.id, model.version, sep='\t')
    best_model = model.name
    best_model
    var1 = "@"
    var2 = var1 + best_model
    return '{} {}'.format(iterations_toJson, var2)
Example no. 11
#Write Files to local file storage
import os
#Also works: "/dbfs/tmp/models/worst_regression/dacrook/"
prefix = "file:/models/worst_regression/"
if not os.path.exists(prefix):
  os.makedirs(prefix)
with open(prefix + "x_scaler.pkl", "wb") as handle:
  pickle.dump(x_scaler, handle)
with open(prefix + "y_scaler.pkl", "wb") as handle:
  pickle.dump(y_scaler, handle)
with open(prefix + "model.pkl", "wb") as handle:
  pickle.dump(model, handle)
  
#Create an Azure ML Model out of it tagged as dev
from azureml.core.workspace import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.model import Model
az_sp = ServicePrincipalAuthentication(secrets["sp_tenant_id"], secrets["sp_app_id"], secrets["sp_password"]) #tenant id
az_ws = Workspace(secrets["subscription_id"], secrets["resource_group"], secrets["ml_workspace_name"], auth= az_sp)
print("Logged in and workspace retreived.")
Model.register(az_ws, model_path = prefix, model_name = "worst_regression", tags={"state" : secrets["alg_state"], "created_by" : secrets["created_by"]})

# COMMAND ----------

#finally unmount the mount.
try:
  dbutils.fs.unmount("/mnt/datalake")
except Exception as e:
  print("already unmounted; no need to unmount again.")
Example no. 12
def DeployAzureACI():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    location = request.json['location']
    best_model = request.json['best_model']
    Model_path = request.json['Model_path']
    #cluster_name = request.json['cluster_name']
    service_name = request.json['service_name']
    Reg_model_name = request.json['Reg_model_name']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

    from azureml.core.model import Model
    model = Model(ws, name=Reg_model_name)
    print(model)

    from azureml.core.model import InferenceConfig
    from azureml.core.webservice import AciWebservice
    from azureml.core.webservice import Webservice
    from azureml.core.model import Model
    from azureml.core.environment import Environment

    cwd = 'D:\\DCSAIAUTOML\\BestModels\\Azure'
    model_path = os.path.join(cwd, Model_path, best_model, "outputs")
    #model_path1 = os.path.join(model_path, "outputs", "model.pkl")
    print(model_path)
    os.chdir(model_path)
    #import importlib
    script_file_name = 'scoring_file_v_1_0_0.py'
    conda_env_file_name = 'conda_env_v_1_0_0.yml'
    #importlib.import_module('scoring_file_v_1_0_0.py')
    #script_file_name = joblib.load('scoring_file_v_1_0_0.py')
    #import yaml
    #conda_env_file_name = yaml.load(open('conda_env_v_1_0_0.yml'))
    #conda_env_file_name = joblib.load('conda_env_v_1_0_0.yml')

    myenv = Environment.from_conda_specification(name="myenv",
                                                 file_path=conda_env_file_name)
    inference_config = InferenceConfig(entry_script=script_file_name,
                                       environment=myenv)

    try:
        deployment_config = AciWebservice.deploy_configuration(cpu_cores=2,
                                                               memory_gb=8)
        service = Model.deploy(ws, service_name, [model], inference_config,
                               deployment_config)
        service.wait_for_deployment(show_output=True)
        print(service.state)
        compute_type = service.compute_type
        state = service.state
        url = service.scoring_uri
        s_url = service.swagger_uri
        #created_time = service.created_time
        #updated_time = service.updated_time
        v1 = "@"
        v2 = "Deployed Successfully"
        print(v2)
        return '{} {} {} {} {} {} {} {} {}'.format(v2, v1, compute_type, v1,
                                                   state, v1, url, v1, s_url)

    except Exception as e:
        error_statement = str(e)
        print("Error statement: ", error_statement)
        return error_statement
Example no. 13
def DeployAzureAKS():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    location = request.json['location']
    best_model = request.json['best_model']
    Model_path = request.json['Model_path']
    cluster_name = request.json['cluster_name']
    service_name = request.json['service_name']
    Reg_model_name = request.json['Reg_model_name']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

    from azureml.core.model import Model
    model = Model(ws, name=Reg_model_name)
    print(model)

    from azureml.core.model import InferenceConfig
    from azureml.core.webservice import AciWebservice
    from azureml.core.webservice import Webservice
    from azureml.core.model import Model
    from azureml.core.environment import Environment

    from sklearn.externals import joblib
    cwd = 'D:\\DCSAIAUTOML\\BestModels\\Azure'
    model_path = os.path.join(cwd, Model_path, best_model, "outputs")
    #model_path1 = os.path.join(model_path, "outputs", "model.pkl")
    print(model_path)
    os.chdir(model_path)
    #import importlib
    script_file_name = 'scoring_file_v_1_0_0.py'
    conda_env_file_name = 'conda_env_v_1_0_0.yml'
    #importlib.import_module('scoring_file_v_1_0_0.py')
    #script_file_name = joblib.load('scoring_file_v_1_0_0.py')
    #import yaml
    #conda_env_file_name = yaml.load(open('conda_env_v_1_0_0.yml'))
    #conda_env_file_name = joblib.load('conda_env_v_1_0_0.yml')

    myenv = Environment.from_conda_specification(name="myenv",
                                                 file_path=conda_env_file_name)
    inference_config = InferenceConfig(entry_script=script_file_name,
                                       environment=myenv)

    aks_target = AksCompute(ws, cluster_name)
    # If deploying to a cluster configured for dev/test, ensure that it was created with enough
    # cores and memory to handle this deployment configuration. Note that memory is also used by
    # things such as dependencies and AML components.
    try:
        deployment_config = AksWebservice.deploy_configuration(
            cpu_cores=2,
            memory_gb=16,
            enable_app_insights=True,
            collect_model_data=True,
        )
        service = Model.deploy(ws, service_name, [model], inference_config,
                               deployment_config, aks_target)
        service.wait_for_deployment(show_output=True)
        print(service.state)
        compute_type = service.compute_type
        state = service.state
        url = service.scoring_uri
        s_url = service.swagger_uri
        #created_time = service.created_time
        #updated_time = service.updated_time
        v1 = "@"
        v2 = "Deployed Successfully"
        print(v2)
        return '{} {} {} {} {} {} {} {} {}'.format(v2, v1, compute_type, v1,
                                                   state, v1, url, v1, s_url)

    except Exception as e:
        error_statement = str(e)
        print("Error statement: ", error_statement)
        return error_statement
Example no. 14
def RunAutoMLReg():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    file_name = request.json['file_name']
    location = request.json['location']
    target_var = request.json['target_var']
    cluster_name = request.json['cluster_name']
    best_model = request.json['best_model']
    #best_model = request.json['best_model']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')
    #compute_target = AmlCompute(ws, cluster_name)
    compute_target = ws.compute_targets[cluster_name]
    print('Found existing AML compute context.')
    dataset_name = file_name

    # Get a dataset by name
    df = Dataset.get_by_name(workspace=ws, name=dataset_name)
    #stock_dataset_df = df.to_pandas_dataframe()
    print('file successfully received.')
    #stock_dataset_df.head()
    #stock_dataset_json = stock_dataset_df.to_json(orient='split')
    #print(stock_dataset_json)
    X = df.drop_columns(columns=[target_var])
    y = df.keep_columns(columns=[target_var], validate=True)
    #y_df = stock_dataset_df[target_var].values
    #x_df = stock_dataset_df.drop([target_var], axis=1)
    print(y)
    # create a new RunConfig object
    conda_run_config = RunConfiguration(framework="python")
    conda_run_config.environment.docker.enabled = True
    conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
    cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'],
                                  conda_packages=['numpy', 'py-xgboost<=0.90'])
    conda_run_config.environment.python.conda_dependencies = cd
    print('run config is ready')
    ExperimentName = request.json['ExperimentName']
    tasks = request.json['tasks']
    iterations = request.json['iterations']
    n_cross_validations = request.json['n_cross_validations']
    iteration_timeout_minutes = request.json['iteration_timeout_minutes']
    primary_metric = request.json['primary_metric']
    max_concurrent_iterations = request.json['max_concurrent_iterations']

    try:
        automl_settings = {
            "name": ExperimentName,
            "iteration_timeout_minutes": iteration_timeout_minutes,
            "featurization": 'auto',
            "iterations": iterations,
            "n_cross_validations": n_cross_validations,
            "primary_metric": primary_metric,
            "preprocess": True,
            "max_concurrent_iterations": max_concurrent_iterations
            #"verbosity": logging.INFO
        }

        automl_config = AutoMLConfig(
            task=tasks,
            debug_log='automl_errors.log',
            blacklist_models=['XGBoost'],
            #path=os.getcwd(),
            compute_target=compute_target,
            #run_configuration=conda_run_config,
            X=X,
            y=y,
            **automl_settings,
        )

        experiment = Experiment(ws, ExperimentName)
        remote_run = experiment.submit(automl_config, show_output=True)
        remote_run.flush(timeout_seconds=400)
        children = list(remote_run.get_children())
        metricslist = {}
        for run in children:
            properties = run.get_properties()
            metrics = {
                k: v
                for k, v in run.get_metrics().items() if isinstance(v, float)
            }
            metricslist[int(properties['iteration'])] = metrics

        rundata = pd.DataFrame(metricslist).sort_index(axis=1)
        rundata = rundata.drop([
            'mean_absolute_percentage_error',
            'normalized_median_absolute_error',
            'normalized_root_mean_squared_log_error',
            'root_mean_squared_log_error'
        ])
        rundata.rename(columns={
            0: "one",
            1: "two",
            2: "three",
            3: "four",
            4: "five",
            5: "six",
            6: "seven",
            7: "eight",
            8: "nine",
            9: "ten",
        },
                       inplace=True)
        iterations_toJson = rundata.to_json(orient='columns')
        print(iterations_toJson)
        best_run, fitted_model = remote_run.get_output()
        best_run_toJson = best_run.get_metrics()
        cwd = 'D:/DCSAIAUTOML/BestModels/Azure'
        best_model_name = best_run.name
        model = remote_run.register_model(description=best_model)
        print(model.name, model.id, model.version, sep='\t')
        model_path = os.path.join(cwd, best_model, best_model_name)
        print(model_path)
        #print("Model DownLoad Complete")
        #model = Model(workspace=ws, name=model.name)
        #model.download_files(target_dir=model_path)
        #dict = {}
        #dict['iterations_toJson'] = iterations_toJson
        #dict['best_run_toJson'] = best_run_toJson
        #print(best_run.get_file_names())
        #Register the model
        #from datetime import date

        best_model_id = best_run.name

        var1 = "@"
        var2 = var1 + best_model_id

        Reg_model_name = model.name
        var4 = var1 + Reg_model_name

        best_run.flush(timeout_seconds=3600)
        best_run.download_files(output_directory=model_path)
        # importing required modules
        #import shutil
        #output_path = os.path.join(model_path, best_model_id)
        #dir_name1 = "D:\\DCSAIAUTOML\\BestModels\\Azure\\my_azure_best"
        #dir_name1 = "D:\\DCSAIAUTOML\\BestModels\\Azure\\my_azure_best\\my_azure_best"
        #shutil.make_archive(model_path,'zip',model_path)

        #zipf = zipfile.ZipFile(best_model_id+'.zip', 'w', zipfile.ZIP_DEFLATED)
        #for root, dirs, files in os.walk(model_path):
        #for file in files:
        #zipf.write(os.path.join(root, file))

        #def zipdir(path, ziph):
        # ziph is zipfile handle
        #import os
        #for root, dirs, files in os.walk(path):
        #for file in files:
        #ziph.write(os.path.join(root, file))

        #zipdir(model_path, zipf)
        #remote_run.clean_preprocessor_cache()
        print("ready to return")
        var5 = "no exception"
        return '{} {} {} {} {}'.format(iterations_toJson, var2, var4, var1,
                                       var5)
        #return iterations_toJson
    except Exception as e:
        error_statement = str(e)
        print("Error statement: ", error_statement)
        model_path1 = os.path.join(model_path, 'outputs')
        file_name = 'model.pkl'
        print("in exception: ", model_path1)
        src = 'D:\\Final Script_dev'
        full_file_name = os.path.join(src, file_name)
        import shutil
        #remote_run.download_file('model.pkl', output_file_path=model_path1)
        if os.path.isfile(full_file_name):
            shutil.copy(full_file_name, model_path1)
        return '{} {} {} {} {}'.format(iterations_toJson, var2, var4, var1,
                                       error_statement)
################ Workspace ##############################
with open("./../config/aml_config.json") as f:
    config = json.load(f)

workspace_name = config["workspace_name"]
resource_group = config["resource_group"]
subscription_id = config["subscription_id"]
workspace_region = config["location"]

max_depth = randint(2, 10)
n_estimators = int(randrange(2000, 5000, 100))

#Interactive Authentication
ws = Workspace(workspace_name=workspace_name,
               subscription_id=subscription_id,
               resource_group=resource_group,
               auth=cli_auth)

ws.write_config()
############# Experiement gbr-turbofan ######################
experiment = Experiment(ws, 'gbr-turbofan')

train = pd.read_csv("data/turbofan.csv")

X = train.drop('rul', axis=1)
y = pd.Series(train.rul)

run = experiment.start_logging()
run.tag("python version", sys.version[0:6])

# Log the algorithm parameter alpha to the run
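# The fragment breaks off here; a minimal sketch of the logging call the comment above
# describes, with an illustrative alpha value (the original value is not shown):
run.log("alpha", 0.03)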
        # apply whatever remedies you deem appropriate
        pass
    return v


if __name__ == "__main__":
    # Retrieve data from Azure
    spa = ServicePrincipalAuthentication(
        tenant_id=tenant_id,
        service_principal_id=service_principal_id,
        service_principal_password=auth_secret)

    ws = Workspace(subscription_id,
                   resource_group,
                   workspace_name,
                   auth=spa,
                   _location="westeurope",
                   _disable_service_check=False,
                   _workspace_id=None,
                   sku='basic')

    datastore = ws.get_default_datastore()
    datastore.download("../Data/", prefix="CSV/")

    # Read downloaded data
    df = pd.read_csv("../Data/CSV/extraction.csv",
                     date_parser=lambda x: parse_date(x),
                     usecols=cols,
                     parse_dates=['DateOut', 'DateIn'],
                     encoding="UTF-16 LE",
                     sep=';',
                     quotechar='"',
Example no. 17
"""
Created on Fri Sep  6 16:06:57 2019

@author: datacore
"""

from azureml.core.workspace import Workspace
import azureml.core
import pandas as pd

from azureml.core.authentication import AzureCliAuthentication
import azure.cli.core

#cli_auth = AzureCliAuthentication()
ws = Workspace(subscription_id="b613a36c-3018-4806-9e67-2af6fc80b3b3",
               resource_group="PMGDemo",
               workspace_name="AutoML_demo")

print("Found workspace {} at location {}".format(ws.name, ws.location))

ws = Workspace.from_config()

# Choose a name for the experiment and specify the project folder.
experiment_name = 'Capstone_Project'
project_folder = 'D:\\Stock_Prediction\\AutoML_Azure\\Python_AutoML'

from azureml.core.experiment import Experiment
experiment = Experiment(ws, experiment_name)

output = {}
output['SDK version'] = azureml.core.VERSION
# COMMAND ----------

import os

project_folder = './sample_projects/automl-demo-predmain'

# COMMAND ----------

subscription_id = ""  #you should be owner or contributor
resource_group = ""  #you should be owner or contributor
workspace_name = ""  #your workspace name
workspace_region = ""  #your region

try:
    ws = Workspace.from_config()
    print("Using Config File")
except Exception:
    ws = Workspace(workspace_name=workspace_name,
                   subscription_id=subscription_id,
                   resource_group=resource_group)
    print("Unable to find config")

# COMMAND ----------

# MAGIC %md Create *automl-predictive-rul* experiment for Regressive AutoML using the Nasa Turbofan data

# COMMAND ----------

experiment_name = 'automl-predictive-rul'
experiment = Experiment(ws, experiment_name)
Example no. 19
def ml_workspace_create_resources(
        auth,
        client,
        resource_group_name,
        workspace_name,
        location,
        vnet_location,
        subscription_id,
        friendly_name=None,
        storage_account=None,
        key_vault=None,
        app_insights=None,
        containerRegistry=None,
        adbWorkspace=None,
        primary_user_assigned_identity=None,
        cmk_keyvault=None,
        resource_cmk_uri=None,
        hbi_workspace=False,
        default_cpu_compute_target=None,
        default_gpu_compute_target=None,
        private_endpoint_config=None,
        private_endpoint_auto_approval=None,
        exist_ok=False,
        show_output=True,
        sku='basic',
        tags=None,
        user_assigned_identity_for_cmk_encryption=None,
        system_datastores_auth_mode='accesskey',
        is_update_dependent_resources=False):
    """
    Create a new machine learning workspace along with dependent resources.
    :param auth:
    :type auth: azureml.core.authentication.AbstractAuthentication
    :param client:
    :param resource_group_name:
    :param workspace_name:
    :param location:
    :param subscription_id: Subscription id to use.
    :param adbWorkspace: Adb Workspace linked with AML Workspace
    :param sku: Sku name
    :param friendly_name:
    :return:
    """
    check_valid_resource_name(workspace_name, "Workspace")
    # First check if the workspace already exists
    try:
        existing_workspace = client.get(resource_group_name, workspace_name)
        if exist_ok and not is_update_dependent_resources:
            return existing_workspace
        elif not exist_ok:
            from azureml._base_sdk_common.common import get_http_exception_response_string
            raise WorkspaceException(
                "Workspace with name '{0}' already exists under"
                " resource group with name '{1}'.".format(
                    workspace_name, resource_group_name))
    except ErrorResponseWrapperException as response_exception:
        if response_exception.response.status_code != 404:
            from azureml._base_sdk_common.common import get_http_exception_response_string
            raise WorkspaceException(
                get_http_exception_response_string(
                    response_exception.response))

    # Workspace does not exist. go ahead and create
    if location is None:
        location = get_location_from_resource_group(auth, resource_group_name,
                                                    subscription_id)

    if not friendly_name:
        friendly_name = workspace_name

    inner_exception = None
    error_message = ""

    # Use this way for deployment until we get our template shipped in PROD to
    # support MSI
    '''Note: az core deploy template spawns a new daemon thread to track the status of a deployment.
    If the operation fails then az throws an exception in the daemon thread. Hence, we catch the exception here
    when trying to fetch the workspace. Workspace get will throw an exception when the template deployment fails,
    and we would need to roll back in that case.'''

    orchestrator = WorkspaceArmDeploymentOrchestrator(
        auth,
        resource_group_name,
        location,
        subscription_id,
        workspace_name,
        storage=storage_account,
        keyVault=key_vault,
        containerRegistry=containerRegistry,
        adbWorkspace=adbWorkspace,
        primary_user_assigned_identity=primary_user_assigned_identity,
        appInsights=app_insights,
        cmkKeyVault=cmk_keyvault,
        resourceCmkUri=resource_cmk_uri,
        hbiWorkspace=hbi_workspace,
        sku=sku,
        tags=tags,
        user_assigned_identity_for_cmk_encryption=
        user_assigned_identity_for_cmk_encryption,
        systemDatastoresAuthMode=system_datastores_auth_mode)

    orchestrator.deploy_workspace(show_output=show_output)

    if orchestrator.error is not None:
        try:
            _delete_resources(auth,
                              subscription_id,
                              resource_group_name,
                              storage_account=storage_account,
                              app_insights=app_insights,
                              key_vault=key_vault,
                              storage_name=orchestrator.storage_name,
                              insights_name=orchestrator.insights_name,
                              vault_name=orchestrator.vault_name,
                              workspace_name=workspace_name,
                              delete_ws=False)
            error_message = orchestrator.error.message
        except Exception:
            pass
        raise WorkspaceException(
            "Unable to create the workspace. {}".format(error_message),
            inner_exception=orchestrator.error)

    if private_endpoint_config:
        try:
            create_workspace_private_endpoint(auth, resource_group_name,
                                              vnet_location, subscription_id,
                                              workspace_name,
                                              private_endpoint_config,
                                              private_endpoint_auto_approval,
                                              tags, show_output)
        except Exception:
            _delete_resources(auth,
                              subscription_id,
                              resource_group_name,
                              storage_account=storage_account,
                              app_insights=app_insights,
                              key_vault=key_vault,
                              storage_name=orchestrator.storage_name,
                              insights_name=orchestrator.insights_name,
                              vault_name=orchestrator.vault_name,
                              workspace_name=workspace_name,
                              delete_ws=True)
            raise

    if (default_cpu_compute_target or default_gpu_compute_target):
        workspace = Workspace(subscription_id,
                              resource_group_name,
                              workspace_name,
                              auth=auth,
                              _disable_service_check=True)
        try:
            _deploy_azuremlcompute_clusters(
                workspace,
                default_cpu_compute_target=default_cpu_compute_target,
                default_gpu_compute_target=default_gpu_compute_target,
                show_output=show_output)
        except Exception as ex:
            _delete_resources(auth,
                              subscription_id,
                              resource_group_name,
                              storage_account=storage_account,
                              app_insights=app_insights,
                              key_vault=key_vault,
                              storage_name=orchestrator.storage_name,
                              insights_name=orchestrator.insights_name,
                              vault_name=orchestrator.vault_name,
                              workspace_name=workspace_name,
                              delete_ws=True)
            inner_exception = ex
            error_message = ex.message
        if inner_exception is not None:
            raise WorkspaceException(
                "Unable to create the workspace. {}".format(error_message),
                inner_exception=inner_exception)

    # Should this also be wrapped in a try-catch and raise a WorkspaceException
    # when it fails with a different message
    created_workspace = client.get(resource_group_name, workspace_name)
    return created_workspace
Example no. 20
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
from azureml.core import Workspace

# ## Create and connect to an Azure Machine Learning Workspace
# Run the following cell to create a new Azure Machine Learning **Workspace** and save the configuration to disk (next to the Jupyter notebook).
#
# **Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.

# In[ ]:

# By using the exist_ok param, if the workspace already exists you get a reference to the existing workspace
# allowing you to re-run this cell multiple times as desired (which is fairly common in notebooks).
ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region,
                      exist_ok=True)

ws.write_config()
print('Workspace configuration succeeded')

# ### Create AML Compute Cluster
# Now you are ready to create the GPU compute cluster. Run the following cell to create a new compute cluster (or retrieve the existing cluster if it already exists). The code below will create a *GPU-based* cluster where each node in the cluster is of the size `Standard_NC12`, and the cluster is restricted to use 1 node. This will take a couple of minutes to create.

# In[ ]:

### Create AML CPU based Compute Cluster
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
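# The notebook cell that uses these imports is missing here; a minimal sketch of the
# cluster creation described above, assuming a cluster name of "gpu-cluster" (illustrative):
cluster_name = "gpu-cluster"
try:
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print("Found existing compute target.")
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(vm_size="Standard_NC12",
                                                           max_nodes=1)
    compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
    compute_target.wait_for_completion(show_output=True)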
def RunAutoML():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    file_name = request.json['file_name']
    #location = request.json['location']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

    dataset_name = file_name

    # Get a dataset by name
    df = Dataset.get_by_name(workspace=ws, name=dataset_name)
    stock_dataset_df = df.to_pandas_dataframe()
    print('file successfully received.')
    stock_dataset_df.head()
    #stock_dataset_json = stock_dataset_df.to_json(orient='split')
    #print(stock_dataset_json)
    y_df = stock_dataset_df['ActionTaken'].values
    x_df = stock_dataset_df.drop(['ActionTaken'], axis=1)
    print(y_df)
    ExperimentName = request.json['ExperimentName']
    tasks = request.json['tasks']
    iterations = request.json['iterations']
    n_cross_validations = request.json['n_cross_validations']
    iteration_timeout_minutes = request.json['iteration_timeout_minutes']
    primary_metric = request.json['primary_metric']
    max_concurrent_iterations = request.json['max_concurrent_iterations']

    #n_cross_validations = request.json['n_cross_validations']

    try:
        automl_settings = {
            "name": ExperimentName,
            "iteration_timeout_minutes": iteration_timeout_minutes,
            "iterations": iterations,
            "n_cross_validations": n_cross_validations,
            "primary_metric": primary_metric,
            "preprocess": True,
            "max_concurrent_iterations": max_concurrent_iterations,
            "verbosity": logging.INFO
        }

        automl_config = AutoMLConfig(
            task=tasks,
            debug_log='automl_errors.log',
            path=os.getcwd(),
            #compute_target = 'Automlvm',
            X=x_df,
            y=y_df,
            **automl_settings,
        )

        experiment = Experiment(ws, 'automl_local_v2')
        remote_run = experiment.submit(automl_config, show_output=True)
        children = list(remote_run.get_children())
        metricslist = {}
        for run in children:
            properties = run.get_properties()
            metrics = {
                k: v
                for k, v in run.get_metrics().items() if isinstance(v, float)
            }
            metricslist[int(properties['iteration'])] = metrics

        rundata = pd.DataFrame(metricslist).sort_index(axis=1)
        rundata_toJson = rundata.to_json(orient='columns')

        return rundata_toJson
    except Exception:

        return 'error'
Example no. 22
acr_details.address = os.environ.get('ACR_ADDRESS')
acr_details.username = os.environ.get('ACR_USERNAME')
acr_details.password = os.environ.get('ACR_PASSWORD')
acr_image = 'aml-r'

# R Script related information
r_script = 'hello.r'

#   1. Authenticate with Azure ML Service
auth = ServicePrincipalAuthentication(
    tenant_id=azure_tentant_id,
    service_principal_id=azure_app_id,
    service_principal_password=azure_app_secret)

aml_workspace = Workspace.get(name=aml_workspace_name,
                              auth=auth,
                              subscription_id=azure_subscription_id,
                              resource_group=azure_resource_group)

if (aml_workspace):
    print(f'Connected to AML Workspace {aml_workspace._workspace_name}')
else:
    print(f'ERROR: Not connected to AML Workspace {aml_workspace_name}')
    exit(-1)

bootstrap_args = [r_script]

estimator = Estimator(source_directory='src',
                      entry_script='bootstrapper.py',
                      compute_target=aml_compute_target,
                      custom_docker_image=acr_image,
                      image_registry_details=acr_details,
Example no. 23
    exp = get_experiment(ws, experiment_name)
    print(
        'Cancelling existing experiment with name: {}'.format(experiment_name))
    for run in tqdm(list(exp.get_runs())):
        run.cancel()


if __name__ == "__main__":
    print("SDK Version:", azureml.core.VERSION)
    set_diagnostics_collection(send_diagnostics=True)

    # Read in config
    conf = Config(config_filepath='~/aml_secrets/aml_secrets_rr2msrlabs.yaml')

    # Config region
    conf_aml = conf['aml_config']
    conf_cluster = conf['cluster_config']
    conf_docker = conf['azure_docker']
    conf_experiment = conf['experiment']
    # endregion

    # Initialize workspace
    # Make sure you have downloaded your workspace config
    ws = Workspace.from_config(path=conf_aml['aml_config_file'])
    print('Workspace name: ' + ws.name,
          'Azure region: ' + ws.location,
          'Subscription id: ' + ws.subscription_id,
          'Resource group: ' + ws.resource_group,
          sep='\n')

    launch_experiment(ws, conf_aml, conf_cluster, conf_docker, conf_experiment)
Example no. 24
def aml_pipeline_mocks(mocker: MockFixture):
    """ aml_pipeline
        Fixture - needs to be "imported" by test case via parameters

        Sets mocks for AML SDK related to AML Pipeline build scripts:
            - Workspace.Get
            - PythonScriptStep
            - Pipeline
            - Pipeline.publish
            - ComputeTarget (AmlCompute only)
            - Environment

        returns tuple of mock objects
        (workspace, amlcompute, mock_workspace_get, mock_pipeline_publish)
    """
    # Load Mocked environment variables
    e = Env()

    # Mock file read in Conda Dependencies
    mocker.patch('azureml.core.conda_dependencies.open',
                 mock_open(read_data='name: unit_test'))

    # Mock external dependencies in workspace retrieval
    mocker.patch('azureml.core.authentication.perform_interactive_login')
    mocker.patch(
        'azureml.core.authentication.InteractiveLoginAuthentication.'
        '_check_if_subscription_exists',
        return_value=True)

    # Mock workspace retrieval
    # Object of azureml.core.workspace.Workspace
    workspace = Workspace(subscription_id=e.subscription_id,
                          resource_group=e.resource_group,
                          workspace_name=e.workspace_name,
                          _disable_service_check=True)

    mock_workspace_get = mocker.patch('azureml.core.Workspace.get',
                                      return_value=workspace)

    # Object of azureml.core.compute.amlcompute.AmlCompute
    amlcd = {
        "name": e.compute_name,
        "location": "mocklocation",
        "tags": "",
        "properties": {
            "properties": {},
            "provisioningErrors": "",
            "provisioningState": "",
            "resourceId": "",
            "description": "",
            "computeType": "AmlCompute",
            "isAttachedCompute": "false"
        }
    }

    mocker.patch('azureml.core.compute.compute.ComputeTarget._get',
                 return_value=amlcd)
    amlcompute = AmlCompute(workspace=workspace, name=e.compute_name)
    ct = {e.compute_name: amlcompute}
    mocker.patch('azureml.core.Workspace.compute_targets', return_value=ct)

    # Mock ComputeTarget create
    mocker.patch('azureml.core.compute.compute.ComputeTarget.create',
                 return_value=amlcompute)

    # Mock External dependency in azureml.core.compute.amlcompute.AmlCompute
    mocker.patch('azureml.core.compute.amlcompute.AmlCompute._wait_for_nodes',
                 return_value=(True, False, False, False))

    # Mock the environment to return an already existing environment
    mocker.patch('azureml.core.Environment.list',
                 return_value={
                     "mock_env_name":
                     MagicMock(spec='azureml.core.Environment')
                 })

    # Skip real registering of Environment
    mocker.patch('azureml.core.Environment.register')

    # Mock for Datastore
    ds = AbstractDatastore(workspace=workspace,
                           name=e.datastore_name,
                           datastore_type='mocked')
    mocker.patch('azureml.core.Datastore.__new__', return_value=ds)

    # Additional dependency mocks for Pipeline object
    # to be created successfully
    mocker.patch('azureml.core.workspace.Workspace.service_context')
    mocker.patch('azureml.core.workspace.Workspace.get_default_datastore',
                 return_value=ds)
    # Enables the Pipeline object to be CREATED without external call
    mocker.patch('azureml.pipeline.core._aeva_provider'
                 '._AevaWorkflowProvider.create_provider')

    mock_pipeline_publish = mocker.patch('azureml.pipeline.core.'
                                         'Pipeline.publish')

    return (workspace, amlcompute, mock_workspace_get, mock_pipeline_publish)
Example no. 25
def load_workspace_from_config():
    return Workspace.from_config()
from azureml.core.image import ContainerImage
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
import json

webservice_name = 'turbofan-rul'

# COMMAND ----------

subscription_id = ""  #you should be owner or contributor
resource_group = ""  #you should be owner or contributor
workspace_name = ""  #your workspace name
workspace_region = ""  #your region

try:
    ws = Workspace.from_config()
    print("Using Config File")
except Exception:
    ws = Workspace(workspace_name=workspace_name,
                   subscription_id=subscription_id,
                   resource_group=resource_group)
    print("Unable to find config")

# COMMAND ----------

from azureml.core.model import Model
run = [x for x in ws.experiments['gbt-turbofan'].get_runs()][0]
model_name = "gbt-turbofan"
print(run)

mymodel = run.register_model(model_name="gbt-turbofan",
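# The call above is cut off; a likely completion, mirroring the gbr-turbofan example
# earlier in this collection (the model_path value is an assumption):
mymodel = run.register_model(model_name="gbt-turbofan",
                             model_path="outputs/model.pkl")
print(mymodel.name, mymodel.version)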
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 17:20:22 2019

@author: datacore
"""

from azureml.core.authentication import AzureCliAuthentication
import azure.cli.core
#cli_auth = AzureCliAuthentication()
from azureml.core.workspace import Workspace

ws = Workspace(subscription_id="24075937-2687-4457-bac6-ec16dec514c3",
               resource_group="VstsRG-784AbhijitC-8a31",
               workspace_name="automldc")

from azureml.core.experiment import Experiment
from azureml.core import Run
experiment = Experiment(ws, 'Myexp2_v1_test21')
best_run = Run(experiment=experiment,
               run_id='AutoML_74e9d9dc-f347-4392-b8bb-3edeb4a6afad_8')
fitted_model = Run(experiment=experiment,
                   run_id='AutoML_74e9d9dc-f347-4392-b8bb-3edeb4a6afad_8')
#print(best_run.register_model()
print(fitted_model)

# Get a dataset by name
from azureml.core.dataset import Dataset

file_name = '2018Q4PredictionTrainedSet101.csv'
stock_dataset = Dataset.get_by_name(ws, '2018Q4PredictionTrainedSet101.csv')
Example no. 28
def create_workspace(workspace_name,
                     resource_group_name=None,
                     location=None,
                     friendly_name=None,
                     storage_account=None,
                     key_vault=None,
                     app_insights=None,
                     container_registry=None,
                     adb_workspace=None,
                     primary_user_assigned_identity=None,
                     cmk_keyvault=None,
                     resource_cmk_uri=None,
                     hbi_workspace=False,
                     create_resource_group=None,
                     exist_ok=False,
                     sku='basic',
                     tags=None,
                     pe_name=None,
                     pe_vnet_name=None,
                     pe_subnet_name=None,
                     pe_subscription_id=None,
                     pe_resource_group=None,
                     pe_auto_approval=None,
                     user_assigned_identity_for_cmk_encryption=None,
                     system_datastores_auth_mode='accessKey'):

    from azureml._base_sdk_common.cli_wrapper._common import get_cli_specific_auth, get_default_subscription_id, \
        get_resource_group_or_default_name

    auth = get_cli_specific_auth()
    default_subscription_id = get_default_subscription_id(auth)

    # resource group can be None, as we create on user's behalf.
    resource_group_name = get_resource_group_or_default_name(
        resource_group_name, auth=auth)

    private_endpoint_config = None
    if pe_name is not None:
        private_endpoint_config = PrivateEndPointConfig(
            pe_name, pe_vnet_name, pe_subnet_name, pe_subscription_id,
            pe_resource_group)

    workspace_object = Workspace.create(
        workspace_name,
        auth=auth,
        subscription_id=default_subscription_id,
        resource_group=resource_group_name,
        location=location,
        create_resource_group=create_resource_group,
        friendly_name=friendly_name,
        storage_account=storage_account,
        key_vault=key_vault,
        app_insights=app_insights,
        container_registry=container_registry,
        adb_workspace=adb_workspace,
        primary_user_assigned_identity=primary_user_assigned_identity,
        cmk_keyvault=cmk_keyvault,
        resource_cmk_uri=resource_cmk_uri,
        hbi_workspace=hbi_workspace,
        exist_ok=exist_ok,
        sku=sku,
        tags=get_tags_dict(tags),
        private_endpoint_config=private_endpoint_config,
        private_endpoint_auto_approval=pe_auto_approval,
        user_assigned_identity_for_cmk_encryption=
        user_assigned_identity_for_cmk_encryption,
        system_datastores_auth_mode=system_datastores_auth_mode)

    # TODO: Need to add a message that workspace created successfully.
    return workspace_object._get_create_status_dict()
# auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')
# ws = Workspace.from_config(auth = auth)
# ```
#
# If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the `ws = Workspace.from_config()` line in the cell below with the following:
#
# ```
# from azureml.core.authentication import ServicePrincipalAuthentication
# auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')
# ws = Workspace.from_config(auth = auth)
# ```
# For more details, see [aka.ms/aml-notebook-auth](http://aka.ms/aml-notebook-auth)

# In[85]:

ws = Workspace.from_config()

# Choose a name for the experiment and specify the project folder.
experiment_name = 'automl-classification'
project_folder = './sample_projects/automl-classification'

experiment = Experiment(ws, experiment_name)

output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace Name'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
Example no. 30
# Check core SDK version number for debugging purposes
import azureml.core
print("SDK Version:", azureml.core.VERSION)

subscription_id = "fac34303-435d-4486-8c3f-7094d82a0b60"
resource_group = "aml-notebooks"
workspace_name = "haieastus2ws3"
workspace_region = 'eastus2'  # or eastus2euap

# import the Workspace class and check the azureml SDK version
from azureml.core.workspace import Workspace, WorkspaceException

ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region)
ws.get_details()

ws.write_config()

# load workspace configuration from ./aml_config/config.json file.
my_workspace = Workspace.from_config()

print(my_workspace.get_details())