Example 1
# convert to relative paths
prefix = Path(__file__).parent
source_dir = str(prefix.joinpath(source_dir))
environment_file = str(prefix.joinpath(environment_file))

# get workspace
ws = Workspace.from_config()

# create dataset
ds = Dataset.File.from_files(data_uri)

# create environment
env = Environment.from_pip_requirements(environment_name, environment_file)

# setup entry script arguments
args = ["--data-dir", ds.as_mount()]

# create a job configuration
src = ScriptRunConfig(
    source_directory=source_dir,
    script=entry_script,
    arguments=args,
    environment=env,
    compute_target=compute_name,
)

# run the job
run = Experiment(ws, experiment_name).submit(src)
run.wait_for_completion(show_output=True)
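
Once the run finishes, metrics and output files can be pulled back from the Run object. A minimal sketch using the azureml-core API (the output paths are placeholders):

# retrieve metrics logged with run.log(...) inside the entry script
metrics = run.get_metrics()
print(metrics)

# download everything the run wrote under its outputs folder
run.download_files(prefix="outputs", output_directory="./run_outputs")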
Example 2
# Build Pipeline
pipeline1 = Pipeline(workspace=ws, steps=steps)
print("Pipeline is built")

# Validate Pipeline
pipeline1.validate()
print("Pipeline validation complete")

# Submit unpublished pipeline with small data set for test
if args.pipeline_action == "pipeline-test":
    pipeline_run1 = Experiment(ws,
                               experiment_name).submit(pipeline1,
                                                       regenerate_outputs=True)
    print("Pipeline is submitted for execution")
    pipeline_run1.wait_for_completion(show_output=True)

# RunDetails(pipeline_run1).show()

# Define pipeline parameters
# run_env = PipelineParameter(
#   name="dev_flag",
#   default_value=True)

# dbname = PipelineParameter(
#   name="dbname",
#   default_value='opex')

# Publish Pipeline
if args.pipeline_action == "publish":
    published_pipeline1 = pipeline1.publish(
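
Once published, the pipeline can be fetched by id and resubmitted without rebuilding the graph. A minimal sketch using the azureml-pipeline-core API (the experiment name is a placeholder):

from azureml.pipeline.core import PublishedPipeline

fetched = PublishedPipeline.get(ws, id=published_pipeline1.id)
pipeline_run = fetched.submit(ws, experiment_name="pipeline-test")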
Example 3
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')

datastore = ws.get_default_datastore()

input_ds = Dataset.get_by_name(ws, dataset_name)
batch_data = DatasetConsumptionConfig("batch_dataset", input_ds, mode='mount')

output_dir = PipelineData(name='batch_output', datastore=datastore)

parallel_run_config = ParallelRunConfig.load_yaml(workspace=ws,
                                                  path='convert_parallel.yml')

batch_step = ParallelRunStep(name="batch-conversion-step",
                             parallel_run_config=parallel_run_config,
                             arguments=['--data_output_path', output_dir],
                             inputs=[batch_data],
                             output=output_dir,
                             allow_reuse=False)

steps = [batch_step]

pipeline = Pipeline(workspace=ws, steps=steps)
pipeline.validate()

pipeline_run = Experiment(ws, 'convert-batch-pipeline').submit(pipeline)
pipeline_run.wait_for_completion()

#published_pipeline = pipeline.publish(pipeline_name)
#print("Published pipeline id: ", published_pipeline.id)
Example 4

evaluateStep.run_after(trainStep)
steps = [evaluateStep]

pipeline = Pipeline(workspace=ws, steps=steps)
print("Pipeline is built")

pipeline.validate()
print("Simple validation complete")

run = Run.get_context()
experiment_name = run.experiment.name

pipeline_run = Experiment(ws, experiment_name).submit(pipeline)
print("Pipeline is submitted for execution")

pipeline_run.wait_for_completion(show_output=True, timeout_seconds=43200)

print("Get StepRun for evaluate step...")
pipeline_run_id = pipeline_run.id
step_run_id = pipeline_run.find_step_run('evaluate')[0].id
node_id = pipeline_run.get_graph().node_name_dict['evaluate'][0].node_id
print('Pipeline Run ID: {} Step Run ID: {}, Step Run Node ID: {}'.format(pipeline_run_id, step_run_id, node_id))
step_run = StepRun(run.experiment, step_run_id, pipeline_run_id, node_id)
print(step_run)

print("Downloading evaluation results...")
# access the evaluate_output
#data = pipeline_run.find_step_run('evaluate')[0].get_output_data('evaluate_output')
data = step_run.get_output_data('evaluate_output')
# download the predictions to local path
data.download('.', show_progress=True)
Example 5

modelEvalReg = PythonScriptStep(
    name="Evaluate and Register Model",
    script_name="evaluate_model.py",
    arguments=["--release_id", 0, '--model_name', model_name],
    compute_target=compute_target_cpu,
    source_directory=source_directory,
    runconfig=run_config,
    allow_reuse=False)

modelEvalReg.run_after(trainingStep)
print("Model Evaluation and Registration Step is Created")

from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
pipeline = Pipeline(workspace=ws,
                    steps=[extractDataStep, trainingStep, modelEvalReg])
pipeline_run = Experiment(ws, pipeline_experiment_name).submit(pipeline)

pipeline_run.wait_for_completion(show_output=True, raise_on_error=True)

published_pipeline = pipeline_run.publish_pipeline(
    name=pipeline_name,
    description="Steps are: data preparation, training, model validation and model registration",
    version="0.1",
    continue_on_step_failure=False)

## Free up compute resource(s) after computation is completed!
print(f'Deleting compute resource: [{cluster_name_cpu}]')
compute_target_cpu.delete()
Example 6

# list of steps to run
compareModels = [trainStep]

from azureml.pipeline.core import Pipeline

# Build the pipeline
pipeline1 = Pipeline(workspace=ws, steps=[compareModels])
run = Run.get_context()

# Submit the pipeline
from azureml.core import Experiment

# Submit the pipeline to be run
pipeline_run1 = Experiment(ws, 'Compare_Models_Exp').submit(pipeline1)
pipeline_run1.wait_for_completion(show_output=False, timeout_seconds=0,
                                  raise_on_error=True)

# Retrieve the run id of step 1 of 1 of the pipeline
step1_runid = [e for e in pipeline_run1.get_steps()][0].id
# Build the full path in blob storage
path_model_datastore = "azureml/" + step1_runid + "/outputs/"
# Download the model locally
def_data_store.download(target_path='datastore', prefix=path_model_datastore)
# Register the locally downloaded model in the workspace
model = Model.register(workspace=ws,
                       model_path='datastore/' + path_model_datastore +
                       model_pklname + '.pkl',
                       model_name=model_basename)

#Create environment file
from azureml.core.conda_dependencies import CondaDependencies
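
The snippet breaks off at the import; a typical continuation, based on the azureml-core API (package list and file name assumed):

# build a dependency spec and write it out as a conda environment file
conda_dep = CondaDependencies.create(conda_packages=['scikit-learn'],
                                     pip_packages=['azureml-defaults'])
conda_dep.save('./environment.yml')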
Example 7
def main():
    train_file = r"EdwardFry_Microsoft_issueDataset.csv"
    ws = Workspace.from_config()

    # Default datastore
    def_data_store = ws.get_default_datastore()

    # Get the blob storage associated with the workspace
    def_blob_store = Datastore(ws, "workspaceblobstore")

    # Get file storage associated with the workspace
    def_file_store = Datastore(ws, "workspacefilestore")

    # Set data input and output
    xyz_phishing_dataset = Dataset.File.from_files([(def_blob_store,
                                                     train_file)])
    output_data1 = OutputFileDatasetConfig(
        destination=(def_data_store, 'outputdataset/{run-id}'))
    output_data_dataset = output_data1.register_on_complete(
        name='prepared_output_data')

    # Set compute
    compute_name = "aml-compute"
    vm_size = "STANDARD_NC6"
    if compute_name in ws.compute_targets:
        compute_target = ws.compute_targets[compute_name]
        if compute_target and type(compute_target) is AmlCompute:
            print('Found compute target: ' + compute_name)
    else:
        print('Creating a new compute target...')
        provisioning_config = AmlCompute.provisioning_configuration(
            vm_size=vm_size,  # STANDARD_NC6 is GPU-enabled
            min_nodes=0,
            max_nodes=4)
        # create the compute target
        compute_target = ComputeTarget.create(ws, compute_name,
                                              provisioning_config)

        # Can poll for a minimum number of nodes and for a specific timeout.
        # If no min node count is provided it will use the scale settings for the cluster
        compute_target.wait_for_completion(show_output=True,
                                           min_node_count=None,
                                           timeout_in_minutes=20)

        # For a more detailed view of current cluster status, use the 'status' property
        print(compute_target.status.serialize())

    aml_run_config = RunConfiguration()
    # `compute_target` as defined in "Azure Machine Learning compute" section above
    aml_run_config.target = compute_target

    USE_CURATED_ENV = True
    if USE_CURATED_ENV:
        curated_environment = Environment.get(workspace=ws,
                                              name="AzureML-Tutorial")
        aml_run_config.environment = curated_environment
    else:
        aml_run_config.environment.python.user_managed_dependencies = False

        # Add some packages relied on by data prep step
        aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(
            conda_packages=['pandas', 'scikit-learn'],
            pip_packages=['azureml-sdk', 'azureml-dataprep[fuse,pandas]'],
            pin_sdk_version=False)

    dataprep_source_dir = "./dataprep_src"
    entry_point = "prepare.py"
    # `my_dataset` as defined above
    ds_input = xyz_phishing_dataset.as_named_input('input1')

    # `output_data1`, `compute_target`, `aml_run_config` as defined above
    data_prep_step = PythonScriptStep(script_name=entry_point,
                                      source_directory=dataprep_source_dir,
                                      arguments=[
                                          "--input",
                                          ds_input.as_download(), "--output",
                                          output_data1
                                      ],
                                      compute_target=compute_target,
                                      runconfig=aml_run_config,
                                      allow_reuse=True)

    train_source_dir = "./train_src"
    train_entry_point = "train.py"

    training_results = OutputFileDatasetConfig(name="training_results",
                                               destination=def_blob_store)

    train_step = PythonScriptStep(script_name=train_entry_point,
                                  source_directory=train_source_dir,
                                  arguments=[
                                      "--prepped_data",
                                      output_data1.as_input(),
                                      "--training_results", training_results
                                  ],
                                  compute_target=compute_target,
                                  runconfig=aml_run_config,
                                  allow_reuse=True)

    # list of steps to run (`compare_step` definition not shown)
    compare_models = [data_prep_step, train_step, compare_step]

    # Build the pipeline
    pipeline1 = Pipeline(workspace=ws, steps=[compare_models])

    #dataset_consuming_step = PythonScriptStep(
    #    script_name="iris_train.py",
    #    inputs=[iris_tabular_dataset.as_named_input("iris_data")],
    #    compute_target=compute_target,
    #    source_directory=project_folder
    #)

    #run_context = Run.get_context()
    #iris_dataset = run_context.input_datasets['iris_data']
    #dataframe = iris_dataset.to_pandas_dataframe()

    ## Within a PythonScriptStep

    #ws = Run.get_context().experiment.workspace

    #step = PythonScriptStep(name="Hello World",
    #                        script_name="hello_world.py",
    #                        compute_target=aml_compute,
    #                        source_directory=source_directory,
    #                        allow_reuse=False,
    #                        hash_paths=['hello_world.ipynb'])

    # Submit the pipeline to be run
    pipeline_run1 = Experiment(ws, 'Compare_Models_Exp').submit(pipeline1)
    pipeline_run1.wait_for_completion()

Example 8

    script_params = {}
    estimator = PyTorch(source_directory='./',
                        script_params=script_params,
                        compute_target=compute_target_gpu,
                        entry_script='train.py',
                        use_gpu=True,
                        pip_packages=[],
                        framework_version='1.2')

    est_step = EstimatorStep(name="Train_Step",
                             estimator=estimator,
                             estimator_entry_script_arguments=auth_params,
                             runconfig_pipeline_params=None,
                             inputs=[],
                             outputs=[],
                             compute_target=compute_target_gpu)
    est_step.run_after(process_step)

    #     step 3

    pipeline = Pipeline(workspace=ws, steps=[process_step, est_step])
    pipeline_run_first = Experiment(
        ws, project_config['experiment_name']).submit(pipeline)
    #https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinerun?view=azure-ml-py
    pipeline_run_first.wait_for_completion(show_output=True,
                                           timeout_seconds=9223372036854775807,
                                           raise_on_error=True)
Example 9
print("..8. completed")
print('')
print('')

print("9. Create run object for the experiment...")
print('.............................................')
run = Run.get_context()
experimentName = run.experiment.name
print("..9. completed")
print('')
print('')

print("10. Submit build pipeline run, synchronously/blocking...")
print('.............................................')
pipelineRun = Experiment(amlWs, experimentName).submit(pipeline)
pipelineRun.wait_for_completion(show_output=True)
print("..10. completed")
print('')
print('')

print("11. Download pipeline output...")
print('.............................................')
# Get a handle to the output of containerize pipeline stage
pipelineStagesLog = pipelineRun.find_step_run(
    'containerize')[0].get_output_data('containerize_output')
# Download locally
pipelineStagesLog.download('.', show_progress=True)
print("..11. completed")
print('')
print('')
Example 10
def run_pipeline_steps(workspace, steps, experiment_name):
    mlapp_pipeline = Pipeline(workspace=workspace, steps=steps)
    pipeline_run = Experiment(workspace, experiment_name).submit(mlapp_pipeline)
    pipeline_run.wait_for_completion()
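
A call site for this helper might look like the following (workspace, steps and experiment name are placeholders):

ws = Workspace.from_config()
run_pipeline_steps(ws, steps=[prep_step, train_step],
                   experiment_name="mlapp-experiment")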
Example 11
def run_parallel(distributed_csv_iris_step, aml_interface):
    ws = aml_interface.workspace
    pipeline = Pipeline(workspace=ws, steps=[distributed_csv_iris_step])
    pipeline_run = Experiment(ws, 'iris-prs').submit(pipeline)
    pipeline_run.wait_for_completion(show_output=True)
Example 12
def main():
    # Get our configs
    with open("ptgnn/authentication.json") as jsonFile:
        authData = json.load(jsonFile)[args.auth_cluster]

    # Copy the convertCorpus script here. Done so we don't upload the corpus to Azure, or keep a copy of the script in here.
    # (It's weird, I know. It works and has a purpose though)
    convertCorpusLocation = Path("../convertCorpusForML.py")
    convertCorpusAzureLocation = Path("./convertCorpusForML.py")
    shutil.copy(convertCorpusLocation, convertCorpusAzureLocation)

    # Grab the authentication data from the JSON file
    subID = authData["subID"]  # Get from Azure Portal; used for billing
    resGroup = authData["resGroup"]  # Name for the resource group
    wsName = authData["wsName"]  # Name for the workspace, which is the collection of compute clusters + experiments
    computeName = authData["computeName"]  # Name for computer cluster
    datastoreName = authData["datastoreName"]

    # Get the workspace, the compute target and the datastore
    ws = Workspace.get(wsName, subscription_id=subID, resource_group=resGroup)
    computeTarget = ComputeTarget(ws, computeName)
    datastore = Datastore(ws, name=datastoreName)

    # Download the entire corpus to the compute target. Save the DataReference obj here
    # as_mount is also possible, but slows things down due to network opening of files
    corpus_location = datastore.path(args.aml_location).as_download()
    output_location = "./"
    # The files that will be uploaded for usage by our script (everything in the azure folder)
    source_directory = "."

    # params for the script
    params = {
        "--corpus_location": corpus_location,
        "--output_folder": output_location,
        "--aml": "",
        "--training_percent": args.training_percent,
        "--validation_percent": args.validation_percent,
        "-c": ""
    }
    if args.log_num is not None:
        params["-l"] = args.log_num
        tags = {
            "logs": str(args.log_num)
        }
    else:
        tags = {
            "logs": "MAX"
        }
    if args.statement_generation:
        params["-s"] = ""
        tags["generationType"] = "Statement"
    else:
        tags["generationType"] = "Severity"
    # Set up the estimator object. Note the inputs element, it tells azure that corpus_location in params
    # will be a DataReference Object.
    est = Estimator(source_directory=source_directory,
                    compute_target=computeTarget,
                    entry_script='convertCorpusForML.py',
                    script_params=params,
                    inputs=[corpus_location],
                    conda_packages=["pip"],
                    pip_packages=["azureml-core", "tqdm", "numpy", "protobuf"],
                    use_docker=True,
                    use_gpu=False)
    # Start the experiment
    run = Experiment(ws, args.exp_name).submit(config=est, tags=tags)
    # remove the copy of convertCorpus (Remember, don't question this)
    convertCorpusAzureLocation.unlink()
    # print out the portal URL
    # print("Portal URL: ", run.get_portal_url())
    # this will stream everything that the compute target does.
    print("Experiment Started. Remember you can exit out of this program but the experiment will still run on Azure!")
    run.wait_for_completion(show_output=True)
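
The generic Estimator class used above was deprecated in later azureml-sdk releases in favor of ScriptRunConfig with an explicit Environment. A rough equivalent of the submission above (the requirements file name is assumed):

from azureml.core import Environment, ScriptRunConfig

env = Environment.from_pip_requirements("convert-env", "requirements.txt")
src = ScriptRunConfig(source_directory=source_directory,
                      script='convertCorpusForML.py',
                      arguments=["--corpus_location", corpus_location],
                      compute_target=computeTarget,
                      environment=env)
run = Experiment(ws, args.exp_name).submit(src, tags=tags)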
Example 13
def main():
    # Replace this with Service Principal authentication for the Workspace
    ws = Workspace.from_config()

    # Choose a name for your CPU cluster
    cpu_cluster_name = "cpu-cluster"

    # Verify that cluster does not exist already
    try:
        cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
        print('Found existing cluster, use it.')
    except ComputeTargetException:
        compute_config = AmlCompute.provisioning_configuration(
            vm_size='STANDARD_D2_V2', max_nodes=4)
        cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name,
                                           compute_config)

    cpu_cluster.wait_for_completion(show_output=True)

    # Run configuration for R
    rc = RunConfiguration()
    rc.framework = "R"

    # Run configuration for python
    py_rc = RunConfiguration()
    py_rc.framework = "Python"
    py_rc.environment.docker.enabled = True

    # Combine GitHub and Cran packages for R env
    rc.environment.r = RSection()
    rc.environment.docker.enabled = True

    # Upload iris data to the datastore
    # target_path = "iris_data"
    # upload_files_to_datastore(ds,
    #                         list("./iris.csv"),
    #                         target_path = target_path,
    #                         overwrite = TRUE)

    training_data = DataReference(
        datastore=ws.get_default_datastore(),
        data_reference_name="iris_data",
        path_on_datastore="iris_data/iris.csv",
    )

    print('Successful')
    print(training_data)

    # PipelineData object for newly trained model
    # trained_model_dir = PipelineData(
    #     name="trained_model", datastore=ws.get_default_datastore(), is_directory=True
    # )

    # Train and Register the model
    train_step = RScriptStep(
        script_name="train.R",
        arguments=[training_data],
        inputs=[training_data],
        compute_target=cpu_cluster_name,
        source_directory=".",
        runconfig=rc,
        allow_reuse=True,
    )

    # Deploy the trained model

    print("Step Train created")

    steps = [train_step]

    train_pipeline = Pipeline(workspace=ws, steps=steps)
    train_pipeline.validate()
    pipeline_run = Experiment(ws, 'iris_training').submit(train_pipeline)
    pipeline_run.wait_for_completion(show_output=True)

    published_pipeline = train_pipeline.publish(
        name="iris-train",
        description="Model training/retraining pipeline",
    )
    print(f"Published pipeline: {published_pipeline.name}")
    print(f"for build {published_pipeline.version}")


def get_workspace() -> Workspace:
    # note: sub_id, db_rg, az_ml_ws_name and sp are assumed to be defined at module scope
    ws = Workspace(sub_id, db_rg, az_ml_ws_name, auth=sp)
    return ws


def get_db_compute(ws: Workspace) -> DatabricksCompute:
    db_compute = None
    try:
        db_compute = ComputeTarget(ws, db_compute_name)
    except ComputeTargetException:
        attach_config = DatabricksCompute.attach_configuration(
            resource_group=db_rg,
            workspace_name=db_workspace_name,
            access_token=db_access_token)
        db_compute = ComputeTarget.attach(ws, db_compute_name, attach_config)
        db_compute.wait_for_completion(True)
    return db_compute


ws = get_workspace()
db_compute = get_db_compute(ws)

train_step = PythonScriptStep(script_name="train.py",
                              compute_target=db_compute,
                              source_directory="./notebooks",
                              num_workers=4)

steps = [train_step]
pipeline = Pipeline(workspace=ws, steps=steps)
run = Experiment(ws, "anomaly_detector").submit(pipeline)
run.wait_for_completion()