# assumes `ws` (Workspace) and `pipeline` (Pipeline) are defined earlier in the script
print("Pipeline is built")

pipeline.validate()
print("Simple validation complete")

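# get the current run's context so the pipeline is submitted to the same experiment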
run = Run.get_context()
experiment_name = run.experiment.name

pipeline_run = Experiment(ws, experiment_name).submit(pipeline)
print("Pipeline is submitted for execution")

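# block until the pipeline run finishes (timeout of 43200 s = 12 h), streaming step output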
pipeline_run.wait_for_completion(show_output=True, timeout_seconds=43200)

print("Get StepRun for evaluate step...")
pipeline_run_id = pipeline_run.id
step_run_id = pipeline_run.find_step_run('evaluate')[0].id
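# look up the graph node id of the 'evaluate' step from the pipeline run's graph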
node_id = pipeline_run.get_graph().node_name_dict['evaluate'][0].node_id
print('Pipeline Run ID: {} Step Run ID: {}, Step Run Node ID: {}'.format(pipeline_run_id, step_run_id, node_id))
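# rebuild a StepRun handle from the ids so the step's output data can be fetched directly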
step_run = StepRun(run.experiment, step_run_id, pipeline_run_id, node_id)
print(step_run)

print("Downloading evaluation results...")
# access the evaluate_output
#data = pipeline_run.find_step_run('evaluate')[0].get_output_data('evaluate_output')
data = step_run.get_output_data('evaluate_output')
# download the predictions to local path
data.download('.', show_progress=True)

import json
import os

# load the eval info json
with open(os.path.join('./', data.path_on_datastore, 'eval_info.json')) as f:
    eval_info = json.load(f)

Example #2
print("Pipeline is built")

pipeline.validate()
print("Simple validation complete")

run = Run.get_context()
experiment_name = run.experiment.name

pipeline_run = Experiment(ws, experiment_name).submit(pipeline)
print("Pipeline is submitted for execution")

pipeline_run.wait_for_completion(show_output=True)

print("Downloading evaluation results...")
# access the evaluate_output
data = pipeline_run.find_step_run('evaluate')[0].get_output_data(
    'evaluate_output')
# download the predictions to local path
data.download('.', show_progress=True)

import json
import os
# load the eval info json
with open(os.path.join('./', data.path_on_datastore, 'eval_info.json')) as f:
    eval_info = json.load(f)
print("Printing evaluation results...")
print(eval_info)

print("Saving evaluation results for release pipeline...")
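# `args.path` is assumed to be a command-line argument parsed earlier in the script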
output_dir = os.path.join(args.path, 'outputs')
os.makedirs(output_dir, exist_ok=True)
filepath = os.path.join(output_dir, 'eval_info.json')
# write the evaluation results so the release pipeline can pick them up
with open(filepath, 'w') as f:
    json.dump(eval_info, f)
Example #3
print("..9. completed")
print('')
print('')

print("10. Submit build pipeline run, synchronously/blocking...")
print('.............................................')
pipelineRun = Experiment(amlWs, experimentName).submit(pipeline)
pipelineRun.wait_for_completion(show_output=True)
print("..10. completed")
print('')
print('')

print("11. Download pipeline output...")
print('.............................................')
# Get a handle to the output of containerize pipeline stage
pipelineStagesLog = pipelineRun.find_step_run(
    'containerize')[0].get_output_data('containerize_output')
# Download locally
pipelineStagesLog.download('.', show_progress=True)
print("..11. completed")
print('')
print('')

print("12. Parse pipeline stages log into JSON...")
print('.............................................')
import json
import os
# load the pipeline output json
with open(
        os.path.join('./', pipelineStagesLog.path_on_datastore,
                     'containerize_info.json')) as f:
    buildPipelineOutputVarsJson = json.load(f)
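
# The keys inside containerize_info.json depend on what the containerize step wrote;
# below is a minimal, hypothetical usage sketch (the 'image_name' field is an assumption,
# not something guaranteed by the pipeline above).
imageName = buildPipelineOutputVarsJson.get('image_name')
print("Containerize step reported image: {}".format(imageName))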