modelEvalReg = PythonScriptStep(
    name="Evaluate and Register Model",
    script_name="evaluate_model.py",
    arguments=["--release_id", 0, '--model_name', model_name],
    compute_target=compute_target_cpu,
    source_directory=source_directory,
    runconfig=run_config,
    allow_reuse=False)

modelEvalReg.run_after(trainingStep)
print("Model Evaluation and Registration Step is Created")

from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
pipeline = Pipeline(workspace=ws,
                    steps=[extractDataStep, trainingStep, modelEvalReg])
pipeline_run = Experiment(ws, pipeline_experiment_name).submit(pipeline)

pipeline_run.wait_for_completion(show_output=True, raise_on_error=True)

published_pipeline = pipeline_run.publish_pipeline(
    name=pipeline_name,
    description="Steps are: data preparation, training, model validation and model registration",
    version="0.1",
    continue_on_step_failure=False)

## Free up compute resource(s) after computation is completed!
print(f'Deleting compute resource: [{cluster_name_cpu}]')
compute_target_cpu.delete()
Example #2
# RunDetails(pipeline_run1).show()

step_runs = pipeline_run1.get_children()
for step_run in step_runs:
    status = step_run.get_status()
    print('Script:', step_run.name, 'status:', status)

    # Change this if you want to see details even if the Step has succeeded.
    if status == "Failed":
        joblog = step_run.get_job_log()
        print('job log:', joblog)

from azureml.pipeline.core import PipelineParameter

pipeline_param = PipelineParameter(name="pipeline_arg", default_value=10)
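# A PipelineParameter only influences a run if it is wired into a step's arguments,
# where it is resolved at submission time. A minimal sketch, assuming hypothetical
# step/script names (the compute and run config objects are not shown in this snippet):
from azureml.pipeline.steps import PythonScriptStep

param_demo_step = PythonScriptStep(
    name="Parameter Demo Step",                    # illustrative name, not from this notebook
    script_name="train_titanic.py",                # hypothetical script
    arguments=["--pipeline_arg", pipeline_param],  # the parameter flows into the script here
    compute_target=compute_target,                 # assumed to exist earlier in the notebook
    source_directory=source_directory,             # assumed to exist earlier in the notebook
    runconfig=run_config,
    allow_reuse=False)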

published_pipeline1 = pipeline_run1.publish_pipeline(
    name="Published_Titanic_Pipeline_Notebook",
    description="Titanic_Pipeline_Notebook Published Pipeline Description",
    version="1.0")

from azureml.pipeline.core import PublishedPipeline
import requests
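# Posting to a published pipeline endpoint requires an AAD bearer token in the
# request headers; a minimal sketch using interactive login authentication:
from azureml.core.authentication import InteractiveLoginAuthentication

auth_header = InteractiveLoginAuthentication().get_authentication_header()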

response = requests.post(published_pipeline1.endpoint,
                         headers=auth_header,  # AAD token header (see above)
                         json={
                             "ExperimentName": "Titanic_Pipeline_Notebook",
                             "ParameterAssignments": {
                                 "pipeline_arg": 20
                             }
                         })
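# The REST call returns the id of the newly started run; a sketch of fetching that
# run back through the SDK to monitor it (assumes the POST above succeeded):
from azureml.core import Experiment
from azureml.pipeline.core.run import PipelineRun

run_id = response.json()["Id"]
rest_run = PipelineRun(Experiment(ws, "Titanic_Pipeline_Notebook"), run_id)
print("Run started via REST:", rest_run.id)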
                               arguments=["--modelName", modelName],
                               runconfig=run_config,
                               allow_reuse=False)
print("Scoring and output Step created")

##------------- Create Pipeline

trainingStep.run_after(preprocessingStep)
scoringStep.run_after(trainingStep)

# Listing only scoringStep is enough here: the run_after dependencies declared above
# pull the preprocessing and training steps into the pipeline graph automatically.
qualityMLPipeline = Pipeline(workspace=ws, steps=[scoringStep])
print("Quality Prediction pipeline is built")

qualityMLPipeline.validate()
print("Quality Prediction pipeline simple validation complete")

##------------- Submit an Experiment using the Pipeline

pipelineRun = Experiment(ws, 'quality_prediction_gb').submit(qualityMLPipeline)
print("Quality Prediction pipeline submitted for execution")

##------------- Publish Pipeline

#publishedPipeline = qualityMLPipeline.publish(name="NewQualityPrediction-Pipeline", description="Quality Prediction pipeline",version="0.1")
publishedPipeline = pipelineRun.publish_pipeline(
    name="NewQualityPrediction-Pipeline",
    description="Quality Prediction pipeline",
    version="0.1")
print("Newly published pipeline id => ", publishedPipeline.id)
print("Newly published pipeline endpoint => ", publishedPipeline.endpoint)
Example #4
File: batchscore.py  Project: mindis/mlops
    # ... tail of the batch_score_step definition, truncated in the source
    allow_reuse=False)

from azureml.core import Experiment
from azureml.pipeline.core import Pipeline

pipeline = Pipeline(workspace=ws, steps=[batch_score_step])
pipeline_run = Experiment(ws, 'batch_scoring').submit(pipeline)
pipeline_run.wait_for_completion(show_output=True)

import os
import pandas as pd

batch_run = next(pipeline_run.get_children())
batch_output = batch_run.get_output_data("scores")
batch_output.download(local_path="inception_results")

for root, dirs, files in os.walk("inception_results"):
    for file in files:
        if file.endswith("parallel_run_step.txt"):
            result_file = os.path.join(root, file)

df = pd.read_csv(result_file, delimiter=":", header=None)
df.columns = ["Filename", "Prediction"]
print("Prediction has ", df.shape[0], " rows")
df.head(10)

published_pipeline = pipeline_run.publish_pipeline(
    name="Inception_v3_scoring",
    description="Batch scoring using Inception v3 model",
    version="1.0")

published_pipeline
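# Besides the REST endpoint, a published pipeline can be resubmitted directly from
# the SDK; a minimal sketch reusing the experiment name from the run above:
rerun = published_pipeline.submit(ws, experiment_name="batch_scoring")
print("Resubmitted published pipeline, run id:", rerun.id)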
Example #5
import os
import pandas as pd

for root, dirs, files in os.walk('results'):
    for file in files:
        if file.endswith('parallel_run_step.txt'):
            result_file = os.path.join(root, file)

# Load and display the results
df = pd.read_csv(result_file, delimiter=":", header=None)
df.columns = ["File", "Prediction"]
print(df)


# Step 5 - publish

# REST service
published_pipeline = pipeline_run.publish_pipeline(name='Batch_Prediction_Pipeline',
                                                   description='Batch pipeline',
                                                   version='1.0')
rest_endpoint = published_pipeline.endpoint

# Use the REST endpoint to start a job
import requests

response = requests.post(rest_endpoint,
                         headers=auth_header,  # AAD token header obtained earlier in the notebook
                         json={"ExperimentName": "Batch_Prediction"})
run_id = response.json()["Id"]


# Have it run automatically on a schedule
from azureml.pipeline.core import ScheduleRecurrence, Schedule
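# A minimal sketch of a recurring schedule for the published pipeline; the frequency,
# day and time below are illustrative assumptions:
recurrence = ScheduleRecurrence(frequency="Week", interval=1,
                                week_days=["Monday"], time_of_day="03:00")
weekly_schedule = Schedule.create(ws,
                                  name="weekly-batch-prediction",
                                  description="Run the batch prediction pipeline every week",
                                  pipeline_id=published_pipeline.id,
                                  experiment_name="Batch_Prediction",
                                  recurrence=recurrence)
print("Schedule created:", weekly_schedule.id)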