# NOTE(review): this chunk is incomplete at BOTH edges — it starts inside a
# step-constructor call (presumably ParallelRunStep; the opening paren is
# outside this view) and ends inside an unfinished publish_pipeline(...) call,
# so the line is kept byte-identical rather than reformatted.
# What the visible code does: finishes configuring a batch-scoring step for an
# "inception" model, builds a Pipeline around it, submits it as experiment
# 'batch_scoring', waits for completion, downloads the "scores" step output to
# ./inception_results, locates parallel_run_step.txt, loads it as a
# ":"-delimited CSV with columns Filename/Prediction, prints the row count,
# and begins publishing the pipeline as "Inception_v3_scoring".
# NOTE(review): the os.walk loop keeps only the LAST matching result_file if
# several exist, and df.head(10) is a bare expression (notebook display) —
# it has no effect outside a notebook. Confirm against the original notebook.
output=output_dir, arguments=["--model_name", "inception", "--labels_dir", label_config], side_inputs=[label_config], parallel_run_config=parallel_run_config, allow_reuse=False) from azureml.core import Experiment from azureml.pipeline.core import Pipeline pipeline = Pipeline(workspace=ws, steps=[batch_score_step]) pipeline_run = Experiment(ws, 'batch_scoring').submit(pipeline) pipeline_run.wait_for_completion(show_output=True) import pandas as pd batch_run = next(pipeline_run.get_children()) batch_output = batch_run.get_output_data("scores") batch_output.download(local_path="inception_results") for root, dirs, files in os.walk("inception_results"): for file in files: if file.endswith("parallel_run_step.txt"): result_file = os.path.join(root, file) df = pd.read_csv(result_file, delimiter=":", header=None) df.columns = ["Filename", "Prediction"] print("Prediction has ", df.shape[0], " rows") df.head(10) published_pipeline = pipeline_run.publish_pipeline( name="Inception_v3_scoring",
# Build, validate, submit, monitor, and publish the Titanic training pipeline.
# Relies on names defined earlier in the file: ws (Workspace), train_step,
# Pipeline, Experiment, and PipelineParameter from the azureml SDK.
compare_models = [train_step]

# Assemble the pipeline from the training step and make sure it is well-formed.
pipeline1 = Pipeline(workspace=ws, steps=train_step)
pipeline1.validate()
print("Pipeline validation complete")

# Kick off a run under the 'Titanic_Pipeline_Notebook' experiment and block
# until the remote run finishes.
pipeline_run1 = Experiment(ws, 'Titanic_Pipeline_Notebook').submit(pipeline1)
pipeline_run1.wait_for_completion()
# RunDetails(pipeline_run1).show()

# Report the status of every child step; dump the job log for failed steps.
step_runs = pipeline_run1.get_children()
for step_run in step_runs:
    status = step_run.get_status()
    print('Script:', step_run.name, 'status:', status)
    # Broaden this condition to inspect logs of successful steps as well.
    if status == "Failed":
        joblog = step_run.get_job_log()
        print('job log:', joblog)

# Declare a pipeline parameter (consumed by later cells) and publish the run
# so it can be re-invoked via REST under a stable name/version.
pipeline_param = PipelineParameter(name="pipeline_arg", default_value=10)
published_pipeline1 = pipeline_run1.publish_pipeline(
    name="Published_Titanic_Pipeline_Notebook",
    description="Titanic_Pipeline_Notebook Published Pipeline Description",
    version="1.0")
# NOTE(review): this chunk starts mid-statement — `arguments=[], allow_reuse=True )`
# is the tail of a step-constructor call (presumably ParallelRunStep for
# parallelrun_step) whose opening paren is outside this view, so the line is
# kept byte-identical rather than reformatted.
# What the visible code does: wraps parallelrun_step in a Pipeline, submits it
# as experiment 'batch_prediction_pipeline', waits for completion, downloads
# the 'inferences' output of the first (and only) child run into ./results,
# locates parallel_run_step.txt via os.walk, loads it as a ":"-delimited CSV
# with columns File/Prediction, and prints the DataFrame.
# NOTE(review): if os.walk finds no parallel_run_step.txt, result_file is
# undefined and pd.read_csv raises NameError; depends on os and pd being
# imported earlier in the file — confirm against the full notebook.
arguments=[], allow_reuse=True ) # Create the pipeline pipeline = Pipeline(workspace=ws, steps=[parallelrun_step]) #step 4 - run pipeline and get step output from azureml.core import Experiment # Run the pipeline as an experiment pipeline_run = Experiment(ws, 'batch_prediction_pipeline').submit(pipeline) pipeline_run.wait_for_completion(show_output=True) # Get the outputs from the first (and only) step prediction_run = next(pipeline_run.get_children()) prediction_output = prediction_run.get_output_data('inferences') prediction_output.download(local_path='results') # Find the parallel_run_step.txt file for root, dirs, files in os.walk('results'): for file in files: if file.endswith('parallel_run_step.txt'): result_file = os.path.join(root,file) # Load and display the results df = pd.read_csv(result_file, delimiter=":", header=None) df.columns = ["File", "Prediction"] print(df)