)

# Second pipeline step: train the PyTorch model, reading the preprocessed
# data produced by the preprocessing step (wired in via `output`).
pytorch_step = EstimatorStep(
    name="PyTorch_Train",
    estimator=pytorch_est,
    estimator_entry_script_arguments=["--data_dir", output],
    inputs=[output],
    compute_target=cpu_cluster,
    allow_reuse=True,
)

# Assemble the two-step pipeline, submit it, and block until it finishes.
pipeline = Pipeline(workspace=ws, steps=[preprocessing_step, pytorch_step])
run = Experiment(ws, args.experiment).submit(pipeline)
run.wait_for_completion(show_output=True)

# Fetch the child step runs once: every get_steps() call is a service
# round-trip, so avoid calling it per lookup. next() raises StopIteration
# if a step name is missing, failing loudly at the right spot.
step_runs = list(run.get_steps())
preprocessing_run = next(step for step in step_runs
                         if step.name == "Preprocessing_Train")
pytorch_run = next(step for step in step_runs
                   if step.name == "PyTorch_Train")

# Register the fitted sklearn preprocessing artifact from the first step.
preprocessing_model = preprocessing_run.register_model(
    model_name='sklearn_preprocessing',
    model_path='outputs/preprocessing.joblib')
print(preprocessing_model.name,
      preprocessing_model.id,
      preprocessing_model.version,
      sep='\t')

# Register the trained PyTorch autoencoder from the second step.
pytorch_model = pytorch_run.register_model(model_name='autoencoder',
                                           model_path='outputs/model.pt')
print(pytorch_model.name, pytorch_model.id, pytorch_model.version, sep='\t')
from azureml.pipeline.core import Pipeline

# Build a one-step pipeline around the compareModels step.
pipeline1 = Pipeline(workspace=ws, steps=[compareModels])
# NOTE(review): Run.get_context() is bound here but never used below —
# confirm whether a later (unseen) part of the script needs it.
run = Run.get_context()

# Submit the pipeline
from azureml.core import Experiment

# Submit the pipeline to be run
pipeline_run1 = Experiment(ws, 'Compare_Models_Exp').submit(pipeline1)
# Positional args: presumably show_output=False, timeout_seconds=0,
# raise_on_error=True — TODO confirm against the PipelineRun API; a zero
# timeout looks suspicious for a blocking wait.
pipeline_run1.wait_for_completion(False, 0, True)

# Grab the run id of the single (1 of 1) step in the pipeline.
step1_runid = [e for e in pipeline_run1.get_steps()][0].id
# Build the full path to the step's outputs inside the blob storage.
path_model_datastore = "azureml/" + step1_runid + "/outputs/"
# Download the model locally from the default datastore.
def_data_store.download(target_path='datastore', prefix=path_model_datastore)
# Register the model in the workspace from the locally downloaded file.
model = Model.register(workspace=ws,
                       model_path='datastore/' + path_model_datastore +
                       model_pklname + '.pkl',
                       model_name=model_basename)

# Create environment file: declare the conda packages the model needs.
from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn")
# Example #3
    allow_reuse=True
)

# Build pipeline

pipeline_steps = [
    dataprep_step,
    train_step,
]

# `steps` takes the step list directly; the original wrapped the list in
# another list (steps=[pipeline_steps]), which is redundant nesting.
pipeline = Pipeline(workspace=workspace, steps=pipeline_steps)

# Run pipeline: submit and block until completion, streaming logs.

run = Experiment(workspace=workspace, name='gensim_lda-pipeline').submit(pipeline)

run.wait_for_completion(show_output=True)

# Get training step: next() fails with StopIteration at this line if the
# step is missing, instead of an opaque IndexError from [...][0].

run_train_step = next(s for s in run.get_steps() if s.name == 'train.py')

print(run_train_step.get_metrics())
print(run_train_step.get_file_names())

# Register model from the training step's outputs directory.

model = run_train_step.register_model(model_name='gensim_lda', model_path='outputs')

print(model.name, model.id, model.version, sep='\t')