示例#1
0
    def __get_run_config(self,
                         compute_target,
                         channels=None,
                         conda_packages=None,
                         pip_packages=None):
        """Build a RunConfiguration for the given compute target.

        Args:
            compute_target: attached compute object; only its ``.name`` is used.
            channels: optional iterable of conda channel names to add to the
                dependency spec.
            conda_packages: optional list of conda package specs.
            pip_packages: optional list of pip package specs.

        Returns:
            A configured ``RunConfiguration`` ready to be attached to a run.
        """
        run_config = RunConfiguration(framework="python")

        # Point the run at the attached compute target (e.g. a Linux DSVM).
        run_config.target = compute_target.name

        # Docker is disabled: the run executes directly on the target VM.
        # NOTE(review): a previous comment here claimed Docker was used, but
        # the code sets enabled = False — confirm which is intended.
        run_config.environment.docker.enabled = False

        # Let the service provision the environment from the dependency spec
        # instead of relying on a user-managed interpreter.
        run_config.environment.python.user_managed_dependencies = False

        # Prepare the environment automatically the first time it is used.
        run_config.auto_prepare_environment = True

        # Build the dependency object from the requested packages.
        conda_dependencies = CondaDependencies.create(
            conda_packages=conda_packages, pip_packages=pip_packages)
        if channels:
            for channel in channels:
                conda_dependencies.add_channel(channel)

        run_config.environment.python.conda_dependencies = conda_dependencies

        return run_config
示例#2
0
                                     path_on_datastore=dataPathRemote,
                                     path_on_compute='/tmp',
                                     mode='download',
                                     overwrite=False)

# Create a new RunConfiguration and attach the data reference so the
# datastore contents are available to the run.
runConfig = RunConfiguration()
runConfig.data_references = {
    ds.name: dataRef
}  # This syntax is not documented!

# AZML_RUN_LOCAL selects local vs remote execution; anything other than the
# exact string "true" means remote.
if os.environ.get('AZML_RUN_LOCAL', 'false') != "true":
    # Remote path: run on Azure ML compute inside Docker, letting the
    # service build the conda environment.
    runConfig.target = computeTarget
    runConfig.environment.docker.enabled = True
    runConfig.auto_prepare_environment = True
    runConfig.environment.python.conda_dependencies = CondaDependencies.create(
        conda_packages=['scikit-learn==0.20.3', 'pandas', 'matplotlib'])
    print(
        f"### Will execute script {trainingScriptDir}/{trainingScript} on REMOTE compute"
    )
else:
    # Local path: reuse the caller's pre-built Python 3 virtual env, so the
    # interpreter must come from $VIRTUAL_ENV.
    runConfig.environment.python.user_managed_dependencies = True
    runConfig.environment.python.interpreter_path = os.environ[
        'VIRTUAL_ENV'] + "/bin/python"
    print(
        f"### Will execute script {trainingScriptDir}/{trainingScript} on LOCAL compute"
    )

# Run configuration for the AML compute cluster created above.
run_amlcompute = RunConfiguration()
run_amlcompute.target = args.aml_compute_target

# Auto-prepare the environment when first used for execution.
run_amlcompute.auto_prepare_environment = True

# Execute inside Docker on the stock CPU base image.
docker_section = run_amlcompute.environment.docker
docker_section.enabled = True
docker_section.base_image = DEFAULT_CPU_IMAGE

# Let the service build the conda environment in the image from the
# listed pip packages instead of managing dependencies ourselves.
python_section = run_amlcompute.environment.python
python_section.user_managed_dependencies = False
python_section.conda_dependencies = CondaDependencies.create(
    pip_packages=[
        'numpy', 'pandas', 'scikit-learn==0.20.3', 'sklearn_pandas',
        'azureml-sdk'
    ])

# Intermediate pipeline output goes to the workspace's default blob store.
scripts_folder = 'scripts'
def_blob_store = ws.get_default_datastore()

train_output = PipelineData('train_output', datastore=def_blob_store)
print("train_output PipelineData object created")
trainStep = PythonScriptStep(name="train",
示例#4
0
print('')

print("5. Instantiate and configure run object for the managed compute...")
print('.............................................')

# Run configuration targeting the provisioned AML compute, prepared
# automatically on first use.
amlComputeRunConf = RunConfiguration()
amlComputeRunConf.target = args.aml_compute_target
amlComputeRunConf.auto_prepare_environment = True

# Docker execution on the default CPU image; the service builds the conda
# environment from the listed pip packages (no user-managed dependencies).
amlComputeRunConf.environment.docker.enabled = True
amlComputeRunConf.environment.docker.base_image = DEFAULT_CPU_IMAGE
amlComputeRunConf.environment.python.user_managed_dependencies = False
amlComputeRunConf.environment.python.conda_dependencies = \
    CondaDependencies.create(
        pip_packages=['numpy', 'pandas', 'scikit-learn', 'azureml-sdk'])

print("..5. completed")
print('')
print('')

print("6. Define pipeline stage - training...")
print('.............................................')

# Output location for the training step, backed by the workspace storage.
training_output = PipelineData('train_output', datastore=amlWsStorageRef)
trainPipelineStep = PythonScriptStep(name="train",
                                     script_name="train.py",
                                     arguments=[
                                         "--model_name", args.model_name,
                                         "--build_number", args.build_number