Example #1
# set Docker base image to the default CPU-based image
run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_MMLSPARK_CPU_IMAGE
print('base image is', run_config.environment.docker.base_image)
#run_config.environment.docker.base_image = 'microsoft/mmlspark:plus-0.9.9'

# use conda_dependencies.yml to create a conda environment in the Docker image for execution
# please update this file if you need additional packages.
run_config.environment.python.user_managed_dependencies = False

cd = CondaDependencies()
cd.add_conda_package('numpy')
# overwrite the default conda_dependencies.yml file
cd.save_to_file(project_dir = project_folder, file_name='conda_dependencies.yml')

# auto-prepare the Docker image when used for execution (if it is not already prepared)
run_config.prepare_environment = True

print()
print('##################################################')
print('submitting {} for a Spark run on ACI...'.format(train_script))
print('##################################################')
print()

run = Run.submit(project_object = project, 
                 run_config = run_config, 
                 script_to_run = "train-spark.py")

print(helpers.get_run_history_url(run))

# Shows output of the run on stdout.
run.wait_for_completion(show_output = True)
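
The fragment above assumes that run_config, project, project_folder, train_script, and the helpers module were created earlier with the preview-era SDK. For comparison, the same Docker-plus-conda setup is usually expressed through an Environment in the later GA azureml-core SDK; the sketch below only approximates that newer pattern, is not part of the original sample, omits compute-target selection, and uses illustrative environment and experiment names.

from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import DEFAULT_CPU_IMAGE

ws = Workspace.from_config()

# Build an environment with the default CPU base image and a conda dependency on numpy
env = Environment(name="spark-cpu-env")                      # illustrative name
env.docker.base_image = DEFAULT_CPU_IMAGE
env.python.conda_dependencies = CondaDependencies.create(conda_packages=["numpy"])

# Configure and submit the script run
src = ScriptRunConfig(source_directory=".", script="train-spark.py", environment=env)
run = Experiment(ws, "spark-on-aci").submit(src)             # illustrative experiment name
run.wait_for_completion(show_output=True)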
Example #2
        vm_size="STANDARD_D2_V2",  # for GPU, use "STANDARD_NC6"
        #vm_priority = 'lowpriority', # optional
        autoscale_enabled=True,
        cluster_min_nodes=1,
        cluster_max_nodes=4)

    # create the cluster
    compute_target = ws.create_compute_target(batchai_cluster_name,
                                              provisioning_config)
    compute_target.wait_for_provisioning(show_output=True)
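
The snippet starts in the middle of a provisioning call, so the call head, its import, and ws are assumed to have been defined earlier. For reference, Batch AI clusters were later replaced by AmlCompute; a rough sketch of the equivalent provisioning step in the GA azureml-core SDK (cluster_name is illustrative) could look like this:

from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget

ws = Workspace.from_config()
cluster_name = "cpu-cluster"                       # illustrative name

# Describe the autoscaling CPU cluster
provisioning_config = AmlCompute.provisioning_configuration(
    vm_size="STANDARD_D2_V2",                      # for GPU, use "STANDARD_NC6"
    min_nodes=1,
    max_nodes=4)

# Create the cluster and block until provisioning finishes
compute_target = ComputeTarget.create(ws, cluster_name, provisioning_config)
compute_target.wait_for_completion(show_output=True)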

print('create Batch AI run config')

rc = RunConfiguration(project, "dask_run_config")
rc.environment.docker.enabled = True
rc.prepare_environment = True
rc.target = batchai_cluster_name
rc.environment.python.user_managed_dependencies = False
rc.batchai.node_count = 2

# create a new CondaDependencies obj
cd = CondaDependencies()
# add dask and joblib as conda dependencies
cd.add_conda_package('dask')
cd.add_conda_package('joblib')
cd.add_pip_package('azureml-contrib-daskonbatch')

# overwrite the default conda_dependencies.yml file
cd.save_to_file(project_dir=project_folder, file_name='conda_dependencies.yml')

print()
Example #3
cli_auth = AzureCliAuthentication()

# Get workspace
ws = Workspace.from_config(auth=cli_auth)

# Attach Experiment
experiment_name = "devops-ai-demo"
exp = Experiment(workspace=ws, name=experiment_name)
print(exp.name, exp.workspace.name, sep="\n")

# Edit a run configuration property on the fly.
run_config_system_managed = RunConfiguration()
# Use a new conda environment that is to be created from the conda_dependencies.yml file
run_config_system_managed.environment.python.user_managed_dependencies = False
# Automatically create the conda environment before the run
run_config_system_managed.prepare_environment = True

# To add scikit-learn as a conda dependency, uncomment the next line:
# run_config_system_managed.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'])
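
For contrast with the system-managed configuration above, the opposite, user-managed mode reuses whatever Python environment already exists on the target instead of building a conda environment. A minimal sketch of that alternative (nothing else in the sample needs to change):

run_config_user_managed = RunConfiguration()
# Reuse the interpreter and packages already installed on the target machine
run_config_user_managed.environment.python.user_managed_dependencies = True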

print("Submitting an experiment to new conda virtual env")
src = ScriptRunConfig(
    source_directory="./code",
    script="training/train.py",
    run_config=run_config_user_managed,
)
run = exp.submit(src)

# Shows output of the run on stdout.
run.wait_for_completion(show_output=True, wait_post_processing=True)
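
Once wait_for_completion returns, the run's logged metrics and output files can be pulled back from run history; this short optional follow-up uses only the run object from above:

# Inspect what the completed run recorded
print(run.get_metrics())        # metrics logged with run.log(...)
print(run.get_file_names())     # files uploaded to the run's outputs
print(run.get_portal_url())     # link to the run in the Azure portal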
Example #4
run_dsvm = RunConfiguration(framework = "python")

# Set the compute target to the Linux DSVM
run_dsvm.target = compute_target_name 

# Use Docker in the remote VM
run_dsvm.environment.docker.enabled = True

# Use the CPU base image
# To use a GPU on the DSVM, switch to the GPU base Docker image azureml.core.runconfig.DEFAULT_GPU_IMAGE
run_dsvm.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
print('Base Docker image is:', run_dsvm.environment.docker.base_image)

# Prepare the Docker and conda environment automatically when they're used for the first time 
run_dsvm.prepare_environment = True

# Specify the CondaDependencies object
run_dsvm.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'])
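
The DSVM run configuration above still has to be attached to a script submission; that follows the same pattern as the earlier examples. A minimal sketch, assuming a workspace object ws, a project_folder, and a train.py script (all of which are illustrative here):

from azureml.core import Experiment, ScriptRunConfig

exp = Experiment(workspace=ws, name="dsvm-demo")      # illustrative experiment name
src = ScriptRunConfig(source_directory=project_folder,
                      script="train.py",
                      run_config=run_dsvm)
run = exp.submit(src)
run.wait_for_completion(show_output=True)
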
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies


# use pyspark framework
hdi_run_config = RunConfiguration(framework="pyspark")

# Set compute target to the HDI cluster
hdi_run_config.target = hdi_compute.name
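
The HDInsight fragment stops after setting the target; a typical continuation, sketched here rather than taken from the original, attaches conda dependencies to the PySpark run configuration and submits it the same way as before (exp, project_folder, and the package list are illustrative):

# Attach conda dependencies for the PySpark run
hdi_run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'])

src = ScriptRunConfig(source_directory=project_folder,
                      script="train-spark.py",
                      run_config=hdi_run_config)
run = exp.submit(src)
run.wait_for_completion(show_output=True)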