Example 1
def tb():
    # Assumes module-level imports of Workspace, Project, Run and RunConfiguration
    # from the early-preview Azure ML SDK, plus shutil.
    import os
    ws = Workspace.from_config()
    print(ws.name)
    proj = Project.attach(ws, 'tbhistory', '/tmp/tb-test')
    shutil.copy('tftb.py', os.path.join(proj.project_directory, 'tftb.py'))

    # Attach a remote DSVM (SSH on port 5022) as a legacy compute target.
    from azureml.core.compute_target import RemoteTarget
    rt = RemoteTarget(name='dsvm', address='hai2.eastus2.cloudapp.azure.com:5022', username='******', password='******')
    
    proj.attach_legacy_compute_target(rt)
    rc = RunConfiguration.load(proj, "dsvm")
    # Use the pre-existing '/anaconda/envs/tf' Python environment on the DSVM
    # instead of letting Azure ML manage the dependencies.
    rc.environment.python.user_managed_dependencies = True
    rc.environment.python.interpreter_path = '/anaconda/envs/tf/bin/python'
    print(rc.target)

    run = Run.submit(proj, rc, 'tftb.py')
    print(run.id)
    #run.wait_for_completion(show_output=True)
    # Stream the run's TensorBoard logs to a local TensorBoard instance while the run executes.
    from azureml.contrib.tensorboard import Tensorboard
    
    tb = Tensorboard([run])
    print('starting tensorboard...')
    print(tb.start())
    print('tensorboard started.')
    run.wait_for_completion(show_output=True)
    tb.stop()
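As a side note, later SDK releases ship the same class outside the contrib namespace, in the azureml-tensorboard package; a minimal sketch of the same lifecycle under that assumption, reusing the run object from the example above:

from azureml.tensorboard import Tensorboard  # assumes the azureml-tensorboard package is installed

tb = Tensorboard([run])  # aggregate logs from one or more runs
print(tb.start())        # start() returns the local URL where TensorBoard is served
tb.stop()                # shut the local TensorBoard instance down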
Example 2
def submit_job():
    # Assumes module-level imports of Workspace, Project, Run, RunConfiguration
    # and shutil from the early-preview Azure ML SDK.
    ws = Workspace.from_config()
    proj = Project.attach(ws, 'util', '/tmp/random_proj')
    rc = RunConfiguration(proj, "local")
    shutil.copy('./train-sklearn-one-model.py', '/tmp/random_proj/train-sklearn-one-model.py')
    # Script arguments are passed as a list of tokens, not as a single string.
    #run = Run.submit(proj, rc, "train-sklearn-one-model.py", "--alpha 0.9")
    run = Run.submit(proj, rc, "train-sklearn-one-model.py", arguments_list=["--alpha", "0.9"])
    run.wait_for_completion(show_output=True)
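train-sklearn-one-model.py itself is not part of this listing; a hypothetical sketch of how such a script might consume the --alpha value passed through arguments_list (the script body below is an assumption, not the original):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--alpha', type=float, default=0.5)  # regularization strength for the sklearn model
args = parser.parse_args()
print('training with alpha =', args.alpha)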
Example 3
def test():
    # Assumes module-level imports of Workspace, Project, Run and RunConfiguration
    # from the early-preview Azure ML SDK, plus a local helpers module.
    ws = Workspace.from_config()
    proj = Project.attach(ws, 'test_rh', '/tmp/randomproj1')
    rc = RunConfiguration(proj, 'local')
    rc.environment.python.interpreter_path = '/Users/haining/miniconda3/envs/comet/bin/python'
    # Generate a throwaway script; anything it writes under ./outputs is uploaded
    # to run history as an artifact of the run.
    with open('/tmp/randomproj1/test.py', 'w') as file:
        file.write('import sys; print(sys.version); import os; os.makedirs("./outputs", exist_ok=True); fs = open("./outputs/f.txt", "w"); fs.write("hello!"); fs.close()')
    r = Run.submit(proj, rc, 'test.py')
    print(helpers.get_run_history_url(r))
    r.wait_for_completion(show_output=True)
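Because test.py writes under ./outputs, the file is stored with the run; a minimal sketch for retrieving it afterwards, assuming the completed run object r from above and the standard Run file APIs:

for name in r.get_file_names():   # names of all files recorded for the run
    print(name)
r.download_file('outputs/f.txt', output_file_path='/tmp/f.txt')  # fetch the file written by test.py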
Example 4
# Assumes project, project_folder, run_config, train_script and helpers are
# defined earlier in the script (early-preview Azure ML SDK).
cd = CondaDependencies()
cd.add_conda_package('numpy')
# overwrite the default conda_dependencies.yml file
cd.save_to_file(project_dir = project_folder, file_name='conda_dependencies.yml')

# auto-prepare the Docker image when used for execution (if it is not already prepared)
run_config.prepare_environment = True

print()
print('##################################################')
print('submitting {} for a Spark run on ACI...'.format(train_script))
print('##################################################')
print()

run = Run.submit(project_object = project, 
                 run_config = run_config, 
                 script_to_run = "train-spark.py")

print(helpers.get_run_history_url(run))

# Shows output of the run on stdout.
run.wait_for_completion(show_output = True)

print('attach a VM target:')

from azureml.exceptions.azureml_exception import UserErrorException
from azureml.core.compute_target import RemoteTarget

try:
    # Attach Docker on a remote VM as a legacy compute target.
    # Address and credentials are placeholders.
    project.attach_legacy_compute_target(RemoteTarget(name = "cpu-dsvm",
                                                      address = '******', username = '******', password = '******'))
except UserErrorException as e:
    print(e)  # e.g. a target with this name is already attached
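For reference, a minimal self-contained sketch of how a CondaDependencies object like the one above can be built and inspected before it is written to conda_dependencies.yml (the package names here are illustrative):

from azureml.core.conda_dependencies import CondaDependencies

cd = CondaDependencies()          # starts from the SDK's default dependency set
cd.add_conda_package('numpy')     # conda-managed package
cd.add_pip_package('joblib')      # pip-managed package
print(cd.serialize_to_string())   # the YAML content that save_to_file() writes out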
Example 5
# Assumes cd (a CondaDependencies object), project, project_folder, rc,
# train_script and helpers are defined earlier in the script.
cd.add_conda_package('joblib')
cd.add_pip_package('azureml-contrib-daskonbatch')

# overwrite the default conda_dependencies.yml file
cd.save_to_file(project_dir=project_folder, file_name='conda_dependencies.yml')

print()
print('##################################################')
print('submitting {} for a batch ai run...'.format(train_script))
print('##################################################')
print()

print("prepare run...")
prep = Run.prepare_compute_target(project_object=project, run_config=rc)

print(helpers.get_run_history_url(prep))

prep.wait_for_completion(show_output=True)

print('now run...')

run = Run.submit(project_object=project,
                 run_config=rc,
                 script_to_run=train_script)

print("run history URL is here:")
print(helpers.get_run_history_url(run))

run.wait_for_completion(show_output=True)
print(helpers.get_run_history_url(run))
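helpers.get_run_history_url is a local utility that is not shown in this listing; the SDK's Run object also exposes the portal link directly, which would be one way to implement it (a sketch under that assumption):

def get_run_history_url(run):
    # Link to the run in the Azure ML portal / run history UI.
    return run.get_portal_url()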