Example #1
from azureml.core import Workspace, Run
from azureml.core.project import Project
from azureml.core.runconfig import RunConfiguration
import helpers  # local sample module providing get_run_history_url()


def test():
    ws = Workspace.from_config()
    proj = Project.attach(ws, 'test_rh', '/tmp/randomproj1')
    # Run locally with a user-managed Python interpreter.
    rc = RunConfiguration(proj, 'local')
    rc.environment.python.interpreter_path = '/Users/haining/miniconda3/envs/comet/bin/python'
    # Write a test script that prints the Python version and drops a file
    # into ./outputs, which run history picks up when the run finishes.
    with open('/tmp/randomproj1/test.py', 'w') as file:
        file.write('import sys; import os; print(sys.version); '
                   'os.makedirs("./outputs", exist_ok=True); '
                   'open("./outputs/f.txt", "w").write("hello!")')
    r = Run.submit(proj, rc, 'test.py')
    print(helpers.get_run_history_url(r))
    r.wait_for_completion(show_output=True)
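Once the run has completed, the file the test script wrote under ./outputs can be pulled back down with the same download_file API used in Example #3; a minimal sketch, assuming the run above succeeded:

# Sketch: retrieve the artifact the test script wrote to ./outputs.
r.download_file(name='outputs/f.txt', output_file_path='/tmp/f.txt')
with open('/tmp/f.txt') as fh:
    print(fh.read())  # hello!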
Example #2
# 'cd' is the CondaDependencies object assembled earlier in this sample;
# persist it so the run configuration can pick it up.
cd.save_to_file(project_dir=project_folder, file_name='conda_dependencies.yml')

# auto-prepare the Docker image when used for execution (if it is not already prepared)
run_config.prepare_environment = True

print()
print('##################################################')
print('submitting {} for a Spark run on ACI...'.format(train_script))
print('##################################################')
print()

run = Run.submit(project_object=project,
                 run_config=run_config,
                 script_to_run=train_script)  # train_script == 'train-spark.py'

print(helpers.get_run_history_url(run))

# Shows output of the run on stdout.
run.wait_for_completion(show_output = True)

print('attach a VM target:')

from azureml.exceptions.azureml_exception import UserErrorException
from azureml.core.compute_target import RemoteTarget

try:
    # Attach Docker on a remote VM as a legacy compute target.
    project.attach_legacy_compute_target(RemoteTarget(name="cpu-dsvm",
                                                      address="hai2.eastus2.cloudapp.azure.com:5022",
                                                      username="******",
                                                      password='******'))
except UserErrorException as e:
    # Attaching again (e.g. on a re-run) raises; report and continue.
    print('Caught a UserErrorException:', e)
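With the VM attached, a run can target it by name. A minimal sketch, assuming the attach succeeded and reusing the RunConfiguration(project, target_name) pattern from Example #1:

# Sketch: submit the same training script to the attached VM target.
# Assumes RunConfiguration is imported as in Example #1.
vm_run_config = RunConfiguration(project, 'cpu-dsvm')
vm_run = Run.submit(project_object=project,
                    run_config=vm_run_config,
                    script_to_run=train_script)
vm_run.wait_for_completion(show_output=True)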
Example #3
import numpy as np
import matplotlib.pyplot as plt

# List all child runs of the parent run, keyed by run id, with their metrics.
child_runs = {}
child_run_metrics = {}

for r in root_run.get_children():
    child_runs[r.id] = r
    child_run_metrics[r.id] = r.get_metrics()

# Pick the child run with the lowest mean squared error (mse).
best_run_id = min(child_run_metrics, key=lambda k: child_run_metrics[k]['mse'])
best_run = child_runs[best_run_id]
print('Best run is:', best_run_id)
print('Metrics:', child_run_metrics[best_run_id])

print('Run history url:')
print(helpers.get_run_history_url(best_run))

# model_file_name is the artifact name the training script wrote under
# ./outputs; it is defined earlier in the full sample.
best_model_file_name = "best_model.pkl"
best_run.download_file(name='outputs/' + model_file_name,
                       output_file_path=best_model_file_name)
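The downloaded file can then be used locally; a minimal sketch, assuming the artifact is a pickled scikit-learn model (the .pkl name suggests pickle):

# Sketch: load the downloaded model. Assumes a pickled estimator.
import pickle
with open(best_model_file_name, 'rb') as f:
    best_model = pickle.load(f)
print(best_model)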

print('plot mse_over_alpha.png')
best_alpha = child_run_metrics[best_run_id]['alpha']
min_mse = child_run_metrics[best_run_id]['mse']

# Sort by alpha so the dashed line reads left to right.
alpha_mse = np.array(sorted((child_run_metrics[k]['alpha'],
                             child_run_metrics[k]['mse'])
                            for k in child_run_metrics))

plt.plot(alpha_mse[:, 0], alpha_mse[:, 1], 'r--')
plt.plot(alpha_mse[:, 0], alpha_mse[:, 1], 'bo')
plt.xlabel('alpha')
plt.ylabel('mse')
plt.savefig('mse_over_alpha.png')  # the file name announced above