def load_data(dataset, input_name, overwrite=True):
    """Build a mounted ``Data`` entry for a run configuration's data section.

    Parameters
    ----------
    dataset :
        A registered dataset object; only its ``id`` attribute is read here.
    input_name : str
        Name of the environment variable under which the mount path is
        exposed to the training script.
    overwrite : bool, optional
        Whether existing data at the target may be overwritten. Defaults to
        ``True`` (the original hard-coded behavior); pass ``False`` to match
        the inline run configurations elsewhere in this file.

    Returns
    -------
    Data
        The configured data entry (mount mechanism, no output directories).
    """
    # NOTE(review): mechanism='mount' assumes the compute target supports
    # dataset mounting — confirm against the target's capabilities.
    return Data(
        data_location=DataLocation(dataset=RunDataset(dataset_id=dataset.id)),
        create_output_directories=False,
        mechanism='mount',
        environment_variable_name=input_name,
        overwrite=overwrite,
    )
# Compute target used for training.
compute_name = 'cpu-cluster'

# Script run configuration: the training script receives the mounted
# dataset path via the DatasetConsumptionConfig reference argument.
# NOTE: relies on `input_name` and `dataset` being defined earlier in
# this file, and `conda_env` for the environment.
src = ScriptRunConfig(
    source_directory='scripts',
    script='train.py',
    arguments=['--data-folder', f'DatasetConsumptionConfig:{input_name}'],
)

# Data section: mount the registered dataset, exposing its path through
# the environment variable named by `input_name`.
src.run_config.data = {
    input_name: Data(
        data_location=DataLocation(dataset=RunDataset(dataset_id=dataset.id)),
        create_output_directories=False,
        mechanism='mount',
        environment_variable_name=input_name,
        overwrite=False,
    )
}

# Remaining run parameters.
src.run_config.framework = 'python'
src.run_config.environment = conda_env
src.run_config.target = compute_name
src.run_config.node_count = 4

# Persist the run configuration to .azureml/mnist.runconfig.
get_run_config_from_script_run(src).save(name='mnist.runconfig')
# Environment-variable name under which the dataset is mounted.
input_name = 'mnist'

# Compute target used for training.
compute_name = 'cpu-cluster'

# Script run configuration: train.py is pointed at the mounted dataset
# through a DatasetConsumptionConfig reference argument.
src = ScriptRunConfig(
    source_directory='scripts',
    script='train.py',
    arguments=['--data-folder', f'DatasetConsumptionConfig:{input_name}'],
)

# Data section: mount the registered dataset (relies on `dataset` being
# defined earlier in this file) and expose its path via `input_name`.
src.run_config.data = {
    input_name: Data(
        data_location=DataLocation(dataset=RunDataset(dataset_id=dataset.id)),
        create_output_directories=False,
        mechanism='mount',
        environment_variable_name=input_name,
        overwrite=False,
    )
}

# Remaining run parameters (`conda_env` is defined elsewhere in the file).
src.run_config.framework = 'python'
src.run_config.environment = conda_env
src.run_config.target = compute_name
src.run_config.node_count = 4

# Persist the run configuration to .azureml/mnist.runconfig.
get_run_config_from_script_run(src).save(name='mnist.runconfig')