def __init__(
        self,
        mounts=None,
        docker_img='python:3',
        docker_output_dir='/data',
        local_output_dir='/data/docker',
        gcp_bucket_name=None,
        gcp_image=None,
        gcp_project=None,
):
    """Configure launch modes and output mounts for local and GCP runs.

    Args:
        mounts: Extra doodad mounts to attach. Defaults to a fresh empty
            list (avoids the shared-mutable-default pitfall).
        docker_img: Docker image used for the run.
        docker_output_dir: Path inside the container where outputs go.
        local_output_dir: Host path receiving outputs in local mode.
        gcp_bucket_name: GCS bucket collecting outputs in GCP mode.
        gcp_image: GCP machine image name.
        gcp_project: GCP project id.
    """
    if mounts is None:
        mounts = []
    self.image = docker_img
    self.mode_local = doodad.mode.LocalMode()
    self.docker_output_dir = docker_output_dir
    self.mounts = mounts
    self.mount_out_local = mount.MountLocal(
        local_dir=local_output_dir,
        mount_point=docker_output_dir,
        output=True,
    )
    self.gcp_bucket_name = gcp_bucket_name
    self.gcp_image = gcp_image
    self.gcp_project = gcp_project
    # Pass the configured bucket through to the GCP output mount; the
    # original stored it on self but never forwarded it (the sibling
    # launcher variant does forward it).
    self.mount_out_gcp = mount.MountGCP(
        gcp_path='exp_logs',
        gcp_bucket_name=gcp_bucket_name,
        mount_point=docker_output_dir,
        output=True,
    )
def __init__(
        self,
        mounts,
        docker_img='python:3.5',
        docker_output_dir='/data',
        local_output_dir='data/docker',
        python_cmd='python',
        gcp_bucket_name=None,
        gcp_image=None,
        gcp_project=None,
):
    """Configure docker launch modes and output mounts (local, S3, GCP).

    Args:
        mounts: Doodad mounts to attach; the doodad repo itself is always
            added on top of these.
        docker_img: Docker image used for the run.
        docker_output_dir: Path inside the container where outputs go.
        local_output_dir: Host path receiving outputs in local mode.
        python_cmd: Python command used to launch the payload.
        gcp_bucket_name: GCS bucket collecting outputs in GCP mode.
        gcp_image: GCP machine image name.
        gcp_project: GCP project id.
    """
    self.image = docker_img
    self.python_cmd = python_cmd
    self.mode_local = doodad.mode.LocalDocker(image=docker_img)
    # Copy so we never mutate the caller's list when appending below.
    mounts = list(mounts)
    # Always include doodad itself on the container PYTHONPATH.
    mounts.append(mount.MountLocal(local_dir=REPO_DIR, pythonpath=True))
    self.docker_output_dir = docker_output_dir
    self.mounts = mounts
    self.mount_out_local = mount.MountLocal(
        local_dir=local_output_dir,
        mount_point=docker_output_dir,
        output=True,
    )
    self.mount_out_s3 = mount.MountS3(
        s3_path='exp_logs',
        mount_point=docker_output_dir,
        output=True,
    )
    self.gcp_bucket_name = gcp_bucket_name
    self.gcp_image = gcp_image
    self.gcp_project = gcp_project
    self.mount_out_gcp = mount.MountGCP(
        gcp_path='exp_logs',
        gcp_bucket_name=gcp_bucket_name,
        mount_point=docker_output_dir,
        output=True,
    )
def __init__(
        self,
        mounts=None,
        docker_img='python:3',
        docker_output_dir='/data',
        local_output_dir='/data/docker',
        gcp_bucket_name=None,
        gcp_image=None,
        gcp_project=None,
        azure_subscription_id=None,
        azure_storage_connection_str=None,
        azure_client_id=None,
        azure_authentication_key=None,
        azure_tenant_id=None,
        azure_storage_container=None,
        mount_out_azure=None,
        local_shell_interpreter='sh',
        local_async_run=False,
        local_use_gpu=False,
):
    """Configure launch modes and output mounts (local, GCP, Azure).

    Args:
        mounts: Extra doodad mounts to attach. Defaults to a fresh empty
            list (avoids the shared-mutable-default pitfall).
        docker_img: Docker image used for the run.
        docker_output_dir: Path inside the container where outputs go.
        local_output_dir: Host path receiving outputs in local mode.
        gcp_bucket_name: GCS bucket collecting outputs in GCP mode.
        gcp_image: GCP machine image name.
        gcp_project: GCP project id.
        azure_subscription_id: Azure subscription id.
        azure_storage_connection_str: Azure storage connection string.
        azure_client_id: Azure service-principal client id.
        azure_authentication_key: Azure service-principal key.
        azure_tenant_id: Azure tenant id.
        azure_storage_container: Azure blob container for outputs.
        mount_out_azure: Pre-built Azure output mount; a default one is
            created when omitted.
        local_shell_interpreter: Shell used by the local launch mode.
        local_async_run: Whether local runs are launched asynchronously.
        local_use_gpu: Whether local runs use the GPU.
    """
    if mounts is None:
        mounts = []
    self.image = docker_img
    self.mode_local = doodad.mode.LocalMode(
        shell_interpreter=local_shell_interpreter,
        async_run=local_async_run,
        use_gpu=local_use_gpu,
    )
    self.docker_output_dir = docker_output_dir
    self.mounts = mounts
    self.mount_out_local = mount.MountLocal(
        local_dir=local_output_dir,
        mount_point=docker_output_dir,
        output=True,
    )
    self.gcp_bucket_name = gcp_bucket_name
    self.gcp_image = gcp_image
    self.gcp_project = gcp_project
    # Mark the GCP mount as an output mount, matching every other output
    # mount in this launcher; the original omitted output=True here,
    # which looks accidental.
    self.mount_out_gcp = mount.MountGCP(
        gcp_path='exp_logs',
        mount_point=docker_output_dir,
        output=True,
    )
    self.azure_subscription_id = azure_subscription_id
    self.azure_storage_connection_str = azure_storage_connection_str
    self.azure_client_id = azure_client_id
    self.azure_authentication_key = azure_authentication_key
    self.azure_tenant_id = azure_tenant_id
    self.azure_storage_container = azure_storage_container
    self.mount_out_azure = (mount_out_azure or mount.MountAzure(
        azure_path='azure_script_output',
        mount_point=docker_output_dir,
    ))
def test_gcp(self):
    """Dry-run smoke test for MountGCP.

    Nothing is actually launched; this only verifies that building an
    archive with a GCP mount raises no errors (we don't actually want to
    launch a test remotely and spend money).
    """
    gcp_mounts = [mount.MountGCP(gcp_path='test_dir', dry=True)]
    archive = archive_builder_docker.build_archive(
        payload_script='echo hello123',
        verbose=False,
        docker_image='python:3',
        mounts=gcp_mounts,
    )
def create_mounts(
        mode,
        base_log_dir,
        sync_interval=180,
        local_input_dir_to_mount_point_dict=None,
):
    """Build the list of doodad mounts for the given launch mode.

    Args:
        mode: Launch mode name ('ec2', 'gcp', 'local', 'local_docker',
            'ssh', 'local_singularity', 'slurm_singularity', or 'sss').
        base_log_dir: Host directory that receives logs in local modes.
        sync_interval: Seconds between syncs for remote output mounts.
        local_input_dir_to_mount_point_dict: Must be None; passing a dict
            is not implemented yet.

    Returns:
        List of doodad mount objects, ending with the output mount.

    Raises:
        NotImplementedError: For an unsupported mode, or when
            local_input_dir_to_mount_point_dict is given.
    """
    # File patterns synced back from remote (EC2/GCP) output mounts;
    # defined once instead of duplicating the tuple per branch.
    sync_include_types = (
        '*.txt', '*.csv', '*.json', '*.gz', '*.tar', '*.log',
        '*.pkl', '*.mp4', '*.png', '*.jpg', '*.jpeg', '*.patch',
    )
    if mode == 'sss':
        code_mounts = SSS_CODE_MOUNTS
        non_code_mounts = SSS_NON_CODE_MOUNTS
    else:
        code_mounts = CODE_MOUNTS
        non_code_mounts = NON_CODE_MOUNTS
    if local_input_dir_to_mount_point_dict is None:
        local_input_dir_to_mount_point_dict = {}
    else:
        raise NotImplementedError("TODO(vitchyr): Implement this")
    mounts = list(code_mounts)
    # Currently a no-op: the dict is always empty (see the raise above).
    # `local_dir` avoids shadowing the `dir` builtin.
    for local_dir, mount_point in local_input_dir_to_mount_point_dict.items():
        mounts.append(mount.MountLocal(
            local_dir=local_dir,
            mount_point=mount_point,
            pythonpath=False,
        ))
    if mode != 'local':
        mounts.extend(non_code_mounts)
    if mode == 'ec2':
        output_mount = mount.MountS3(
            s3_path='',
            mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
            sync_interval=sync_interval,
            include_types=sync_include_types,
        )
    elif mode == 'gcp':
        output_mount = mount.MountGCP(
            gcp_path='',
            mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
            gcp_bucket_name=conf.GCP_BUCKET_NAME,
            sync_interval=sync_interval,
            include_types=sync_include_types,
        )
    elif mode in ('local', 'local_singularity', 'slurm_singularity', 'sss'):
        # To save directly to local files (singularity does this), skip mounting
        output_mount = mount.MountLocal(
            local_dir=base_log_dir,
            mount_point=None,
            output=True,
        )
    elif mode in ('local_docker', 'ssh'):
        # Both modes mount the host log dir at the standard target path;
        # the original duplicated this branch verbatim.
        output_mount = mount.MountLocal(
            local_dir=base_log_dir,
            mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
        )
    else:
        raise NotImplementedError("Mode not supported: {}".format(mode))
    mounts.append(output_mount)
    return mounts
def create_mounts(mode, base_log_dir, sync_interval=180, local_input_dir_to_mount_point_dict=None):
    """Build the list of doodad mounts for the given launch mode.

    Args:
        mode: Launch mode name ("ec2", "gcp", "local", "local_docker",
            "ssh", "local_singularity", "slurm_singularity", or "sss").
        base_log_dir: Host directory that receives logs in local modes.
        sync_interval: Seconds between syncs for remote output mounts.
        local_input_dir_to_mount_point_dict: Must be None; passing a dict
            is not implemented yet.

    Returns:
        List of doodad mount objects, ending with the output mount.

    Raises:
        NotImplementedError: For an unsupported mode, or when
            local_input_dir_to_mount_point_dict is given.
    """
    # File patterns synced back from remote (EC2/GCP) output mounts;
    # defined once instead of duplicating the tuple per branch.
    sync_include_types = (
        "*.txt", "*.csv", "*.json", "*.gz", "*.tar", "*.log",
        "*.pkl", "*.mp4", "*.png", "*.jpg", "*.jpeg", "*.patch",
    )
    if mode == "sss":
        code_mounts = SSS_CODE_MOUNTS
        non_code_mounts = SSS_NON_CODE_MOUNTS
    else:
        code_mounts = CODE_MOUNTS
        non_code_mounts = NON_CODE_MOUNTS
    if local_input_dir_to_mount_point_dict is None:
        local_input_dir_to_mount_point_dict = {}
    else:
        raise NotImplementedError("TODO(vitchyr): Implement this")
    mounts = list(code_mounts)
    # Currently a no-op: the dict is always empty (see the raise above).
    # `local_dir` avoids shadowing the `dir` builtin.
    for local_dir, mount_point in local_input_dir_to_mount_point_dict.items():
        mounts.append(
            mount.MountLocal(local_dir=local_dir, mount_point=mount_point, pythonpath=False))
    if mode != "local":
        mounts.extend(non_code_mounts)
    if mode == "ec2":
        output_mount = mount.MountS3(
            s3_path="",
            mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
            sync_interval=sync_interval,
            include_types=sync_include_types,
        )
    elif mode == "gcp":
        output_mount = mount.MountGCP(
            gcp_path="",
            mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
            gcp_bucket_name=conf.GCP_BUCKET_NAME,
            sync_interval=sync_interval,
            include_types=sync_include_types,
        )
    elif mode in ("local", "local_singularity", "slurm_singularity", "sss"):
        # To save directly to local files (singularity does this), skip mounting
        output_mount = mount.MountLocal(local_dir=base_log_dir, mount_point=None, output=True)
    elif mode in ("local_docker", "ssh"):
        # Both modes mount the host log dir at the standard target path;
        # the original duplicated this branch verbatim.
        output_mount = mount.MountLocal(
            local_dir=base_log_dir,
            mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
        )
    else:
        raise NotImplementedError("Mode not supported: {}".format(mode))
    mounts.append(output_mount)
    return mounts
# NOTE(review): fragment — the opening of the call that the leading
# `image=`/`credentials=` keyword arguments belong to (presumably a doodad
# SSH/docker mode constructor whose start precedes this view) is not
# visible, so the code is kept byte-identical.
# NOTE(review): the SSH username appears redacted ('******') and the
# identity file is hard-coded to '~/.ssh/id_rsa' — confirm before use.
image='kylehsu/umrl:latest', credentials=ssh.SSHCredentials(hostname='alan.ist.berkeley.edu', username='******', identity_file='~/.ssh/id_rsa'), ) mode = mode_gcp mounts = [ mount.MountLocal(local_dir=os.path.dirname(os.path.realpath(__file__)), filter_dir=('output',)) ] output_dir = '/home/docker/store/umrl/output' if mode == mode_gcp: output_mount = mount.MountGCP(gcp_path='output', gcp_bucket_name='umrl', mount_point=output_dir, output=True, include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar', '*.log', '*.pkl', '*.png', '*.html', '*.mp4')) else: output_mount = mount.MountLocal(local_dir='/home/kylehsu/experiments/umrl/output', mount_point=output_dir, output=True) mounts.append(output_mount) dd.launch_python( target=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'main_contextual.py'), mode=mode, mount_points=mounts, args=dict(log_dir_root=output_dir), python_cmd='source activate umrl && python -m ipdb -c continue', fake_display=False )
def create_mounts(
    mode,
    base_log_dir,
    sync_interval=180,
    local_input_dir_to_mount_point_dict=None,
):
    """Build the list of doodad mounts for the given launch mode.

    Args:
        mode: Launch mode name ("ec2", "gcp", "local", "local_docker",
            "ssh", "local_singularity", "slurm_singularity", "sss", or
            "htp").
        base_log_dir: Host directory that receives logs in local modes.
        sync_interval: Seconds between syncs for remote output mounts.
        local_input_dir_to_mount_point_dict: Must be None; passing a dict
            is not implemented yet.

    Returns:
        List of doodad mount objects, ending with the output mount.

    Raises:
        NotImplementedError: For an unsupported mode, or when
            local_input_dir_to_mount_point_dict is given.
    """
    if mode in {"sss", "htp"}:
        code_mounts = SSS_CODE_MOUNTS
        non_code_mounts = SSS_NON_CODE_MOUNTS
    else:
        code_mounts = CODE_MOUNTS
        non_code_mounts = NON_CODE_MOUNTS
    if local_input_dir_to_mount_point_dict is None:
        local_input_dir_to_mount_point_dict = {}
    else:
        raise NotImplementedError("TODO(vitchyr): Implement this")
    mounts = list(code_mounts)
    # Currently a no-op: the dict is always empty (see the raise above).
    # `local_dir` avoids shadowing the `dir` builtin.
    for local_dir, mount_point in local_input_dir_to_mount_point_dict.items():
        mounts.append(
            mount.MountLocal(
                local_dir=local_dir,
                mount_point=mount_point,
                pythonpath=False,
            )
        )
    if mode != "local":
        mounts.extend(non_code_mounts)
    if mode == "ec2":
        output_mount = mount.MountS3(
            s3_path="",
            mount_point=config.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
            sync_interval=sync_interval,
            include_types=config.AWS_FILE_TYPES_TO_SAVE,
        )
    elif mode == "gcp":
        output_mount = mount.MountGCP(
            gcp_path="",
            mount_point=config.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
            gcp_bucket_name=config.GCP_BUCKET_NAME,
            sync_interval=sync_interval,
            include_types=config.GCP_FILE_TYPES_TO_SAVE,
        )
    elif mode in {"local", "local_singularity", "slurm_singularity", "sss", "htp"}:
        # To save directly to local files, skip mounting
        output_mount = mount.MountLocal(
            local_dir=base_log_dir,
            mount_point=None,
            output=True,
        )
    elif mode in {"local_docker", "ssh"}:
        # Both modes mount the host log dir at the standard target path;
        # the original duplicated this branch verbatim.
        output_mount = mount.MountLocal(
            local_dir=base_log_dir,
            mount_point=config.OUTPUT_DIR_FOR_DOODAD_TARGET,
            output=True,
        )
    else:
        raise NotImplementedError("Mode not supported: {}".format(mode))
    mounts.append(output_mount)
    return mounts