def rsync(config_file, source, target, override_cluster_name, down):
    """Rsyncs files.

    Arguments:
        config_file: path to the cluster yaml
        source: source dir
        target: target dir
        override_cluster_name: set the name of the cluster
        down: whether we're syncing remote -> local
    """
    assert bool(source) == bool(target), (
        "Must either provide both or neither source and target.")

    config = yaml.safe_load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)
    head_node = _get_head_node(
        config, config_file, override_cluster_name, create_if_needed=False)

    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        updater = NodeUpdaterThread(
            node_id=head_node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=[],
            setup_commands=[],
            ray_start_commands=[],
            runtime_hash="",
        )
        if down:
            rsync = updater.rsync_down
        else:
            rsync = updater.rsync_up

        if source and target:
            rsync(source, target)
        else:
            updater.sync_file_mounts(rsync)
    finally:
        provider.cleanup()
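# A minimal usage sketch for the version above (hypothetical YAML path and
# directories, shown for illustration; assumes the surrounding module with
# yaml, get_node_provider, NodeUpdaterThread, _bootstrap_config and
# _get_head_node in scope):
#
#     # Push a local directory to the head node:
#     rsync("cluster.yaml", "/local/src/", "/home/ubuntu/src/",
#           override_cluster_name=None, down=False)
#
#     # Pull it back down:
#     rsync("cluster.yaml", "/home/ubuntu/src/", "/local/src/",
#           override_cluster_name=None, down=True)
#
# Passing source=None and target=None instead re-syncs the configured
# file_mounts via updater.sync_file_mounts.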
def rsync(config_file,
          source,
          target,
          override_cluster_name,
          down,
          all_nodes=False):
    """Rsyncs files.

    Arguments:
        config_file: path to the cluster yaml
        source: source dir
        target: target dir
        override_cluster_name: set the name of the cluster
        down: whether we're syncing remote -> local
        all_nodes: whether to sync worker nodes in addition to the head node
    """
    assert bool(source) == bool(target), (
        "Must either provide both or neither source and target.")

    config = yaml.safe_load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)

    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        nodes = []
        if all_nodes:
            # Technically we re-open the provider for no reason in
            # _get_worker_nodes, but it's cleaner this way, and
            # _get_head_node does this too.
            nodes = _get_worker_nodes(config, override_cluster_name)

        nodes += [
            _get_head_node(
                config,
                config_file,
                override_cluster_name,
                create_if_needed=False)
        ]

        for node_id in nodes:
            updater = NodeUpdaterThread(
                node_id=node_id,
                provider_config=config["provider"],
                provider=provider,
                auth_config=config["auth"],
                cluster_name=config["cluster_name"],
                file_mounts=config["file_mounts"],
                initialization_commands=[],
                setup_commands=[],
                ray_start_commands=[],
                runtime_hash="",
            )
            if down:
                rsync = updater.rsync_down
            else:
                rsync = updater.rsync_up

            if source and target:
                rsync(source, target)
            else:
                updater.sync_file_mounts(rsync)
    finally:
        provider.cleanup()
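# A hypothetical call exercising the new all_nodes flag (paths invented).
# Note that the loop reuses the same source/target pair for every node, so
# all_nodes is mainly useful for pushing (down=False); pulling from all
# nodes would overwrite the same local target once per node:
#
#     rsync("cluster.yaml", "/local/conf/", "/home/ubuntu/conf/",
#           override_cluster_name=None, down=False, all_nodes=True)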
def rsync(config_file: str,
          source: Optional[str],
          target: Optional[str],
          override_cluster_name: Optional[str],
          down: bool,
          no_config_cache: bool = False,
          all_nodes: bool = False):
    """Rsyncs files.

    Arguments:
        config_file: path to the cluster yaml
        source: source dir
        target: target dir
        override_cluster_name: set the name of the cluster
        down: whether we're syncing remote -> local
        no_config_cache: whether to bypass the local cluster config cache
        all_nodes: whether to sync worker nodes in addition to the head node
    """
    if bool(source) != bool(target):
        cli_logger.abort(
            "Expected either both a source and a target, or neither.")

    assert bool(source) == bool(target), (
        "Must either provide both or neither source and target.")

    config = yaml.safe_load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config, no_config_cache=no_config_cache)

    # Detect whether the transfer path overlaps a configured file mount.
    # Guarded because source and target are both None in the
    # file_mounts-sync path, and `remote_mount in None` would raise.
    is_file_mount = False
    if source and target:
        for remote_mount in config.get("file_mounts", {}).keys():
            if remote_mount in (source if down else target):
                is_file_mount = True
                break

    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        nodes = []
        if all_nodes:
            # Technically we re-open the provider for no reason in
            # _get_worker_nodes, but it's cleaner this way, and
            # _get_head_node does this too.
            nodes = _get_worker_nodes(config, override_cluster_name)

        head_node = _get_head_node(config, config_file,
                                   override_cluster_name,
                                   create_if_needed=False)
        nodes += [head_node]

        for node_id in nodes:
            updater = NodeUpdaterThread(node_id=node_id,
                                        provider_config=config["provider"],
                                        provider=provider,
                                        auth_config=config["auth"],
                                        cluster_name=config["cluster_name"],
                                        file_mounts=config["file_mounts"],
                                        initialization_commands=[],
                                        setup_commands=[],
                                        ray_start_commands=[],
                                        runtime_hash="",
                                        file_mounts_contents_hash="",
                                        is_head_node=(node_id == head_node),
                                        docker_config=config.get("docker"))
            if down:
                rsync = updater.rsync_down
            else:
                rsync = updater.rsync_up

            if source and target:
                # Print rsync progress for a single-file rsync.
                cmd_output_util.set_output_redirected(False)
                set_rsync_silent(False)
                rsync(source, target, is_file_mount)
            else:
                updater.sync_file_mounts(rsync)
    finally:
        provider.cleanup()
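# A hypothetical download example (file names invented). When the transfer
# path contains a configured file-mount path, is_file_mount becomes True and
# is forwarded to rsync_up/rsync_down; presumably the updater uses it to
# special-case paths shared with a Docker container, though only the flag
# computation is shown here:
#
#     rsync("cluster.yaml",
#           source="/home/ubuntu/results/out.csv",  # remote path
#           target="/tmp/out.csv",                  # local path
#           override_cluster_name=None,
#           down=True,
#           no_config_cache=True)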