def all_clients(task: Callable, *args, **kwargs):
    """Execute the decorated function on all remote client machines.

    Args:
        task: The task to run on every client (worker) host.
        *args: Positional arguments; ``args[0]`` must be the helper object
            exposing ``cluster_spec``.
        **kwargs: Extra keyword arguments forwarded to the task.

    Returns:
        The per-host results dict produced by ``execute``.
    """
    helper = args[0]
    hosts = helper.cluster_spec.workers
    # BUG FIX: propagate execute()'s per-host results to the caller,
    # matching the sibling decorators (all_servers, the other all_clients).
    return execute(parallel(task), *args, hosts=hosts, **kwargs)
def all_servers(task, *args, **kwargs):
    """Run *task* in parallel across every remote server node.

    ``args[0]`` is, by convention, the helper object carrying the
    cluster layout; its server list selects the target hosts.
    """
    server_hosts = args[0].cluster_spec.servers
    return execute(parallel(task), *args, hosts=server_hosts, **kwargs)
def all_clients(task: Callable, *args, **kwargs):
    """Run *task* in parallel on every remote client (worker) machine.

    ``args[0]`` must be the helper exposing ``cluster_spec``; its worker
    list selects the target hosts. Returns ``execute``'s results.
    """
    worker_hosts = args[0].cluster_spec.workers
    return execute(parallel(task), *args, hosts=worker_hosts, **kwargs)
def wrapper(task, *args, **kwargs):
    """Run *task* in parallel on every server matching the captured roles.

    Args:
        task: Task to execute remotely.
        *args: Positional arguments; ``args[0]`` must be the helper object
            exposing ``cluster_spec``.
        **kwargs: Extra keyword arguments forwarded to the task.

    Returns:
        The per-host results dict produced by ``execute``.
    """
    helper = args[0]
    # `roles` is captured from the enclosing decorator scope; collect the
    # hosts for every role in a single comprehension.
    hosts = [
        host
        for role in roles
        for host in helper.cluster_spec.servers_by_role(role=role)
    ]
    # BUG FIX: return execute()'s results instead of discarding them,
    # consistent with the other parallel-execution decorators in this file.
    return execute(parallel(task), *args, hosts=hosts, **kwargs)
def cmd(stackname, command=None, username=DEPLOY_USER):
    """Run an arbitrary shell *command* on all EC2 nodes of *stackname*.

    Aborts with a usage hint when no command was given.
    """
    if command is None:
        abort("Please specify a command e.g. ./bldr cmd:%s,ls" % stackname)
    LOG.info("Connecting to: %s", stackname)
    # Fabric task spec: (callable, kwargs). abort_on_prompts prevents the
    # run from hanging on interactive input.
    task = (parallel(run), {'command': command})
    stack_all_ec2_nodes(stackname, task, username=username, abort_on_prompts=True)
def _parallel_task(task, *args, **kargs):
    """Run *task* in parallel on all hosts, or on workers only.

    `server_side` is captured from the enclosing decorator scope and
    chooses between ``self.hosts`` and ``self.workers``.
    """
    self = args[0]
    target_hosts = self.hosts if server_side else self.workers
    with settings(user=self.user, password=self.password, warn_only=True):
        with hide("running", "output"):
            return execute(parallel(task), *args, hosts=target_hosts, **kargs)
def update_ec2_stack(stackname):
    """installs/updates the ec2 instance attached to the specified stackname.

    Once AWS has finished creating an EC2 instance for us, we need to install
    Salt and get it talking to the master server. Salt comes with a bootstrap
    script that can be downloaded from the web and then very conveniently
    installs its own dependencies. Once Salt is installed we give it an ID
    (the given `stackname`), the address of the master server
    """
    pdata = project_data_for_stackname(stackname)
    # nothing to do for stacks without an EC2 component
    if not pdata['aws']['ec2']:
        return
    region = pdata['aws']['region']
    is_master = core.is_master_server_stack(stackname)

    def _update_ec2_node():
        # runs on each remote node (via stack_all_ec2_nodes below).
        # upload private key if not present remotely
        if not files.exists("/root/.ssh/id_rsa", use_sudo=True):
            # if it also doesn't exist on the filesystem, die horribly.
            # regular updates shouldn't have to deal with this.
            pem = stack_pem(stackname, die_if_doesnt_exist=True)
            put(pem, "/root/.ssh/id_rsa", use_sudo=True)

        # write out environment config so Salt can read CFN outputs
        write_environment_info(stackname)

        salt_version = pdata['salt']
        install_master_flag = str(is_master).lower()  # ll: 'true'
        master_ip = master(region, 'private_ip_address')

        # TODO: this is a little gnarly. I think I'd prefer this logic in the script:
        # if [ cat /etc/build-vars.json | grep 'nodename' ]; then ... fi
        # it will do for now, though.
        # prefer a previously-recorded node name as the Salt minion id,
        # falling back to the stack name.
        build_vars = bvars.read_from_current_host()
        if 'nodename' in build_vars:
            minion_id = build_vars['nodename']
        else:
            minion_id = stackname
        run_script('bootstrap.sh', salt_version, minion_id, install_master_flag, master_ip)
        # /TODO

        # master stacks additionally need the master-server setup scripts,
        # which require access to the private builder repository.
        if is_master:
            builder_private_repo = pdata['private-repo']
            run_script('init-master.sh', stackname, builder_private_repo)
            run_script('update-master.sh', stackname, builder_private_repo)

        # this will tell the machine to update itself
        run_script('highstate.sh')

    stack_all_ec2_nodes(stackname, parallel(_update_ec2_node), username=BOOTSTRAP_USER)
def run_fab_task(task_fn, hosts, username, password, parallel_pool_size=1):
    """Execute *task_fn* on *hosts* via Fabric, optionally in parallel.

    Configures the shared Fabric ``env`` (credentials, agent forwarding,
    warn-only mode) before running. Returns the per-host results dict
    produced by ``execute``.
    """
    from fabric.api import execute, env, parallel

    ssh_config = env.ssh_config_path
    if ssh_config and os.path.isfile(os.path.expanduser(ssh_config)):
        env.use_ssh_config = True
    env.forward_agent = True
    # pass `-E` to sudo to preserve environment for ssh agent forwarding
    env.sudo_prefix = "sudo -SE -p '%(sudo_prompt)s' "
    env.user = username
    env.password = password
    env.hosts = hosts
    env.warn_only = True

    if parallel_pool_size > 1:
        task = parallel(pool_size=parallel_pool_size)(task_fn)
    else:
        task = task_fn
    return execute(task)
def run_command(self, hosts, parallel_pool_size=1):
    """Run ``self.command`` with sudo on *hosts*, optionally in parallel.

    Configures the shared Fabric ``env`` from this object's credentials,
    then executes the command as ``self.user_as`` on every host.
    Returns the per-host results dict produced by ``execute``.
    """
    from fabric.api import execute, sudo, env, parallel

    ssh_config = env.ssh_config_path
    if ssh_config and os.path.isfile(os.path.expanduser(ssh_config)):
        env.use_ssh_config = True
    env.forward_agent = True
    # pass `-E` to sudo to preserve environment for ssh agent forwarding
    env.sudo_prefix = "sudo -SE -p '%(sudo_prompt)s' "
    env.user = self.user_name
    env.password = self.password
    env.hosts = hosts
    env.warn_only = True

    def _task():
        return sudo(self.command, user=self.user_as)

    if parallel_pool_size > 1:
        chosen = parallel(pool_size=parallel_pool_size)(_task)
    else:
        chosen = _task
    return execute(chosen)
def all_gateloads(task, *args, **kargs):
    """Run *task* in parallel on every gateload host."""
    self = args[0]
    gateload_hosts = self.gateloads
    return execute(parallel(task), *args, hosts=gateload_hosts, **kargs)
def all_clients(task, *args, **kargs):
    """Run *task* in parallel on every client (worker) host."""
    self = args[0]
    worker_hosts = self.cluster_spec.workers
    return execute(parallel(task), *args, hosts=worker_hosts, **kargs)
def all_hosts(task, *args, **kargs):
    """Run *task* in parallel on every known host."""
    self = args[0]
    return execute(parallel(task), *args, hosts=self.hosts, **kargs)
def all_kv_nodes(task, *args, **kargs):
    """Run *task* in parallel on every KV node.

    Resets ``self.host_index`` to 0 before dispatch so tasks that walk
    the host list start from the beginning.
    """
    self = args[0]
    self.host_index = 0
    kv_hosts = self.kv_hosts
    return execute(parallel(task), *args, hosts=kv_hosts, **kargs)
def all_gateways(task, *args, **kargs):
    """Run *task* in parallel on every gateway host."""
    self = args[0]
    gateway_hosts = self.cluster_spec.gateways
    return execute(parallel(task), *args, hosts=gateway_hosts, **kargs)
def parallel_work(single_node_work, params):
    """Run *single_node_work* in parallel on each host in ``params['public_ips']``.

    Args:
        single_node_work: Callable executed on every host.
        params: Fabric settings dict; ``params['public_ips']`` maps node
            identifiers to their public IP addresses.

    Returns:
        The per-host results dict produced by ``execute``.
    """
    with settings(**params):
        # Materialize the dict view with list() so `hosts` is a real
        # sequence under Python 3 (matches the sibling parallel_work helper).
        return execute(parallel(single_node_work),
                       hosts=list(params['public_ips'].values()))
def all_servers(task, *args, **kwargs):
    """Run *task* in parallel against every public-DNS server address.

    NOTE(review): `ip_list_public_dns` is resolved from an enclosing
    scope, not from the arguments — confirm it is defined at call time.
    """
    return execute(parallel(task), *args, hosts=ip_list_public_dns, **kwargs)
def multi_node_task(task, *args, **kargs):
    """Run *task* in parallel on every host, with quiet, warn-only output."""
    self = args[0]
    fab_settings = dict(user=self.user, password=self.password, warn_only=True)
    with settings(**fab_settings):
        with hide("running", "output"):
            return execute(parallel(task), *args, hosts=self.hosts, **kargs)
def parallel_work(single_node_work, params):
    """Execute *single_node_work* concurrently on each public IP in *params*."""
    with settings(**params):
        host_list = list(params['public_ips'].values())
        return execute(parallel(single_node_work), hosts=host_list)
def all_gateways(task, *args, **kargs):
    """Run *task* in parallel on every gateway host in the cluster spec."""
    self = args[0]
    gateway_hosts = self.cluster_spec.gateways
    return execute(parallel(task), *args, hosts=gateway_hosts, **kargs)
def all_servers(task, *args, **kwargs):
    """Run *task* in parallel on every server, addressed by public DNS.

    ``args[0]`` is the test object carrying ``ip_list_public_dns``.
    """
    test = args[0]
    dns_hosts = test.ip_list_public_dns
    return execute(parallel(task), *args, hosts=dns_hosts, **kwargs)