def add_nodes(nodes, tag):
    """Wrap every host in *nodes* into a ``Node`` carrying *tag*.

    The default SSH port comes from the ``ssh_port`` config entry,
    falling back to 22 when it is not set.

    :param nodes: iterable of host specifications accepted by ``Node``
    :param tag: tag mapping attached to every created node
    :return: list of ``Node`` objects, one per input host
    """
    wrapped = []
    for host in nodes:
        # Port is looked up per host, exactly like the original comprehension.
        port = int(config.hacky_default_get('ssh_port', 22))
        wrapped.append(Node(host, tag, default_port=port))
    return wrapped
def test_tags_async(sshd_manager, loop):
    """Integration test: tags passed to a Node must be persisted in the
    state JSON written by MultiRunner.run_commands_chain_async.

    :param sshd_manager: fixture that spawns local sshd instances and
                         provides a tmpdir holding the host key
    :param loop: asyncio event loop fixture (closed by this test)
    """
    with sshd_manager.run(1) as sshd_ports:
        workspace = str(sshd_manager.tmpdir)
        host_ports = ['127.0.0.1:{}'.format(port) for port in sshd_ports]
        # Build one tagged target per spawned sshd port.
        targets = []
        for _port in sshd_ports:
            _host = Node('127.0.0.1:{}'.format(_port), {
                'tag1': 'test1',
                'tag2': 'test2'
            })
            targets.append(_host)
        runner = MultiRunner(targets, user=getpass.getuser(), key_path=workspace + '/host_key')
        chain = CommandChain('test')
        chain.add_execute(['sleep', '1'])
        try:
            # block=True: wait for the chain to finish so test.json is complete.
            loop.run_until_complete(
                runner.run_commands_chain_async([chain], block=True, state_json_dir=workspace))
        finally:
            loop.close()
        # The runner writes <state_json_dir>/<chain-name>.json; verify the
        # tags and the exact ssh command line recorded for each host.
        with open(workspace + '/test.json') as fh:
            result_json = json.load(fh)
        for host_port in host_ports:
            assert 'tags' in result_json['hosts'][host_port]
            assert len(result_json['hosts'][host_port]['tags']) == 2
            assert result_json['hosts'][host_port]['tags']['tag1'] == 'test1'
            assert result_json['hosts'][host_port]['tags']['tag2'] == 'test2'
            assert result_json['hosts'][host_port]['commands'][0]['cmd'] == [
                "/usr/bin/ssh",
                "-oConnectTimeout=10",
                "-oStrictHostKeyChecking=no",
                "-oUserKnownHostsFile=/dev/null",
                "-oBatchMode=yes",
                "-oPasswordAuthentication=no",
                "-p{}".format(sshd_ports[0]),
                "-i",
                "{}/host_key".format(workspace),
                "-tt",
                "{}@127.0.0.1".format(getpass.getuser()),
                "sleep",
                "1"
            ]
def action_action_name(request):
    """Return /action/<action_name>

    GET returns the persisted state JSON for the action (or {}).
    POST triggers the action, with special handling for preflight
    (runs config generation first) and for deploy retries (re-runs
    only the hosts whose last status was not 'success').

    :param request: a web request object.
    :type request: request | None
    """
    global current_action
    action_name = request.match_info['action_name']

    # Update the global action
    json_state = read_json_state(action_name)
    current_action = action_name

    if request.method == 'GET':
        log.info('GET {}'.format(action_name))
        if json_state:
            return web.json_response(json_state)
        return web.json_response({})

    elif request.method == 'POST':
        log.info('POST {}'.format(action_name))
        # NOTE(review): action_map.get may return None for an unknown
        # action name; the later call would raise TypeError — confirm
        # routing guarantees a known name.
        action = action_map.get(action_name)
        # If the action name is preflight, attempt to run configuration
        # generation. If genconf fails, present the UI with a usable error
        # for the end-user
        if action_name == 'preflight':
            try:
                log.warning("GENERATING CONFIGURATION")
                backend.do_configure()
            except Exception:
                # Was a bare `except:` — keep the best-effort behavior but
                # stop swallowing KeyboardInterrupt/SystemExit and log the
                # traceback for operators before returning the UI error.
                log.exception("Configuration generation failed")
                genconf_failure = {
                    "errors": "Configuration generation failed, please see command line for details"
                }
                return web.json_response(genconf_failure, status=400)

        params = yield from request.post()

        if json_state:
            if action_name == 'deploy' and 'retry' in params:
                if 'hosts' in json_state:
                    # Collect every host whose previous run did not succeed.
                    failed_hosts = []
                    for deploy_host, deploy_params in json_state['hosts'].items():
                        if deploy_params['host_status'] != 'success':
                            failed_hosts.append(
                                Node(deploy_host,
                                     tags=deploy_params['tags'],
                                     default_port=int(
                                         Config(CONFIG_PATH).hacky_default_get('ssh_port', 22))))
                    log.debug('failed hosts: {}'.format(failed_hosts))
                    if failed_hosts:
                        # asyncio.async() was removed in Python 3.7;
                        # ensure_future is the supported equivalent.
                        yield from asyncio.ensure_future(
                            action(Config(CONFIG_PATH), state_json_dir=STATE_DIR,
                                   hosts=failed_hosts, try_remove_stale_dcos=True, **params))
                        return web.json_response({
                            'status': 'retried',
                            'details': sorted([
                                '{}:{}'.format(node.ip, node.port) for node in failed_hosts
                            ])
                        })

            if action_name not in remove_on_done:
                return web.json_response({
                    'status': '{} was already executed, skipping'.format(action_name)
                })

            # An action is considered in-flight if any host is still 'running'.
            running = False
            for host, attributes in json_state['hosts'].items():
                if attributes['host_status'].lower() == 'running':
                    running = True
            log.debug('is action running: {}'.format(running))
            if running:
                return web.json_response(
                    {'status': '{} is running, skipping'.format(action_name)})
            else:
                # Stale, finished state: drop it so the action can rerun.
                unlink_state_file(action_name)

        yield from asyncio.ensure_future(
            action(Config(CONFIG_PATH), state_json_dir=STATE_DIR, options=options, **params))
        return web.json_response({'status': '{} started'.format(action_name)})
def add_nodes(nodes, tag):
    """Return a list of ``Node`` objects, one per host in *nodes*,
    each tagged with *tag*.
    """
    wrapped = []
    for host in nodes:
        wrapped.append(Node(host, tag))
    return wrapped
def install_dcos(config, block=False, state_json_dir=None, hosts=None, async_delegate=None,
                 try_remove_stale_dcos=False, **kwargs):
    """Coroutine: deploy DC/OS to the cluster hosts over SSH.

    Builds a sequence of SSH command chains (optional stale-install
    removal, the deploy itself, then cleanup) and runs them through the
    async runner, which records progress in ``deploy.json``.

    :param config: validated cluster Config object
    :param block: when True the runner waits for completion
    :param state_json_dir: directory where per-chain state JSON is written
    :param hosts: explicit Node list; when empty/None, targets are derived
                  from master/agent/public_agent lists in ``config``
    :param async_delegate: delegate forwarded to the async runner
    :param try_remove_stale_dcos: prepend chains that wipe a previous install
    :param kwargs: ``retry=True`` re-runs only the given ``hosts`` and prunes
                   them from the existing deploy.json state file
    :return: result of ``runner.run_commands_chain_async``
    """
    if hosts is None:
        hosts = []
    assert isinstance(hosts, list)

    # Role specific parameters
    role_params = {
        'master': {
            'tags': {'role': 'master', 'dcos_install_param': 'master'},
            'hosts': config['master_list']
        },
        'agent': {
            'tags': {'role': 'agent', 'dcos_install_param': 'slave'},
            'hosts': config.hacky_default_get('agent_list', [])
        },
        'public_agent': {
            'tags': {'role': 'public_agent', 'dcos_install_param': 'slave_public'},
            'hosts': config.hacky_default_get('public_agent_list', [])
        }
    }

    bootstrap_tarball = _get_bootstrap_tarball()
    log.debug("Local bootstrap found: %s", bootstrap_tarball)

    # Explicit host list (e.g. a retry) wins; otherwise target every
    # configured node, tagged with its role so dcos_install.sh gets the
    # right install parameter.
    targets = []
    if hosts:
        targets = hosts
    else:
        for role, params in role_params.items():
            targets += [Node(node, params['tags']) for node in params['hosts']]

    runner = get_async_runner(config, targets, async_delegate=async_delegate)
    chains = []
    if try_remove_stale_dcos:
        # NOTE(review): both removal chains share the namespace
        # 'remove_stale_dcos', so they write to the same state file —
        # presumably intentional; confirm before renaming.
        pkgpanda_uninstall_chain = ssh.utils.CommandChain('remove_stale_dcos')
        pkgpanda_uninstall_chain.add_execute(['sudo', '-i', '/opt/mesosphere/bin/pkgpanda', 'uninstall'],
                                             stage='Trying pkgpanda uninstall')
        chains.append(pkgpanda_uninstall_chain)

        remove_dcos_chain = ssh.utils.CommandChain('remove_stale_dcos')
        remove_dcos_chain.add_execute(['rm', '-rf', '/opt/mesosphere', '/etc/mesosphere'],
                                      stage="Removing DC/OS files")
        chains.append(remove_dcos_chain)

    chain = ssh.utils.CommandChain('deploy')
    chains.append(chain)

    # Stage the installer assets on each host, then run the install script
    # with the role-specific parameter taken from the node's tags.
    add_pre_action(chain, runner.user)
    _add_copy_dcos_install(chain)
    _add_copy_packages(chain)
    _add_copy_bootstap(chain, bootstrap_tarball)

    chain.add_execute(
        lambda node: (
            'sudo bash {}/dcos_install.sh {}'.format(
                REMOTE_TEMP_DIR, node.tags['dcos_install_param'])).split(),
        stage=lambda node: 'Installing DC/OS'
    )

    # UI expects total_masters, total_agents to be top level keys in deploy.json
    delegate_extra_params = nodes_count_by_type(config)
    if kwargs.get('retry') and state_json_dir:
        state_file_path = os.path.join(state_json_dir, 'deploy.json')
        log.debug('retry executed for a state file deploy.json')
        # Drop the retried hosts from the previous run's state so their
        # entries are rewritten from scratch.
        for _host in hosts:
            _remove_host(state_file_path, '{}:{}'.format(_host.ip, _host.port))

        # We also need to update total number of hosts
        json_state = _read_state_file(state_file_path)
        delegate_extra_params['total_hosts'] = json_state['total_hosts']

    # Setup the cleanup chain
    cleanup_chain = ssh.utils.CommandChain('deploy_cleanup')
    add_post_action(cleanup_chain)
    chains.append(cleanup_chain)

    result = yield from runner.run_commands_chain_async(chains, block=block,
                                                        state_json_dir=state_json_dir,
                                                        delegate_extra_params=delegate_extra_params)
    return result