def minion_accept(environment, group='hosts'):
    '''
    Accept available keys for all minions in a specific environment and of a
    specific type

    CLI Example:

    .. code-block:: bash

        salt-run alchemy.minion_accept dev
        salt-run alchemy.minion_accept dev containers
        salt-run alchemy.minion_accept dev hosts

    :param environment: The pillar environment to act on
    :param group: The group to act on
    :return: A dict with the minions affected
    '''
    wheel = salt.wheel.Wheel(__opts__)
    nodes = cpillar.fake_pillar(None, environment, '1nc', __opts__).get(group, {})

    minions = []
    for node, nodedata in nodes.iteritems():
        minions.append(nodedata.get('fqdn'))

    return wheel.call_func('key.accept_dict', match={'minions_pre': minions})

def gen_dhcp_all(top="1nc", test=False):
    '''
    Call gen_dhcp for all environments defined in the environments pillar

    CLI Example:

    .. code-block:: bash

        salt-run nodes.gen_dhcp_all

    :param top: The top level domain if none is given in the pillar
        configuration for the environment/network
    :param test: If true, only return the data - do not call the states
    :return: If just for test, returns the data created. If run, it returns
        the state results
    '''
    _pillar = cpillar.fake_pillar(None, "cde", "1nc", __opts__)
    envdata = _pillar.get('environments')

    res = {}
    for env in envdata['active'].keys():
        res[env] = gen_dhcp(env, top, test)

    return res

def status(environment):
    '''
    Print a list of all the expected minions and their state - True (up) or
    False (down)

    CLI Example:

    .. code-block:: bash

        salt-run alchemy.status dev

    :param environment: Specify the pillar environment for which you want to
        get the list
    :return: A simple dict with the minion id as key and the status as value
    '''
    _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
    domain = cpillar.dig(_pillar, 'defaults:network:manage:domain', '1nc')

    nodes = _pillar.get('hosts', {})
    nodes.update(_pillar.get('containers', {}))

    client = salt.client.LocalClient(__opts__['conf_file'])

    # assume every expected minion is down, then mark the ones that answer
    minions_status = {}
    for node, nodedata in nodes.iteritems():
        minions_status[nodedata.get('fqdn')] = False

    minions_found = client.cmd('*.' + domain, 'test.ping', timeout=1)
    for minion in sorted(minions_found):
        minions_status[minion] = True

    return minions_status

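# Illustrative only: the mapping returned by status() for a hypothetical 'dev'
# environment with two expected minions (names invented for the example):
#
#   {'node-01.dev.1nc': True,
#    'node-02.dev.1nc': False}
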
def look(environment, pillar=""):
    '''
    Lookup the pillar for a specific pillar environment

    CLI Example:

    .. code-block:: bash

        salt-run test.look dev defaults:network

    :param environment: The name of the pillar environment to get
    :param pillar: The pillar path to return in colon notation
    :return: The pillar dict found or none
    '''
    _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
    leaf = cpillar.dig(_pillar, pillar)

    # re-wrap the leaf in its path so the result mirrors the pillar layout;
    # wrap from the innermost key outwards
    res = leaf
    if pillar:
        for branch in reversed(pillar.split(':')):
            res = {branch: res}

    return res

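# Illustrative call (environment and path are just examples):
#
#   salt-run test.look dev defaults:network
#
# resolves the leaf at defaults:network and wraps it back into its path, so
# the result looks roughly like
#
#   {'defaults': {'network': { ...leaf data... }}}
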
def ipillar(domain, saltenv=None, top="1nc"):
    '''
    Collect the network data for all hosts and containers of the given
    environment (via cpillar.gen_networks) and return the resulting dict
    '''
    _pillar = cpillar.fake_pillar(domain, saltenv, top, __opts__)

    res = {}
    defaults = _pillar.get('defaults', {})

    nodes = _pillar.get('hosts', {})
    nodes.update(_pillar.get('containers', {}))

    for id, node in nodes.items():
        ip = node.get('ip4')
        active = node.get('active', True)
        mole = node.get('mole', cpillar.dig(defaults, 'monitor:mole', 'hosts'))
        cpillar.gen_networks(res, id, ip, active, mole,
                             defaults.get('hosts', {}).get('network'),
                             node.get('network'))

    return res

def hosts_destroy(environment, name=None):
    '''
    Destroy a single host or all hosts in a salt environment. This includes
    completely erasing their data - definition and historical - from the MaaS
    database.

    CLI Example:

    .. code-block:: bash

        salt-run alchemy.hosts_destroy dev
        salt-run alchemy.hosts_destroy dev node-01.dev.1nc

    :param environment: Name of the pillar environment to work on
    :param name: Optional name of a known host in the working environment
    :return: The data structure returned by the client call
    '''
    res = {}

    if name is not None:
        params = 'name={}'.format(name)
        glob = name
    else:
        _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
        glob = '*.{}'.format(
            cpillar.dig(_pillar, 'defaults:network:manage:domain'))
        zone = cpillar.dig(_pillar, 'defaults:maas:zone')
        params = 'name={}'.format(zone)

    # delete all matched keys - it is either name or all
    _get_wheel().call_func('key.delete', match=glob)

    opt = {'timeout': 1, 'expr_form': 'compound'}
    target = 'G@roles:maas and *.cde.1nc'
    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)

    res['release'] = client.cmd(target, 'maas.node_release', [params], **opt)
    res['delete'] = client.cmd(target, 'maas.node_delete', [params], **opt)

    return res

def masterlist(environment, name=None, target=None, role=None,
               active_only=True):
    '''
    Master-side variant of the container list: resolve the container pillar
    for the given environment and delegate to _getcontainerlist. Not usable
    from a minion context.
    '''
    import cpillar

    callerrole = __opts__.get('__role', None)
    if callerrole is None or callerrole == 'minion':
        log.error("Masterlist called with callerrole %s!", callerrole)
        raise CommandExecutionError(
            'Func masterlist not usable in minion context! Use list instead!')

    log.warning("Masterlist called with callerrole %s!", callerrole)

    _pillar = cpillar.fake_pillar(None, environment, "1nc", __opts__)
    container_pillar = cpillar.dig(_pillar, 'containers')

    return _getcontainerlist(name, target, role, active_only, container_pillar)

def gen_dns(environment, top="1nc", test=False):
    '''
    Function that creates the dns zone data for an environment and passes it
    to the corresponding state on the dns servers. The state will create the
    zone files and include them into the server configuration

    CLI Example:

    .. code-block:: bash

        salt-run nodes.gen_dns dev

    :param environment: The environment you want the dns zone files to be
        created for
    :param top: The top level domain if none is given in the pillar
        configuration for the environment/network
    :param test: If true, only return the data - do not call the states
    :return: If just for test, returns the data created. If run, it returns
        the state results
    '''
    _pillar = cpillar.fake_pillar(None, environment, top, __opts__)
    _nodes = _get_nodes(_pillar)

    # ensure that we have the same soa for all changed files
    # soa = {'serial': time.strftime('%Y%m%d%H%M%S')}
    serial = time.strftime('%s')

    # add static dns records first
    _dns_zone_default = _get_static_dns_records(
        cpillar.dig(_pillar, "defaults:dns:records", {})).get('default')
    _zones = _get_static_dns_records(
        cpillar.dig(_pillar, "defaults:dns:records", {}), False,
        _dns_zone_default)

    _soa = {}

    for id, node in _nodes.iteritems():
        # outer loop: iterate all networks
        for network, netdata in node.get('network', {}).iteritems():
            # don't handle that network at all if no domain is set ...
            if 'domain' not in netdata:
                continue

            domain = netdata.get('domain')
            arpa = _get_arpa(netdata.get('ip4'))

            if domain not in _zones:
                _zones[domain] = list(_dns_zone_default)
            _zones[domain].append('{:20} IN {:10} {}'.format(
                id, "A", netdata.get('ip4')))
            for cname in netdata.get('cnames', []):
                _zones[domain].append('{:20} IN {:10} {}'.format(
                    cname, "CNAME", id))

            if arpa not in _zones:
                _zones[arpa] = list(_dns_zone_default)
            _zones[arpa].append('{:<20} IN {:10} {}.'.format(
                node.get('ip4'), "PTR", netdata.get('fqdn')))

    for zone in _zones.keys():
        if zone.endswith('arpa'):
            _soa[zone] = {'type': 'arpa', 'serial': serial}
        else:
            _soa[zone] = {'type': 'zone', 'serial': serial}

    _statepillar = {"zones": _zones, "soa": _soa}

    if test:
        return _statepillar

    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
    opt = {'expr_form': 'compound'}
    _t = 'micros-a?.cde.1nc and G@roles:dns'
    staterun = client.cmd(_t, 'state.sls', ['bind.zones'],
                          kwarg={'pillar': _statepillar}, **opt)

    return staterun

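# Sketch of the state pillar handed to the bind.zones state; zone names,
# records and the serial below are illustrative assumptions only:
#
#   {'zones': {'dev.1nc': ['node-01              IN A          10.0.0.11',
#                          'www                  IN CNAME      node-01'],
#              '0.0.10.in-addr.arpa': ['10.0.0.11            IN PTR        node-01.dev.1nc.']},
#    'soa':   {'dev.1nc': {'type': 'zone', 'serial': '1700000000'},
#              '0.0.10.in-addr.arpa': {'type': 'arpa', 'serial': '1700000000'}}}
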
def gen_dhcp(environment, top="1nc", test=False):
    '''
    Function that creates the dhcp subnet data for an environment and passes
    it to the corresponding state on the dhcp servers. The state will create
    the subnet files and include them into the server configuration.

    CLI Example:

    .. code-block:: bash

        salt-run nodes.gen_dhcp dev

    :param environment: The environment you want the dhcp subnet files to be
        created for
    :param top: The top level domain if none is given in the pillar
        configuration for the environment/network
    :param test: If true, only return the data - do not call the states
    :return: If just for test, returns the data created. If run, it returns
        the state results
    '''
    _pillar = cpillar.fake_pillar(None, environment, top, __opts__)

    _defaults = _pillar.get('defaults', {})
    _nodes = _pillar.get("hosts", {})
    _network = cpillar.dig(_pillar, "defaults:network:manage", {})
    _schema = cpillar.dig(_pillar, "defaults:network:schema", None)

    nameservers = ", ".join(
        cpillar.dig(_defaults, "dns:servers", ['127.10.10.1']))

    if _schema == 'racked':
        _ipnetwork = _network.get('ip4net', {
            'rack01': '127.10.10.{0}/24'
        }).get('rack01').format('0')
    else:
        _ipnetwork = _network.get('ip4net', '127.10.10.{0}/24').format('0')

    _subnet = {}
    _subnet['group'] = environment
    _subnet['address'] = ipaddr.IPv4Network(_ipnetwork).network.__str__()
    _subnet['netmask'] = ipaddr.IPv4Network(_ipnetwork).netmask.__str__()
    _subnet['interface'] = _network.get('phys', 'eth0')

    # TODO: define currently hardcoded options in config or deduce from config
    _options = {}
    _options["subnet-mask"] = _subnet['netmask']
    _options["broadcast-address"] = ipaddr.IPv4Network(
        _ipnetwork).broadcast.__str__()
    _options["domain-name-servers"] = nameservers
    _options["ntp-servers"] = ", ".join(
        cpillar.dig(_defaults, "ntp-servers:internal", {
            'localhost': '127.10.10.1'
        }).values())
    _options["domain-search"] = '"' + _network.get("domain", "1nc") + '"'
    _options["domain-name"] = '"' + _network.get("domain", "1nc") + '"'
    _options["routers"] = _network.get('gateway', '127.10.10.1')

    # host loop
    _hosts = []
    for id, node in _nodes.iteritems():
        # if the node does not have the network we need, hop over it
        if "manage" not in node['network']:
            continue

        _host = {}
        _host['name'] = id
        _host['ip'] = cpillar.dig(node, "network:manage:ip4", "")
        _host['fqdn'] = cpillar.dig(node, "network:manage:fqdn", "")
        _host['mac'] = cpillar.dig(node, "network:manage:mac", "")
        _hosts.append(_host)

    _hosts.sort(key=itemgetter('ip'))

    _statepillar = {
        'dhcpd': {
            'subnet': _subnet,
            'options': _options,
            'hosts': _hosts
        }
    }

    if test:
        return _statepillar

    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
    _t = 'micros-a?.cde.1nc and G@roles:dhcp'
    staterun = client.cmd(_t, 'state.sls', ['dhcpd.subnet'],
                          expr_form="compound",
                          kwarg={'pillar': _statepillar})

    return staterun

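# Sketch of the state pillar handed to the dhcpd.subnet state; all addresses
# and names below are illustrative assumptions, not taken from a real pillar:
#
#   {'dhcpd': {'subnet':  {'group': 'dev', 'address': '10.0.0.0',
#                          'netmask': '255.255.255.0', 'interface': 'eth0'},
#              'options': {'routers': '10.0.0.1',
#                          'domain-name': '"dev.1nc"', ...},
#              'hosts':   [{'name': 'node-01', 'ip': '10.0.0.11',
#                           'fqdn': 'node-01.dev.1nc',
#                           'mac': '00:11:22:33:44:55'}]}}
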
def enlist(environment, test=False, force=False, execute=True,
           commission=True):
    '''
    Create a bash script on the maas server that does the enlisting of all
    machines belonging to the named salt environment.

    :param environment: Salt pillar environment that we want to enlist the
        machines for
    :param test: Dry run - only tell what would be done
    :param force: If a node exists already, force it to be deleted and
        recreated
    :param execute: Execute the script once it has been generated
    :param commission: Start commissioning of the machines once added
    :return: A state result dict
    '''
    _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)

    defaults = _pillar.get('defaults', {})
    maas = defaults.get('maas', {})
    nodes = _pillar.get('hosts', {})
    cluster = maas.get('cluster', 'cde')
    domain = cpillar.dig(defaults, 'network:manage:domain', 'localnet')

    res = []
    for id, node in nodes.items():
        _node = {}
        _node['name'] = cpillar.dig(node, 'network:manage:fqdn')
        _node['mac'] = cpillar.dig(node, 'network:manage:mac')
        _node['arch'] = defaults.get('arch', 'amd64')
        _node['sub'] = maas.get('sub', '')
        _node['powertype'] = maas.get('powertype', 'ipmi')
        if _node['powertype'] == 'virsh':
            _node['poweraddress'] = cpillar.dig(
                node, 'network:console:poweraddress',
                cpillar.dig(node, 'network:console:ip4'))
        else:
            _node['poweraddress'] = cpillar.dig(node, 'network:console:ip4')
        _node['powerpass'] = maas.get('powerpass', '')
        _node['powerid'] = id
        _node['partitions'] = node.get('partitions', '')
        _node['zone'] = maas.get('zone', '')
        _node['maas'] = node.get('no_maas', False) is False
        res.append(_node)

    _state_pillar = {
        'enlist': {'nodes': res,
                   'domain': domain,
                   'cluster': cluster,
                   'commission': commission,
                   'execute': execute,
                   'force_nodes': force}}

    if test:
        return _state_pillar

    target = 'maas-a1.cde.1nc'
    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
    staterun = client.cmd(target, 'state.sls', ['maas.enlist'],
                          kwarg={'pillar': _state_pillar})

    ret = {'data': {target: staterun}}
    ret['data']['retcode'] = 0 if salt.utils.check_state_result(
        ret['data']) else 1

    return ret

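# Sketch of the enlist state pillar; node values are illustrative only:
#
#   {'enlist': {'nodes': [{'name': 'node-01.dev.1nc',
#                          'mac': '00:11:22:33:44:55', 'arch': 'amd64',
#                          'powertype': 'ipmi', 'powerid': 'node-01', ...}],
#               'domain': 'dev.1nc', 'cluster': 'cde',
#               'commission': True, 'execute': True, 'force_nodes': False}}
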
def hosts_deploy(environment, nodes=None):
    '''
    Deploy a selected OS to all hosts of a named salt environment. This
    creates a public and a private key for the minion on the master, stores
    the public key in the masters accepted keys folder and sends both keys to
    the new host. As the private key is only stored on the minion itself, the
    master has no way to restore it if the host needs to be redeployed. Thus
    a new keypair will be created each time this function is run.

    CLI Example:

    .. code-block:: bash

        salt-run alchemy.hosts_deploy dev
        salt-run alchemy.hosts_deploy dev node-01.dev.1nc

    :param environment: Name of the salt pillar environment
    :param nodes: Optional name of a known host in the working environment
    :return: The data structure returned by the client call
    '''
    import time

    wheel = _get_wheel()

    _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
    _nodes = _pillar.get('hosts', {})
    _zone = cpillar.dig(_pillar, 'defaults:maas:zone')

    opt = {'timeout': 1, 'expr_form': 'compound'}
    target = 'G@roles:maas and *.cde.1nc'
    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)

    if nodes is not None:
        nodes = nodes.split(',')

    res = {}
    for nid, node in _nodes.iteritems():
        fqdn = node.get('fqdn')

        if nodes is not None and fqdn not in nodes:
            log.debug("Skipping node %s", fqdn)
            continue

        log.info("Deploying node %s", nid)

        keys = wheel.call_func('key.gen_accept', args=[fqdn],
                               kwargs={'force': True})
        userdata = _minion_key_wrapper(keys)

        actions = {}
        actions['acquired'] = client.cmd(target, 'maas.node_acquire',
                                         [fqdn], **opt)
        time.sleep(5)
        actions['deploy'] = client.cmd(target, 'maas.node_power_on',
                                       [fqdn, userdata], **opt)

        res[nid] = actions

    return res

def list_nodes(environment, groups="hosts", top="1nc"):
    '''
    Function that lists out all nodes in an environment - for manual
    configuration checks

    CLI Example:

    .. code-block:: bash

        salt-run test.list_nodes dev
        salt-run test.list_nodes dev containers

    :param environment: The pillar environment to list the nodes for
    :param groups: Comma separated list of node groups to include
        (e.g. hosts,containers)
    :param top: The top level domain if none is given in the pillar
        configuration for the environment/network
    :return: A CSV formatted string with a header line followed by one line
        per node
    '''
    _pillar = cpillar.fake_pillar(None, environment, top, __opts__)
    _networks = cpillar.dig(_pillar, "defaults:network", {})
    _ignorelist = ['common', 'schema']

    _nodes = {}
    groups = groups.split(',')
    for group in groups:
        partial = cpillar.dig(_pillar, group, {})
        _nodes.update(partial)

    snodelist = sorted(_nodes.keys())

    headers = ['id', 'nature', 'ip', 'target', 'rackid']
    for network in _networks.keys():
        if network in _ignorelist:
            continue
        headers.append(network + ':fqdn')
        headers.append(network + ':ip4')
        headers.append(network + ':mac')

    _infonodes = []
    for id in snodelist:
        node = _nodes.get(id, {})
        if node.get('nature') == 'hosts':
            node['target'] = cpillar.dig(node, 'network:manage:fqdn', id)

        _nodenetworks = [
            id,
            node.get('nature', ''),
            str(node.get('ip4', '')),
            node.get('target', ''),
            node.get('rackid', '')
        ]

        for network in _networks.keys():
            if network in _ignorelist:
                continue
            _ndata = node.get('network', {}).get(network, {})
            _nodenetworks.append(_ndata.get('fqdn', ''))
            _nodenetworks.append(_ndata.get('ip4', ''))
            _nodenetworks.append(_ndata.get('mac', ''))

        _infonodes.append(','.join(_nodenetworks))

    _infonodes.insert(0, ','.join(headers))

    return '\n'.join(_infonodes)
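
# Illustrative output of list_nodes() for an environment that only defines a
# 'manage' network (all values are assumptions): the first line is the header,
# followed by one CSV row per node:
#
#   id,nature,ip,target,rackid,manage:fqdn,manage:ip4,manage:mac
#   node-01,hosts,10.0.0.11,node-01.dev.1nc,r01,node-01.dev.1nc,10.0.0.11,00:11:22:33:44:55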