Example #1
def status(environment):
    '''
  Print a list of all the expected minions and their state: True (up) or False (down)

  CLI Example:

  .. code-block:: bash

      salt-run alchemy.status dev

  :param environment:
    Specify the pillar environment for which you want to get the list

  :return:
    A simple dict with the minion id as key and the status as value
  '''

    _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
    domain = cpillar.dig(_pillar, 'defaults:network:manage:domain', '1nc')
    nodes = _pillar.get('hosts', {})
    nodes.update(_pillar.get('containers', {}))
    client = salt.client.LocalClient(__opts__['conf_file'])

    minions_status = {}
    for node, nodedata in nodes.iteritems():
        minions_status[nodedata.get('fqdn')] = False

    minions_found = client.cmd('*.' + domain, 'test.ping', timeout=1)
    for minion in sorted(minions_found):
        minions_status[minion] = True

    return minions_status
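
For illustration, the returned mapping pairs each expected fqdn with its reachability; this sketch uses hypothetical minion ids:

status_example = {
    'node-01.dev.1nc': True,   # responded to test.ping
    'node-02.dev.1nc': False,  # expected in pillar but not seen
}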
Example #2
def _get_master_pillar(full_path, default=None):
    '''
  This function instantiates a masterminion and compiles its
  pillar data. Returns either the pillar or parts of it.

  Remember that pillar is compiled based on the pillar top file, so
  you might not get the full pillar back. The restriction is that the
  salt id of the master will be fqdn.of.host_master, and your targets
  in the top file need to match that.

  :param full_path:
    Path to the specific pillar item you want to be returned

  :param default:
    What to return if the data at path is not found

  :return:
    Returns a dict with the requested pillar data
  '''

    opts = salt.config.master_config('/etc/salt/master')
    opts['quiet'] = True
    opts['file_client'] = 'local'

    minion = salt.minion.MasterMinion(opts)
    _pillar = minion.functions['pillar.items']()

    data = cpillar.dig(_pillar, full_path, default)

    # Channel through json to get clean dict instead of odict
    import json
    data_clean = json.loads(json.dumps(data))

    return data_clean
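
A minimal usage sketch, assuming the helper is importable; the pillar path and fallback value are illustrative:

domain = _get_master_pillar('defaults:network:manage:domain', default='localnet')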
Example #3
def look(environment, pillar=""):
    '''
  Lookup the pillar for a specific pillar environment

  CLI Example:

  .. code-block:: bash

      salt-run test.look dev defaults:network

  :param environment:
    The name of the pillar environment to get

  :param pillar:
    The pillar path to return in colon notation

  :return:
    The pillar dict found or None
  '''
    _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
    leaf = cpillar.dig(_pillar, pillar)

    res = leaf
    if pillar:
        # rebuild the nesting from the innermost key outwards so the
        # result mirrors the original pillar structure
        for branch in reversed(pillar.split(':')):
            res = {branch: res}

    return res
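
A worked example of the re-nesting loop with an illustrative leaf value:

leaf = {'domain': '1nc'}
res = leaf
for branch in reversed('defaults:network'.split(':')):
    res = {branch: res}
# res == {'defaults': {'network': {'domain': '1nc'}}}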
Example #4
def hosts_destroy(environment, name=None):
    '''
  Destroy a single host or all hosts in a salt environment. This includes completely erasing
  their data - definition and historical - from the MaaS database.

  CLI Example:

  .. code-block:: bash

      salt-run alchemy.hosts_destroy dev
      salt-run alchemy.hosts_destroy dev node-01.dev.1nc

  :param environment:
    Name of the pillar environment to work on

  :param name:
    Optional name of a known host in the working environment

  :return:
    The data structure returned by the client call
  '''

    params = ''
    res = {}

    if name is not None:
        params = 'name={}'.format(name)
        glob = name
    else:
        _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
        glob = '*.{}'.format(
            cpillar.dig(_pillar, 'defaults:network:manage:domain'))
        zone = cpillar.dig(_pillar, 'defaults:maas:zone')
        params = 'name={}'.format(zone)

    # delete all matched keys - it is either name or all
    _get_wheel().call_func('key.delete', match=glob)

    opt = {'timeout': 1, 'expr_form': 'compound'}
    target = 'G@roles:maas and *.cde.1nc'

    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
    res['release'] = client.cmd(target, 'maas.node_release', [params], **opt)
    res['delete'] = client.cmd(target, 'maas.node_delete', [params], **opt)
    return res
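
The two client.cmd calls are roughly what the following CLI invocations would do; the host name is illustrative:

    salt -C 'G@roles:maas and *.cde.1nc' maas.node_release name=node-01.dev.1nc
    salt -C 'G@roles:maas and *.cde.1nc' maas.node_delete name=node-01.dev.1nc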
Example #5
def _merge_defaults_for_group(defaults, group):
    '''
  Merge top-level defaults into a group's defaults. Currently this merges defaults:network
  into defaults:[group], but it may be extended to do more in the future.

  :param defaults:
    The full dict of pillar:defaults

  :param group:
    The name of the group as string

  :return:
    The dict for pillar:defaults:[group] with defaults:[xxx] merged into it

  '''
    # what we have in our tummy - the original defaults for the given group
    group_defaults = cpillar.dig(defaults, group, {})

    # the real network defaults
    master_defaults = {"network": cpillar.dig(defaults, "network", {})}

    # Merge but prioritise group values over default values
    return udict.merge_recurse(master_defaults, group_defaults)
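
A worked example, assuming udict.merge_recurse merges nested dicts with the second argument taking precedence (group name and values are illustrative):

defaults = {
    'network': {'domain': '1nc', 'mtu': 1500},
    'ceph': {'network': {'domain': 'storage.1nc'}},
}
# _merge_defaults_for_group(defaults, 'ceph') would return
# {'network': {'domain': 'storage.1nc', 'mtu': 1500}}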
Example #6
def masterlist(environment,
               name=None,
               target=None,
               role=None,
               active_only=True):
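    '''
  List the containers of a pillar environment, optionally filtered by name,
  target, role and active state. Runner context only - raises
  CommandExecutionError when called from a minion; use list there instead.
  '''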
    import cpillar

    callerrole = __opts__.get('__role', None)
    if callerrole is None or callerrole == 'minion':
        log.error("Masterlist called with callerrole %s!", callerrole)
        raise CommandExecutionError(
            'Func masterlist not usable in minion context! Use list instead!')

    log.warning("Masterlist called with callerrole %s!", callerrole)

    _pillar = cpillar.fake_pillar(None, environment, "1nc", __opts__)
    container_pillar = cpillar.dig(_pillar, 'containers')

    return _getcontainerlist(name, target, role, active_only, container_pillar)
Example #7
def gen_dns(environment, top="1nc", test=False):
    '''
  Function that creates the dns zone data for an environment and passes it to the corresponding state
  on the dns servers. The state will create the zone files and include them in the server configuration.

  CLI Example:

  .. code-block:: bash

      salt-run nodes.gen_dns dev

  :param environment:
    The environment you want the dns zone files to be created for

  :param top:
    The top level domain if none is given in the pillar configuration for the environment/network

  :param test:
    If true, only return the data - do not call the states

  :return:
    If test is true, returns the generated data; otherwise the state results

  '''
    _pillar = cpillar.fake_pillar(None, environment, top, __opts__)
    _nodes = _get_nodes(_pillar)

    # ensure that we have the same soa for all changed files
    # soa = {'serial': time.strftime('%Y%m%d%H%M%S')}
    serial = time.strftime('%s')

    # add static dns records first
    _dns_zone_default = _get_static_dns_records(
        cpillar.dig(_pillar, "defaults:dns:records", {})).get('default')
    _zones = _get_static_dns_records(
        cpillar.dig(_pillar, "defaults:dns:records", {}), False,
        _dns_zone_default)

    _soa = {}
    for id, node in _nodes.iteritems():

        # outer loop: iterate all networks
        for network, netdata in node.get('network', {}).iteritems():

            # don't handle this network at all if no domain is set ...
            if 'domain' not in netdata:
                continue

            domain = netdata.get('domain')
            arpa = _get_arpa(netdata.get('ip4'))

            if domain not in _zones:
                _zones[domain] = list(_dns_zone_default)

            _zones[domain].append('{:20} IN  {:10} {}'.format(
                id, "A", netdata.get('ip4')))

            for cname in netdata.get('cnames', []):
                _zones[domain].append('{:20} IN  {:10} {}'.format(
                    cname, "CNAME", id))

            if arpa not in _zones:
                _zones[arpa] = list(_dns_zone_default)

            _zones[arpa].append('{:<20} IN  {:10} {}.'.format(
                node.get('ip4'), "PTR", netdata.get('fqdn')))

    for zone in _zones.keys():
        if zone.endswith('arpa'):
            _soa[zone] = {'type': 'arpa', 'serial': serial}
        else:
            _soa[zone] = {'type': 'zone', 'serial': serial}

    _statepillar = {"zones": _zones, "soa": _soa}

    if test:
        return _statepillar

    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
    opt = {'expr_form': 'compound'}
    _t = 'micros-a?.cde.1nc and G@roles:dns'
    staterun = client.cmd(_t,
                          'state.sls', ['bind.zones'],
                          kwarg={'pillar': _statepillar},
                          **opt)

    return staterun
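
Each zone entry is a fixed-width, BIND-style line; a sketch with illustrative record values:

print('{:20} IN  {:10} {}'.format('node-01', 'A', '10.1.0.11'))
# name padded to 20 columns, record type to 10, address last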
Example #8
def gen_dhcp(environment, top="1nc", test=False):
    '''
  Function that creates the dhcp subnet data for an environment and passes it to the corresponding state
  on the dhcp servers. The state will create the subnet files and include them in the server configuration.

  CLI Example:

  .. code-block:: bash

      salt-run nodes.gen_dhcp dev

  :param environment:
    The environment you want the dhcp subnet files to be created for

  :param top:
    The top level domain if none is given in the pillar configuration for the environment/network

  :param test:
    If true, only return the data - do not call the states

  :return:
    If test is true, returns the generated data; otherwise the state results
  '''

    _pillar = cpillar.fake_pillar(None, environment, top, __opts__)
    _defaults = _pillar.get('defaults', {})
    _nodes = _pillar.get("hosts", {})
    _network = cpillar.dig(_pillar, "defaults:network:manage", {})
    _schema = cpillar.dig(_pillar, "defaults:network:schema", None)

    nameservers = ", ".join(
        cpillar.dig(_defaults, "dns:servers", ['127.10.10.1']))

    if _schema == 'racked':
        _ipnetwork = _network.get('ip4net', {
            'rack01': '127.10.10.{0}/24'
        }).get('rack01').format('0')
    else:
        _ipnetwork = _network.get('ip4net', '127.10.10.{0}/24').format('0')

    _subnet = {}
    _subnet['group'] = environment
    _subnet['address'] = str(ipaddr.IPv4Network(_ipnetwork).network)
    _subnet['netmask'] = str(ipaddr.IPv4Network(_ipnetwork).netmask)
    _subnet['interface'] = _network.get('phys', 'eth0')

    # TODO: define currently hardcoded options in config or deduct from config
    _options = {}
    _options["subnet-mask"] = _subnet['netmask']
    _options["broadcast-address"] = ipaddr.IPv4Network(
        _ipnetwork).broadcast.__str__()

    _options["domain-name-servers"] = nameservers
    _options["ntp-servers"] = ", ".join(
        cpillar.dig(_defaults, "ntp-servers:internal", {
            'localhost': '127.10.10.1'
        }).values())

    _options["domain-search"] = '"' + _network.get("domain", "1nc") + '"'
    _options["domain-name"] = '"' + _network.get("domain", "1nc") + '"'
    _options["routers"] = _network.get('gateway', '127.10.10.1')

    # host loop
    _hosts = []
    for id, node in _nodes.iteritems():

        # if the node does not have the network we need, skip it
        if "manage" not in node['network']:
            continue

        _host = {}
        _host['name'] = id
        _host['ip'] = cpillar.dig(node, "network:manage:ip4", "")
        _host['fqdn'] = cpillar.dig(node, "network:manage:fqdn", "")
        _host['mac'] = cpillar.dig(node, "network:manage:mac", "")
        _hosts.append(_host)

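    # note: 'ip' values are strings, so the sort below is lexicographic,
    # not numeric by octet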
    _hosts.sort(key=itemgetter('ip'))
    _statepillar = {
        'dhcpd': {
            'subnet': _subnet,
            'options': _options,
            'hosts': _hosts
        }
    }

    if test:
        return _statepillar

    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
    _t = 'micros-a?.cde.1nc and G@roles:dhcp'
    staterun = client.cmd(_t,
                          'state.sls', ['dhcpd.subnet'],
                          expr_form="compound",
                          kwarg={'pillar': _statepillar})

    return staterun
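
For orientation, the generated state pillar has this shape (all values illustrative):

statepillar_example = {
    'dhcpd': {
        'subnet': {'group': 'dev', 'address': '10.1.0.0',
                   'netmask': '255.255.255.0', 'interface': 'eth0'},
        'options': {'routers': '10.1.0.1', 'domain-name': '"dev.1nc"'},
        'hosts': [{'name': 'node-01', 'ip': '10.1.0.11',
                   'fqdn': 'node-01.dev.1nc', 'mac': '52:54:00:00:00:11'}],
    }
}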
Example #9
def enlist(environment, test=False, force=False, execute=True, commission=True):
  '''
  Create a bash script on the maas server that does the enlisting of all machines belonging to
  the named salt environment.

  :param environment:
    Salt pillar environment that we want to enlist the machines for

  :param test:
    Dry run - only tell what would be done

  :param force:
    If a node exists already, force it to be deleted and recreated

  :param execute:
    Execute the script once it has been generated

  :param commission:
    Start commissioning of the machines once added

  :return:
    A state result dict
  '''

  _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
  defaults = _pillar.get('defaults', {})
  maas = defaults.get('maas', {})
  nodes = _pillar.get('hosts', {})
  cluster = maas.get('cluster', 'cde')
  domain = cpillar.dig(defaults, 'network:manage:domain', 'localnet')

  res = []
  for id, node in nodes.items():
    _node = {}
    _node['name'] = cpillar.dig(node, 'network:manage:fqdn')
    _node['mac'] = cpillar.dig(node, 'network:manage:mac')
    _node['arch'] = defaults.get('arch', 'amd64')
    _node['sub'] = maas.get('sub', '')
    _node['powertype'] = maas.get('powertype', 'ipmi')

    if _node['powertype'] == 'virsh':
      _node['poweraddress'] = cpillar.dig(node, 'network:console:poweraddress', cpillar.dig(node, 'network:console:ip4'))
    else:
      _node['poweraddress'] = cpillar.dig(node, 'network:console:ip4')

    _node['powerpass'] = maas.get('powerpass', '')
    _node['powerid'] = id
    _node['partitions'] = node.get('partitions', '')
    _node['zone'] = maas.get('zone', '')
    _node['maas'] = node.get('no_maas', False) is False
    res.append(_node)

  _state_pillar = {
    'enlist': {'nodes': res, 'domain': domain, 'cluster': cluster,
               'commission': commission,
               'execute': execute,
               'force_nodes': force}}

  if test:
    return _state_pillar

  target = 'maas-a1.cde.1nc'

  client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
  staterun = client.cmd(target, 'state.sls', ['maas.enlist'], kwarg={'pillar': _state_pillar})

  ret = {'data': {target: staterun}}
  ret['data']['retcode'] = 0 if salt.utils.check_state_result(ret['data']) else 1

  return ret
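
The wrapper adds a summary retcode: 0 when check_state_result reports every state as successful, 1 otherwise. The shape, with illustrative target and state data:

ret_example = {'data': {'maas-a1.cde.1nc': {'some.state': {'result': True}},
                        'retcode': 0}}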
Example #10
def hosts_deploy(environment, nodes=None):
    '''
  Deploy a selected OS to all hosts of a named salt environment. This creates a public
  and a private key for the minion on the master, stores the public key in the master's
  accepted keys folder and sends both keys to the new host.

  As the private key is only stored on the minion itself, the master has no way to restore
  it if the host needs to be redeployed. Thus a new keypair will be created each time
  this function is run.

  CLI Example:

  .. code-block:: bash

      salt-run alchemy.hosts_deploy dev
      salt-run alchemy.hosts_deploy dev node-01.dev.1nc

  :param environment:
    Name of the salt pillar environment

  :param nodes:
    Optional name of a known host in the working environment

  :return:
    The data structure returned by the client call
  '''

    import time

    wheel = _get_wheel()
    _pillar = cpillar.fake_pillar(None, environment, '1nc', __opts__)
    _nodes = _pillar.get('hosts', {})
    _zone = cpillar.dig(_pillar, 'defaults:maas:zone')

    opt = {'timeout': 1, 'expr_form': 'compound'}
    target = 'G@roles:maas and *.cde.1nc'
    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)

    if nodes is not None:
        nodes = nodes.split(',')

    res = {}
    for nid, node in _nodes.iteritems():

        fqdn = node.get('fqdn')
        if nodes is not None and fqdn not in nodes:
            log.debug("Skipping node %s", fqdn)
            continue

        log.info("Deploying node %s", nid)
        keys = wheel.call_func('key.gen_accept',
                               args=[fqdn],
                               kwargs={'force': True})
        userdata = _minion_key_wrapper(keys)

        actions = {}

        actions['acquired'] = client.cmd(target, 'maas.node_acquire', [fqdn],
                                         **opt)
        time.sleep(5)

        actions['deploy'] = client.cmd(target, 'maas.node_power_on',
                                       [fqdn, userdata], **opt)
        res[nid] = actions

    return res
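
A usage sketch: the nodes argument is a comma-separated string of fqdns, so a run restricted to two hypothetical hosts looks like this:

res = hosts_deploy('dev', nodes='node-01.dev.1nc,node-02.dev.1nc')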
Example #11
def minion_assemble(target, caller='cmd', test=False, force=False):
    '''
  This function is called by the assemble reactor when a minion comes up. It needs to
  inspect the current hints of the minion and decide what to do next

  :param target:
    The minion id taken from the start event

  :param caller:
    Assumes the function is called from the command line and returns state output. If you
    call it from somewhere else - like a reactor - set this value to something else to
    suppress any return values

  :param test:
    If true, return the data that would be used instead of running the assembly

  :param force:
    Run the assembly even if it is disabled by the pillar configuration

  :return:
    The state results, a boolean, or an error dict, depending on the caller and the outcome
  '''

    client = salt.client.LocalClient(__opts__['conf_file'], __opts__)
    hints = client.cmd(target, 'grains.get', ['hints', {}]).get(target)

    if hints is None:
        message = 'minion_assemble: called by a minion that is unknown or not responding! {}'.format(
            target)
        log.critical(message)
        if caller == 'cmd':
            return {'Error': message}

        return

    log.info('minion_assemble: %s with hints=%s', target, hints)

    # compile the pillar for the target - we get it here with all roles reflected
    _pillar = cpillar.minion_pillar(target)

    if 'pillar' in hints:
        _pillar = salt.utils.dictupdate.merge_recurse(_pillar,
                                                      hints.get('pillar', {}))

    # If we are not forced to run in any case check the pillar config if assemblies are enabled
    if not force and not cpillar.dig(_pillar, 'defaults:assemble', True):
        log.info(
            'minion_assemble: Assembly reactor disabled by pillar configuration.'
        )
        return True

    # get the list of available assemblies from the pillar (created from the files in base by external pillar)
    assembly = _pillar.get('assembly', {})

    # get a list of assemblies requested to run
    # we want to use the local roles, but we try hints:agenda first
    roles = cpillar.dig(_pillar, 'local:roles', [])
    agenda = list(hints.get('agenda', roles))
    log.info('Agenda for target %s: %s', target, agenda)

    # retrieve the targets pillar environment from the fresh pillar
    pillarenv = _pillar.get('pillarenv', 'base')

    # add ready as last topic on the agenda - in the future we will stop there
    if 'ready' not in agenda:
        agenda.append('ready')

    # skip forward until we have a runnable topic - some might just be roles or not implemented, so we ignore them
    topic, short_topic = _get_fqtopic(target, pillarenv, hints, agenda,
                                      assembly)

    if topic is None:
        hint_update(target, hints, client)
        client.cmd(
            target, 'event.send',
            ['salt/minion/{}/topic/{}/success'.format(target, short_topic)])
        return True

    if topic is False:
        return False

    # if we run in test mode get out here and provide test data
    if test:
        data = {'module': topic, 'pillarenv': pillarenv, 'kwargs': hints}
        if __opts__.get('log_level', 'info') == 'info':
            data['roles'] = agenda
            data['assemblies'] = assembly

        return {target: data}

    # Run the assembly orchestration package
    log.warning('Running assemble on target %s for topic %s', target, topic)
    res = assemble(target, topic, **hints)

    # log.warning('Result of the call is %s', res)
    retcode = res['data'].get('retcode', 'X')

    # If the call was successful set the hints for the minion to the next topic and fire the proceed event
    if retcode == 0:
        log.debug('minion_assemble: Topic %s applied successfully.', topic)
        hint_update(target, hints, client)
        client.cmd(target, 'event.send',
                   ['salt/minion/{}/topic/{}/success'.format(target, topic)])
        client.cmd(target, 'event.send',
                   ['salt/minion/{}/assemble'.format(target)])
        if caller != 'cmd':
            return True
        else:
            return res

    client.cmd(target, 'event.send',
               ['salt/minion/{}/topic/{}/fail'.format(target, topic)])
    log.error('minion_assemble: Topic %s:%s caused an error (retcode is %s)',
              target, topic, retcode)
    return res
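
A worked example of the agenda handling when no hints:agenda is set (role names are illustrative):

roles = ['base', 'db']      # from local:roles
agenda = list(roles)        # hints:agenda would take precedence if present
if 'ready' not in agenda:
    agenda.append('ready')  # 'ready' is always the final topic
# agenda == ['base', 'db', 'ready']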
Example #12
def ext_pillar(minion_id, pillar, *args, **kwargs):
    '''
  Enrich the simple pillar by preparing proper structures for either hosts or containers.
  This is the central place to do all merge and calculation procedures needed to get the
  full configuration information for a node, which is either a host or a container

  :param minion_id:
    The id of the minion that is requesting this pillar

  :param pillar:
    The compiled pillar for the requesting minion - also processes the top file

  :param args:
    Unnamed positional args

  :param kwargs:
    Named args given

  :return:
    A dict which will be merged with the original pillar, overwriting the existing elements.
    In the case of this external pillar it will be

      {'hosts': [hostconfiguration], 'containers': [containerconfiguration] }

  '''

    nodebase_files = cpillar.dig(pillar, 'defaults:nodebase', [])
    nodebase_data = nodebase.data(nodebase_files, __opts__['file_buffer_size'])

    _nodes = {}
    for group in ["hosts", "containers"]:

        _nodebase_groupnodes = dict(nodebase_data.get(group, {}))

        _newgroupnodes = {}
        _groupnodes = cpillar.dig(pillar, group, {})
        _groupdefaults = _merge_defaults_for_group(pillar.get("defaults", {}),
                                                   group)

        _netschema = cpillar.dig(_groupdefaults, 'network:schema', 'None')

        for nid, node_config in _groupnodes.iteritems():

            # merge the nodes config with the one from the nodebase
            config = udict.merge_recurse(_nodebase_groupnodes.get(nid, {}),
                                         node_config)

            # if we use racked schema, add rackid from node name
            if _netschema == 'racked' and 'rackid' not in config:
                config['rackid'] = 'rack{:0>2}'.format(
                    nid.split('-').pop()[1:])

            _node = udict.merge_recurse(_groupdefaults, config)
            _node['network'] = _merge_networks_for_node(
                nid, config, _groupdefaults)

            _node['fqdn'] = cpillar.dig(_node, 'network:manage:fqdn')
            _node['hostname'] = nid
            _node['nature'] = group

            # TODO: remove when using 2015.8+
            # take special care about lists
            for mergelist in ['roles', 'packages']:
                if mergelist in config or mergelist in _groupdefaults:
                    _node[mergelist] = _merge_lists_for_node(
                        mergelist, config, _groupdefaults)

            if 'target' in _node:
                _node['target'] = '{0}.{1}'.format(
                    _node['target'], cpillar.dig(_node,
                                                 'network:manage:domain'))

            if 'mole' not in _node:
                _node['mole'] = "hosts"

            if 'mount' in _node:
                _node['mount'] = _expand_mounts_for_node(
                    _node.get('fqdn'), _node.get("mount", {}))

            _newgroupnodes[nid] = _node

            if _node['fqdn'] == minion_id:
                _nodes['local'] = _node

        _nodes[group] = _newgroupnodes

    return _nodes
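
A worked example of the rackid derivation used by the 'racked' schema (node id is illustrative):

nid = 'node-r3'
rackid = 'rack{:0>2}'.format(nid.split('-').pop()[1:])
# rackid == 'rack03'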
Example #13
def list_nodes(environment, groups="hosts", top="1nc"):
    '''
  Function that lists out all nodes in an environment - for manual configuration checks

  CLI Example:

  .. code-block:: bash

      salt-run test.list_nodes dev
      salt-run test.list_nodes dev containers

  :param environment:
    The pillar environment whose nodes you want listed

  :param groups:
    Comma-separated list of node groups to include, e.g. hosts,containers

  :param top:
    The top level domain if none is given in the pillar configuration for the environment/network

  :return:
    A CSV-formatted string with a header line followed by one line per node

  '''
    _pillar = cpillar.fake_pillar(None, environment, top, __opts__)
    _networks = cpillar.dig(_pillar, "defaults:network", {})
    _ignorelist = ['common', 'schema']

    _nodes = {}
    groups = groups.split(',')
    for group in groups:
        partial = cpillar.dig(_pillar, group, {})
        _nodes.update(partial)

    snodelist = sorted(_nodes.keys())

    headers = ['id', 'nature', 'ip', 'target', 'rackid']

    for network in _networks.keys():

        if network in _ignorelist:
            continue

        headers.append(network + ':fqdn')
        headers.append(network + ':ip4')
        headers.append(network + ':mac')

    _infonodes = []

    for id in snodelist:

        node = _nodes.get(id, {})
        if node.get('nature') == 'hosts':
            node['target'] = cpillar.dig(node, 'network:manage:fqdn', id)

        _nodenetworks = [
            id,
            node.get('nature', ''),
            str(node.get('ip4', '')),
            node.get('target', ''),
            node.get('rackid', '')
        ]
        for network in _networks.keys():

            if network in _ignorelist:
                continue

            _ndata = node.get('network', {}).get(network, {})

            _nodenetworks.append(_ndata.get('fqdn', ''))
            _nodenetworks.append(_ndata.get('ip4', ''))
            _nodenetworks.append(_ndata.get('mac', ''))

        _infonodes.append(','.join(_nodenetworks))

    _infonodes.insert(0, ','.join(headers))
    return '\n'.join(_infonodes)
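
With a single 'manage' network defined, the returned CSV would start like this (all values illustrative):

csv_header = 'id,nature,ip,target,rackid,manage:fqdn,manage:ip4,manage:mac'
csv_row = 'node-01,hosts,,node-01.dev.1nc,rack01,node-01.dev.1nc,10.1.0.11,52:54:00:00:00:11'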