Example #1
 def merge_tops(self, tops):
     '''
     Cleanly merge the top files
     '''
     top = collections.defaultdict(dict)
     orders = collections.defaultdict(dict)
     for ctops in tops.values():
         for ctop in ctops:
             for saltenv, targets in ctop.items():
                 if saltenv == 'include':
                     continue
                 for tgt in targets:
                     matches = []
                     states = OrderedDict()
                     orders[saltenv][tgt] = 0
                     for comp in ctop[saltenv][tgt]:
                         if isinstance(comp, dict):
                             if 'match' in comp:
                                 matches.append(comp)
                             if 'order' in comp:
                                 order = comp['order']
                                 if not isinstance(order, int):
                                     try:
                                         order = int(order)
                                     except ValueError:
                                         order = 0
                                 orders[saltenv][tgt] = order
                         if isinstance(comp, string_types):
                             states[comp] = True
                     top[saltenv][tgt] = matches
                     top[saltenv][tgt].extend(list(states.keys()))
     return self.sort_top_targets(top, orders)
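A standalone sketch of the component loop above: match directives and the order value are pulled out of dict components, while plain strings land in an OrderedDict whose keys de-duplicate states in first-seen order (the component list is a hypothetical top-file entry):

from collections import OrderedDict

comps = [{'match': 'glob'}, {'order': '10'}, 'apache', 'ssh', 'apache']
matches = []
states = OrderedDict()
order = 0
for comp in comps:
    if isinstance(comp, dict):
        if 'match' in comp:
            matches.append(comp)
        if 'order' in comp:
            try:
                order = int(comp['order'])
            except ValueError:
                order = 0
    elif isinstance(comp, str):
        states[comp] = True
print(matches)       # [{'match': 'glob'}]
print(list(states))  # ['apache', 'ssh'] -- duplicate 'apache' collapsed
print(order)         # 10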
Example #2
def _prompt_choice(var_name, options):
    '''
    Prompt the user to choose from a list of options, indexing each one by adding an enumerator
    based on https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py#L51

    :param var_name: The question to ask the user
    :type  var_name: ``str``

    :param options: A list of options
    :type  options: ``list`` of ``tuple``

    :rtype: ``tuple``
    :returns: The option selected by the user
    '''
    choice_map = OrderedDict(
        (u'{0}'.format(i), value) for i, value in enumerate(options, 1) if value[0] != 'test'
    )
    choices = choice_map.keys()
    default = u'1'

    choice_lines = [u'{0} - {1} - {2}'.format(c[0], c[1][0], c[1][1]) for c in choice_map.items()]
    prompt = u'\n'.join((
        u'Select {0}:'.format(var_name),
        u'\n'.join(choice_lines),
        u'Choose from {0}'.format(u', '.join(choices))
    ))

    user_choice = click.prompt(
        prompt, type=click.Choice(choices), default=default
    )
    return choice_map[user_choice]
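Worth noting: enumerate numbers the options before the 'test' filter drops any, so the displayed indices can have gaps. A minimal sketch of the choice_map construction (the option tuples are invented):

from collections import OrderedDict

options = [('web', 'a web app'), ('test', 'filtered out'), ('cli', 'a CLI tool')]
choice_map = OrderedDict(
    (u'{0}'.format(i), value) for i, value in enumerate(options, 1) if value[0] != 'test'
)
print(list(choice_map))  # ['1', '3'] -- index '2' is skipped, not renumbered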
Example #3
def list_vms(search=None, verbose=False):
    '''
    List all vms

    search : string
        filter vms, see the execution module
    verbose : boolean
        print additional information about the vm

    CLI Example:

    .. code-block:: bash

        salt-run vmadm.list
        salt-run vmadm.list search='type=KVM'
        salt-run vmadm.list verbose=True
    '''
    ret = OrderedDict() if verbose else []
    client = salt.client.get_local_client(__opts__['conf_file'])
    try:
        vmadm_args = {}
        vmadm_args['order'] = 'uuid,alias,hostname,state,type,cpu_cap,vcpus,ram'
        if search:
            vmadm_args['search'] = search
        for cn in client.cmd_iter('G@virtual:physical and G@os:smartos',
                                    'vmadm.list', kwarg=vmadm_args,
                                    tgt_type='compound'):
            if not cn:
                continue
            node = next(cn.iterkeys())
            if not isinstance(cn[node], dict) or \
                'ret' not in cn[node] or \
                not isinstance(cn[node]['ret'], dict):
                continue
            for vm in cn[node]['ret'].keys():
                vmcfg = cn[node]['ret'][vm]
                if verbose:
                    ret[vm] = OrderedDict()
                    ret[vm]['hostname'] = vmcfg['hostname']
                    ret[vm]['alias'] = vmcfg['alias']
                    ret[vm]['computenode'] = node
                    ret[vm]['state'] = vmcfg['state']
                    ret[vm]['resources'] = OrderedDict()
                    ret[vm]['resources']['memory'] = vmcfg['ram']
                    if vmcfg['type'] == 'KVM':
                        ret[vm]['resources']['cpu'] = "{0:.2f}".format(int(vmcfg['vcpus']))
                    else:
                        if vmcfg['cpu_cap'] != '':
                            ret[vm]['resources']['cpu'] = "{0:.2f}".format(int(vmcfg['cpu_cap'])/100)
                else:
                    ret.append(vm)
    except SaltClientError as client_error:
        return "{0}".format(client_error)

    if not verbose:
        ret = sorted(ret)

    return ret
Example #4
def list_vms(search=None, sort=None, order='uuid,type,ram,state,alias', keyed=True):
    '''
    Return a list of VMs

    search : string
        vmadm filter property
    sort : string
        vmadm sort (-s) property
    order : string
        vmadm order (-o) property -- Default: uuid,type,ram,state,alias
    keyed : boolean
        specify whether the output should be an array (False) or a dict (True)
            For a dict the key is the first item from the order parameter.
            Note: if the key is not unique, the last vm wins.

    CLI Example:

    .. code-block:: bash

        salt '*' vmadm.list
        salt '*' vmadm.list order=alias,ram,cpu_cap sort=-ram,-cpu_cap
        salt '*' vmadm.list search='type=KVM'
    '''
    ret = {}
    vmadm = _check_vmadm()
    # vmadm list [-p] [-H] [-o field,...] [-s field,...] [field=value ...]
    cmd = '{vmadm} list -p -H {order} {sort} {search}'.format(
        vmadm=vmadm,
        order='-o {0}'.format(order) if order else '',
        sort='-s {0}'.format(sort) if sort else '',
        search=search if search else ''
    )
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    result = OrderedDict() if keyed else []
    if retcode != 0:
        ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return ret

    fields = order.split(',')

    for vm in res['stdout'].splitlines():
        vm_data = OrderedDict()
        vm = vm.split(':')
        if keyed:
            for field in fields:
                if fields.index(field) == 0:
                    continue
                vm_data[field.strip()] = vm[fields.index(field)].strip()
            result[vm[0]] = vm_data
        else:
            if len(vm) > 1:
                for field in fields:
                    vm_data[field.strip()] = vm[fields.index(field)].strip()
            else:
                vm_data = vm[0]
            result.append(vm_data)
    return result
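A standalone sketch of the keyed branch above for a single invented line of `vmadm list -p -H` output, which is colon-separated in the order requested with -o:

from collections import OrderedDict

fields = 'uuid,type,ram,state,alias'.split(',')
line = '1234abcd-0000-0000-0000-000000000000:OS:512:running:web01'  # invented
vm = line.split(':')
vm_data = OrderedDict((f, vm[fields.index(f)].strip()) for f in fields[1:])
result = {vm[0]: vm_data}
print(result[vm[0]]['alias'])  # web01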
Example #5
def get_vms_per_type(target):
    '''Return all vms indexed by virt_type for a special target'''
    all_targets = OrderedDict()
    for virt_type in VIRT_TYPES:
        per_type = all_targets.setdefault(virt_type, set())
        all_infos = get_targets_and_vms_for_virt_type(virt_type)
        for vmname in all_infos.get(target, []):
            per_type.add(vmname)
    for i in [a for a in all_targets]:
        all_targets[i] = [a for a in all_targets[i]]
    return all_targets
Example #6
 def gen_ini(self):
     yield '\n[{0}]\n'.format(self.name)
     sections_dict = OrderedDict()
     for name, value in self.iteritems():
         if com_regx.match(name):
             yield '{0}\n'.format(value)
         elif isinstance(value, _Section):
             sections_dict.update({name: value})
         else:
             yield '{0} {1} {2}\n'.format(name, self.sep, value)
     for name, value in sections_dict.iteritems():
         for line in value.gen_ini():
             yield line
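A simplified, self-contained sketch of the same generator idea (the real _Section also handles comments and nested sections; this Section stub is invented and only renders plain options):

from collections import OrderedDict

class Section(OrderedDict):
    def __init__(self, name, sep='='):
        super(Section, self).__init__()
        self.name = name
        self.sep = sep

    def gen_ini(self):
        yield '\n[{0}]\n'.format(self.name)
        for name, value in self.items():
            yield '{0} {1} {2}\n'.format(name, self.sep, value)

section = Section('server')
section['host'] = '127.0.0.1'
section['port'] = 8080
print(''.join(section.gen_ini()))  # [server] section with host and port lines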
Example #7
    def test_compare_sls_vs_yaml_with_jinja(self):
        tpl = '{{ data }}'
        env = jinja2.Environment()
        src = '{foo: 1, bar: 2, baz: {qux: true}}'

        sls_src = env.from_string(tpl).render(data=yamlex.deserialize(src))
        yml_src = env.from_string(tpl).render(data=yaml.deserialize(src))

        sls_data = yamlex.deserialize(sls_src)
        yml_data = yaml.deserialize(yml_src)

        # ensure that sls & yaml have the same base
        assert isinstance(sls_data, dict)
        assert isinstance(yml_data, dict)
        # The below has been commented out because something in the loader test
        # modifies the yaml renderer to render things to unicode. Without
        # running the loader test, the below passes. Even reloading the module
        # from disk does not reset its internal state (per the Python docs).
        ##
        #assert sls_data == yml_data

        # ensure that sls is ordered, while yaml is not
        assert isinstance(sls_data, OrderedDict)
        assert not isinstance(yml_data, OrderedDict)

        # prove that yaml does not handle OrderedDict well,
        # while sls is jinja friendly.
        obj = OrderedDict([
            ('foo', 1),
            ('bar', 2),
            ('baz', {'qux': True})
        ])

        sls_obj = yamlex.deserialize(yamlex.serialize(obj))
        try:
            yml_obj = yaml.deserialize(yaml.serialize(obj))
        except SerializationError:
            # BLAAM! yaml was unable to serialize OrderedDict,
            # but it's not the purpose of the current test.
            yml_obj = obj.copy()

        sls_src = env.from_string(tpl).render(data=sls_obj)
        yml_src = env.from_string(tpl).render(data=yml_obj)

        final_obj = yaml.deserialize(sls_src)
        assert obj == final_obj

        # BLAAM! yml_src is not valid!
        final_obj = OrderedDict(yaml.deserialize(yml_src))
        assert obj != final_obj
Example #8
 def gen_ini(self):
     yield "{0}[{1}]{0}".format(os.linesep, self.name)
     sections_dict = OrderedDict()
     for name, value in six.iteritems(self):
         if com_regx.match(name):
             yield "{0}{1}".format(value, os.linesep)
         elif isinstance(value, _Section):
             sections_dict.update({name: value})
         else:
             yield "{0}{1}{2}{3}".format(
                 name, (" {0} ".format(self.sep) if self.sep != " " else self.sep), value, os.linesep
             )
     for name, value in six.iteritems(sections_dict):
         for line in value.gen_ini():
             yield line
Example #9
def get_vms():
    '''Return all vms indexed by targets'''
    data = OrderedDict()
    for virt_type in VIRT_TYPES:
        all_infos = get_targets_and_vms_for_virt_type(virt_type)
        for t in all_infos:
            target = data.setdefault(t, {})
            vms = {}
            vts = set()
            for vmname in all_infos[t]:
                vms.setdefault(vmname, virt_type)
                vts.add(virt_type)
            target['virt_types'] = [a for a in vts]
            target['vms'] = vms
    return data
Example #10
    def test_compare_sls_vs_yaml_with_jinja(self):
        tpl = '{{ data }}'
        env = jinja2.Environment()
        src = '{foo: 1, bar: 2, baz: {qux: true}}'

        sls_src = env.from_string(tpl).render(data=sls.deserialize(src))
        yml_src = env.from_string(tpl).render(data=yaml.deserialize(src))

        sls_data = sls.deserialize(sls_src)
        yml_data = yaml.deserialize(yml_src)

        # ensure that sls & yaml have the same base
        assert isinstance(sls_data, dict)
        assert isinstance(yml_data, dict)
        assert sls_data == yml_data

        # ensure that sls is ordered, while yaml is not
        assert isinstance(sls_data, OrderedDict)
        assert not isinstance(yml_data, OrderedDict)

        # prove that yaml does not handle OrderedDict well,
        # while sls is jinja friendly.
        obj = OrderedDict([
            ('foo', 1),
            ('bar', 2),
            ('baz', {'qux': True})
        ])

        sls_obj = sls.deserialize(sls.serialize(obj))
        try:
            yml_obj = yaml.deserialize(yaml.serialize(obj))
        except SerializationError:
            # BLAAM! yaml was unable to serialize OrderedDict,
            # but it's not the purpose of the current test.
            yml_obj = obj.copy()

        sls_src = env.from_string(tpl).render(data=sls_obj)
        yml_src = env.from_string(tpl).render(data=yml_obj)

        final_obj = yaml.deserialize(sls_src)
        assert obj == final_obj

        # BLAAM! yml_src is not valid!
        final_obj = yaml.deserialize(yml_src)
        assert obj != final_obj
Example #11
    def element(self, root=None, abspath=None, **kwargs):
        '''
        kwargs contain extra information for custom methods.

        This method must return a valid empty object if no vars are passed
        to allow introspection to create patterns.

        :param root:
        :param abspath:
        '''
        if root is None and abspath is None:
            root = os.path.abspath('.')
            abspath = os.path.abspath('.')
        relpath = os.path.relpath(abspath, root)

        try:
            element = self._elements.get(relpath, OrderedDict())
        except AttributeError:
            element = OrderedDict()

        if not element:
            for field in PATHINFO_FIELDS:
                element.setdefault(field, '')
            element['saltenv'] = kwargs.get('saltenv', 'base')
            element['relpath'] = relpath
            element['abspath'] = abspath
            element['is_pillar'] = kwargs.get('is_pillar', False)

        cachedir = kwargs.get('cachedir', '')
        if cachedir and os.path.commonprefix([abspath, cachedir]) == cachedir:
            element['cache_root'] = root
        else:
            element['file_root'] = root

        element_hook = kwargs.get('_element_hook', None)
        if element_hook:
            element = element_hook(self, element, **kwargs)

        return element
Example #12
 def merge_tops(self, tops):
     '''
     Cleanly merge the top files
     '''
     top = collections.defaultdict(OrderedDict)
     orders = collections.defaultdict(OrderedDict)
     for ctops in six.itervalues(tops):
         for ctop in ctops:
             for saltenv, targets in six.iteritems(ctop):
                 if saltenv == 'include':
                     continue
                 for tgt in targets:
                     matches = []
                     states = OrderedDict()
                     orders[saltenv][tgt] = 0
                     ignore_missing = False
                     for comp in ctop[saltenv][tgt]:
                         if isinstance(comp, dict):
                             if 'match' in comp:
                                 matches.append(comp)
                             if 'order' in comp:
                                 order = comp['order']
                                 if not isinstance(order, int):
                                     try:
                                         order = int(order)
                                     except ValueError:
                                         order = 0
                                 orders[saltenv][tgt] = order
                             if comp.get('ignore_missing', False):
                                 ignore_missing = True
                         if isinstance(comp, six.string_types):
                             states[comp] = True
                     if ignore_missing:
                         if saltenv not in self.ignored_pillars:
                             self.ignored_pillars[saltenv] = []
                         self.ignored_pillars[saltenv].extend(states.keys())
                     top[saltenv][tgt] = matches
                     top[saltenv][tgt].extend(states)
     return self.sort_top_targets(top, orders)
Example #13
 def _uncomment_if_commented(self, opt_key):
     # should be called only if opt_key is not already present
     # will uncomment the key if commented and create a placeholder
     # for the key where the correct value can be updated later
     # used to preserve the ordering of comments and commented options
     # and to make sure options without sections go above any section
     options_backup = OrderedDict()
     comment_index = None
     for key, value in self.iteritems():
         if comment_index is not None:
             options_backup.update({key: value})
             continue
         if '#comment' not in key:
             continue
         opt_match = opt_regx.match(value.lstrip('#'))
         if opt_match and opt_match.group(2) == opt_key:
             comment_index = key
     for key in options_backup:
         self.pop(key)
     self.pop(comment_index, None)
     super(_Section, self).update({opt_key: None})
     for key, value in options_backup.iteritems():
         super(_Section, self).update({key: value})
Example #14
def compile(labels, **patterns):  # pylint: disable=W0622
    '''
    Compile patterns.

    Args:
        labels:
    '''
    pattern = patterns.pop('_pattern', None)
    if pattern:
        return pattern

    regex = patterns.pop('_regex', False)
    escape = patterns.pop('_escape', [])

    if not patterns or not labels:
        return None

    for pattern in list(patterns.keys()):
        if pattern not in labels:
            patterns.pop(pattern)

    default_pattern = get_default_pattern(regex)
    escape = escape if escape else []
    _escape_text = functools.partial(escape_text, regex=regex)

    # Set default values and join patterns for each field
    pattern = OrderedDict.fromkeys(labels, None)
    for label in labels:
        if label in patterns and patterns[label]:
            field = patterns[label]
            if isinstance(field, re._pattern_type):  # pylint: disable=W0212
                field = [field.pattern]
            if isinstance(field, six.string_types):
                field = [field]
            if label in escape or not regex:
                field = [_escape_text(text) for text in field]
        else:
            field = default_pattern
        pattern[label] = r'(?:{0})'.format(r'|'.join(field))

    try:
        return re.compile(
            r'\n'.join(six.itervalues(pattern)), re.MULTILINE | re.DOTALL
        )
    except NameError:
        raise
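A standalone sketch of the core idea behind compile(): OrderedDict.fromkeys preserves the label order, so each field's alternation group matches its own line of a newline-joined, multiline regex (the labels and patterns here are invented, and get_default_pattern/escape_text are replaced by a trivial default):

import re
from collections import OrderedDict

labels = ('saltenv', 'relpath')
patterns = {'relpath': [r'.*\.sls']}
pattern = OrderedDict.fromkeys(labels)
for label in labels:
    field = patterns.get(label) or [r'.*']  # stand-in for the default pattern
    pattern[label] = r'(?:{0})'.format(r'|'.join(field))
regex = re.compile(r'\n'.join(pattern.values()), re.MULTILINE | re.DOTALL)
print(bool(regex.match('base\ntop.sls')))  # True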
Example #15
    def __init__(self, match_each=True, **patterns):
        '''
        match_each:
            If True, each file path is matched as it is added, which uses less
            memory but sacrifices a little performance.  If False, the complete
            list is matched after all the file information has been added to
            pathinfo.

        patterns:
            Contains the patterns to match.
            Example:
                { 'saltenv': 'base', 'relpath': ['*.sls'] }
        '''
        super(PathInfoDict, self).__init__(
            fields=PATHINFO_FIELDS,
            match_each=match_each,
            **patterns
        )
        self._elements = OrderedDict()
Example #16
class StateRegistry(object):
    """
    The StateRegistry holds all of the states that have been created.
    """
    def __init__(self):
        self.empty()

    def empty(self):
        self.states = OrderedDict()
        self.requisites = []

    def salt_data(self):
        states = OrderedDict([
            (id_, state())
            for id_, state in self.states.iteritems()
        ])

        self.empty()

        return states

    def add(self, id_, state):
        if id_ in self.states:
            raise DuplicateState("A state with id '%s' already exists" % id_)

        # if we have requisites in our stack then add them to the state
        if len(self.requisites) > 0:
            for req in self.requisites:
                if req.requisite not in state.kwargs:
                    state.kwargs[req.requisite] = []
                state.kwargs[req.requisite].append(req())

        self.states[id_] = state

    def push_requisite(self, requisite):
        self.requisites.append(requisite)

    def pop_requisite(self):
        del self.requisites[-1]
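A minimal runnable sketch of how the registry is used, assuming the StateRegistry above on Python 2 (salt_data calls iteritems); the callable stub below is an invented stand-in for Salt's real state objects:

class FakeState(object):
    def __init__(self):
        self.kwargs = {}

    def __call__(self):
        return {'pkg.installed': [self.kwargs]}

registry = StateRegistry()
registry.add('apache', FakeState())
print(registry.salt_data())  # OrderedDict([('apache', {'pkg.installed': [{}]})]); registry is emptied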
Example #17
def iostat(zpool=None, sample_time=0):
    '''
    .. versionchanged:: 2016.3.0

    Display I/O statistics for the given pools

    zpool : string
        optional name of storage pool
    sample_time : int
        seconds to capture data before output

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.iostat myzpool
    '''
    ret = OrderedDict()

    # get zpool list data
    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} iostat -v{zpool}{sample_time}'.format(
        zpool_cmd=zpool_cmd,
        zpool=' {0}'.format(zpool) if zpool else '',
        sample_time=' {0} 2'.format(sample_time) if sample_time else ''
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        ret['error'] = res['stderr'] if 'stderr' in res else res['stdout']
        return ret

    # note: hardcoded header fields, the double header is hard to parse
    #                                capacity     operations    bandwidth
    #pool                         alloc   free   read  write   read  write
    header = [
        'pool',
        'capacity-alloc',
        'capacity-free',
        'operations-read',
        'operations-write',
        'bandwidth-read',
        'bandwidth-write'
    ]
    root_vdev = None
    vdev = None
    dev = None
    config_data = None
    current_pool = None
    for line in res['stdout'].splitlines():
        if line.strip() == '':
            continue

        # ignore header
        if line.startswith('pool') and line.endswith('write'):
            continue
        if line.endswith('bandwidth'):
            continue

        if line.startswith('-') and line.endswith('-'):
            if config_data:
                ret[current_pool] = config_data
            config_data = OrderedDict()
            current_pool = None
        else:
            if not isinstance(config_data, salt.utils.odict.OrderedDict):
                continue

            stat_data = OrderedDict()
            stats = [x for x in line.strip().split(' ') if x not in ['']]
            for prop in header:
                if header.index(prop) < len(stats):
                    if prop == 'pool':
                        if not current_pool:
                            current_pool = stats[header.index(prop)]
                        continue
                    if stats[header.index(prop)] == '-':
                        continue
                    stat_data[prop] = stats[header.index(prop)]

            dev = line.strip().split()[0]

            if line[0:4] != '    ':
                if line[0:2] == '  ':
                    vdev = line.strip().split()[0]
                    dev = None
                else:
                    root_vdev = line.strip().split()[0]
                    vdev = None
                    dev = None

            if root_vdev:
                if not config_data.get(root_vdev):
                    config_data[root_vdev] = {}
                    if len(stat_data) > 0:
                        config_data[root_vdev] = stat_data
                if vdev:
                    if vdev not in config_data[root_vdev]:
                        config_data[root_vdev][vdev] = {}
                        if len(stat_data) > 0:
                            config_data[root_vdev][vdev] = stat_data
                    if dev and dev not in config_data[root_vdev][vdev]:
                        config_data[root_vdev][vdev][dev] = {}
                        if len(stat_data) > 0:
                            config_data[root_vdev][vdev][dev] = stat_data

    return ret
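A standalone sketch of how the hardcoded header pairs with one stats line of `zpool iostat -v` output (the line content is invented):

header = ['pool', 'capacity-alloc', 'capacity-free',
          'operations-read', 'operations-write',
          'bandwidth-read', 'bandwidth-write']
line = 'mypool      19.6G  12.9G      0    163  4.32K  6.32M'
stats = [x for x in line.strip().split(' ') if x not in ['']]
print(dict(zip(header[1:], stats[1:])))
# {'capacity-alloc': '19.6G', 'capacity-free': '12.9G', 'operations-read': '0', ...}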
Example #18
    def handle_func(self, func, data):
        '''
        Execute this method in a multiprocess or thread
        '''
        if salt.utils.is_windows():
            # Since function references can't be pickled and pickling
            # is required when spawning new processes on Windows, regenerate
            # the functions and returners.
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        ret = {'id': self.opts.get('id', 'master'),
               'fun': func,
               'schedule': data['name'],
               'jid': salt.utils.jid.gen_jid()}

        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
                ret['metadata']['_TOS'] = self.time_offset
                ret['metadata']['_TS'] = time.ctime()
                ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
            else:
                log.warning('schedule: The metadata parameter must be '
                            'specified as a dictionary.  Ignoring.')

        salt.utils.appendproctitle(ret['jid'])

        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
                fn_ = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
                if not os.path.exists(fn_):
                    log.debug('schedule.handle_func: {0} was processed '
                              'in another thread, skipping.'.format(
                                  basefilename))
                    continue
                with salt.utils.fopen(fn_, 'rb') as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    if job:
                        if 'schedule' in job:
                            log.debug('schedule.handle_func: Checking job against '
                                      'fun {0}: {1}'.format(ret['fun'], job))
                            if ret['schedule'] == job['schedule'] and os_is_running(job['pid']):
                                jobcount += 1
                                log.debug(
                                    'schedule.handle_func: Incrementing jobcount, now '
                                    '{0}, maxrunning is {1}'.format(
                                        jobcount, data['maxrunning']))
                                if jobcount >= data['maxrunning']:
                                    log.debug(
                                        'schedule.handle_func: The scheduled job {0} '
                                        'was not started, {1} already running'.format(
                                            ret['schedule'], data['maxrunning']))
                                    return False
                    else:
                        try:
                            log.info('Invalid job file found.  Removing.')
                            os.remove(fn_)
                        except OSError:
                            log.info('Unable to remove file: {0}.'.format(fn_))

        salt.utils.daemonize_if(self.opts)

        ret['pid'] = os.getpid()

        if 'jid_include' not in data or data['jid_include']:
            log.debug('schedule.handle_func: adding this job to the jobcache '
                      'with data {0}'.format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, 'w+b') as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                kwargs['__pub_{0}'.format(key)] = val

        try:
            ret['return'] = self.functions[func](*args, **kwargs)

            data_returner = data.get('returner', None)
            if data_returner or self.schedule_returner:
                if 'returner_config' in data:
                    ret['ret_config'] = data['returner_config']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, str):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        ret['success'] = True
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job {0} using invalid returner: {1}. Ignoring.'.format(
                                func, returner
                            )
                        )

            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                mret['jid'] = 'req'
                channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                channel.send(load)

        except Exception:
            log.exception("Unhandled exception running {0}".format(ret['fun']))
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
        finally:
            try:
                log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno))
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
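The returner de-duplication above leans on OrderedDict.fromkeys dropping repeats while preserving first-seen order; a standalone illustration:

from collections import OrderedDict

rets = ['mysql', 'smtp', 'mysql', 'syslog']
print(list(OrderedDict.fromkeys(rets)))  # ['mysql', 'smtp', 'syslog']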
Example #19
def format_resolve(value,
                   original_dict=None,
                   global_tries=50,
                   this_call=0, topdb=False):
    """Resolve a dict of formatted strings, mappings & list to a valued dict
    Please also read the associated test::

        {"a": ["{b}", "{c}", "{e}"],
         "b": 1,
         "c": "{d}",
         "d": "{b}",
         "e": "{d}",
        }

        ====>
        {"a": ["1", "1", "{e}"],
         "b": 1,
         "c": "{d}",
         "d": "{b}",
         "e": "{d}",
        }

    """
    if not original_dict:
        original_dict = OrderedDict()
    if this_call == 0 and not original_dict and isinstance(value, dict):
        original_dict = value
    left = False
    cycle = False  # flipped to True when a _CycleError is detected below

    if isinstance(value, dict):
        new = OrderedDict()
        for key, val in value.items():
            val = format_resolve(val, original_dict, this_call=this_call + 1, topdb=topdb)
            new[key] = val
    elif isinstance(value, (list, tuple)):
        new = type(value)()
        for v in value:
            val = format_resolve(v, original_dict, this_call=this_call + 1, topdb=topdb)
            new = new + type(value)([val])
    elif isinstance(value, basestring):
        new = value
        if '/downloads' in new:
            topdb = True
        # do not directly call format to handle keyerror in original mapping
        # where we may have yet keyerrors
        if isinstance(original_dict, dict):
            for k in original_dict:
                reprk = k
                if not isinstance(reprk, basestring):
                    reprk = '{0}'.format(k)
                subst = '{' + reprk + '}'
                subst_val = original_dict[k]
                if subst in new:
                    if isinstance(subst_val, (list, dict)):
                        inner_new = format_resolve(
                            subst_val, original_dict, this_call=this_call + 1, topdb=topdb)
                        # composed, we take the repr
                        if new != subst:
                            new = new.replace(subst, str(inner_new))
                        # no composed value, take the original list
                        else:
                            new = inner_new
                    else:
                        if new != subst_val:
                            new = new.replace(subst,
                                              str(subst_val))
        if ('{' in new) and ('}' in new):
            i = 0
            while True:
                try:
                    this_call += 1
                    if this_call > 1000:
                        raise _CycleError('cycle')
                    new_val = format_resolve(
                        new, original_dict, this_call=this_call + 1, topdb=topdb)
                    new_braces = new.count('{'), new.count('}')
                    newval_braces = new_val.count('{'), new_val.count('}')
                    if new_braces == newval_braces:
                        break
                    else:
                        new = new_val
                except _CycleError:
                    cycle = True
                    break
            if ('{' in new) and ('}' in new):
                left = True
    else:
        new = value
    if left:
        if this_call == 0:
            for i in range(global_tries):
                new_val = format_resolve(
                    new, original_dict, this_call=this_call + 1, topdb=topdb)
                if (new == new_val) or cycle:
                    break
                else:
                    new = new_val
        else:
            while not cycle:
                new_val = format_resolve(
                    new, original_dict, this_call=this_call + 1, topdb=topdb)
                if (new == new_val) or (cycle):
                    break
                else:
                    new = new_val
    return new
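A small worked example of what format_resolve produces for a self-referencing mapping (Python 2, given the basestring checks above):

data = {'b': 1, 'c': '{d}', 'd': '{b}', 'a': ['{b}', '{c}']}
resolved = format_resolve(data)
print(resolved['a'])  # ['1', '1']
print(resolved['c'])  # 1 (the string '1', resolved via {d} -> {b} -> 1)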
Example #20
def register_dns_masters(only_domains=None, only_providers=None):
    '''
    Use registrar apis to switch the nameservers to the ones
    we manage on salt

    only_domains
        list of domains to act on, if empty all managed domains
        will be checked

    only_providers
        limit action to one or more providers (gandi, ovh)

    CLI Examples::

        salt-call mc_dns.register_dns_masters only_providers=ovh
        salt-call mc_dns.register_dns_masters only_providers=gandi
        salt-call mc_dns.register_dns_masters foo.net

    '''
    register_checks()
    _s = __salt__
    if isinstance(only_providers, six.string_types):
        only_providers = only_providers.split(',')
    if isinstance(only_domains, six.string_types):
        only_domains = only_domains.split(',')
    if not only_domains:
        only_domains = []
    managed_domains = _s['mc_pillar.query']('managed_dns_zones')[:]
    if not only_domains:
        only_domains = managed_domains
    errors = [a for a in only_domains if a not in managed_domains]
    if errors:
        raise ValueError('{0} are not managed'.format(errors))
    dnss = {}
    for i in _s['mc_pillar.get_nss']()['slaves']:
        dnss[i] = ips_for(i, fail_over=True)
    # for each name server, register its glue record
    # in the domain it belongs to
    glues = {}
    crets = OrderedDict()
    crets['set_ns'] = OrderedDict()
    for dns in dnss:
        domain = '.'.join(dns.split('.')[-2:])
        glues.setdefault(domain, {})
        glues[domain][dns] = dnss[dns]
    try:
        skipped = _s['mc_pillar.query']('skipped_updated_domains')
    except Exception:
        skipped = []
    for domain, dnss in six.iteritems(glues):
        provider = domain_registrar(domain)
        fun = 'mc_dns.{0}_glues'.format(provider)
        if fun not in _s:
            log.error('Registrar {0} switcher does not exist'
                      ' for {1}'.format(fun, domain))
            continue
        if not _s['mc_provider.get_{0}_opt'.format(provider)]('activated',
                                                              domain=domain):
            log.error('{0} not configured'.format(provider))
            continue
        crets['glues'] = _s[fun](domain, dnss)
    for domain in only_domains:
        if domain in skipped:
            log.error('{0} SKIPPED'.format(domain))
            continue
        provider = domain_registrar(domain)
        fun = 'mc_dns.{0}_set_nss'.format(provider)
        if not _s['mc_provider.get_{0}_opt'.format(provider)]('activated',
                                                              domain=domain):
            log.error('{0} not configured'.format(provider))
            continue
        if fun not in _s:
            log.error('Registrar {0} switcher does not exist'
                      ' for {1}'.format(fun, domain))
            continue
        if only_providers:
            if provider not in only_providers:
                continue
        crets['set_ns'][domain] = _s[fun](domain, dnss)
    return crets
Example #21
    def gen_functions(self, pack=None, virtual_enable=True, whitelist=None,
                      provider_overrides=False):
        '''
        Return a dict of functions found in the defined module_dirs
        '''
        funcs = OrderedDict()
        self.load_modules()
        for mod in self.modules:
            # If this is a proxy minion then MOST modules cannot work.  Therefore, require that
            # any module that does work with salt-proxy-minion define __proxyenabled__ as a list
            # containing the names of the proxy types that the module supports.
            if not hasattr(mod, 'render') and 'proxy' in self.opts:
                if not hasattr(mod, '__proxyenabled__'):
                    # This is a proxy minion but this module doesn't support proxy
                    # minions at all
                    continue
                if not (self.opts['proxy']['proxytype'] in mod.__proxyenabled__ or
                        '*' in mod.__proxyenabled__):
                    # This is a proxy minion, this module supports proxy
                    # minions, but not this particular minion
                    log.debug(mod)
                    continue

            if hasattr(mod, '__opts__'):
                mod.__opts__.update(self.opts)
            else:
                mod.__opts__ = self.opts

            mod.__grains__ = self.grains
            mod.__pillar__ = self.pillar

            if pack:
                if isinstance(pack, list):
                    for chunk in pack:
                        if not isinstance(chunk, dict):
                            continue
                        try:
                            setattr(mod, chunk['name'], chunk['value'])
                        except KeyError:
                            pass
                else:
                    setattr(mod, pack['name'], pack['value'])

            # Call a module's initialization method if it exists
            if hasattr(mod, '__init__'):
                if callable(mod.__init__):
                    try:
                        mod.__init__(self.opts)
                    except TypeError:
                        pass

            # Trim the full pathname to just the module
            # this will be the short name that other salt modules and state
            # will refer to it as.
            module_name = mod.__name__.rsplit('.', 1)[-1]

            if virtual_enable:
                # if virtual modules are enabled, we need to look for the
                # __virtual__() function inside that module and run it.
                (virtual_ret, virtual_name) = self.process_virtual(mod,
                                                                   module_name)

                # if process_virtual returned a non-True value then we are
                # supposed to not process this module
                if virtual_ret is not True:
                    continue

                # update our module name to reflect the virtual name
                module_name = virtual_name

            if whitelist:
                # If a whitelist is defined then only load the module if it is
                # in the whitelist
                if module_name not in whitelist:
                    continue

            # load the functions from the module and update our dict
            funcs.update(self.load_functions(mod, module_name))

        # Handle provider overrides
        if provider_overrides and self.opts.get('providers', False):
            if isinstance(self.opts['providers'], dict):
                for mod, provider in self.opts['providers'].items():
                    newfuncs = raw_mod(self.opts, provider, funcs)
                    if newfuncs:
                        for newfunc in newfuncs:
                            f_key = '{0}{1}'.format(
                                mod, newfunc[newfunc.rindex('.'):]
                            )
                            funcs[f_key] = newfuncs[newfunc]

        # now that all the functions have been collected, iterate back over
        # the available modules and inject the special __salt__ namespace that
        # contains these functions.
        for mod in self.modules:
            if not hasattr(mod, '__salt__') or (
                not in_pack(pack, '__salt__') and
                (not str(mod.__name__).startswith('salt.loaded.int.grain') and
                 not str(mod.__name__).startswith('salt.loaded.ext.grain'))
            ):
                mod.__salt__ = funcs
            elif not in_pack(pack, '__salt__') and \
                    (str(mod.__name__).startswith('salt.loaded.int.grain') or
                     str(mod.__name__).startswith('salt.loaded.ext.grain')):
                mod.__salt__.update(funcs)
        return funcs
Example #22
def hold_present(name, snapshot, recursive=False):
    '''
    ensure hold is present on the system

    name : string
        name of hold
    snapshot : string
        name of snapshot
    recursive : boolean
        recursively add hold with the given tag on the snapshots of all descendent file systems.

    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    ## log configuration
    log.debug('zfs.hold_present::%s::config::snapshot = %s', name, snapshot)
    log.debug('zfs.hold_present::%s::config::recursive = %s', name, recursive)

    ## check we have a snapshot/tag name
    if not __utils__['zfs.is_snapshot'](snapshot):
        ret['result'] = False
        ret['comment'] = 'invalid snapshot name: {0}'.format(snapshot)
        return ret

    if __utils__['zfs.is_snapshot'](name) or \
       __utils__['zfs.is_bookmark'](name) or \
       name == 'error':
        ret['result'] = False
        ret['comment'] = 'invalid tag name: {0}'.format(name)
        return ret

    ## place hold if required
    holds = __salt__['zfs.holds'](snapshot)
    if name in holds:
        ## NOTE: hold with name already exists for snapshot
        ret['comment'] = 'hold {0} is present for {1}'.format(
            name,
            snapshot,
        )
    else:
        ## NOTE: no hold found with name for snapshot
        if not __opts__['test']:
            mod_res = __salt__['zfs.hold'](name, snapshot, **{
                'recursive': recursive
            })
        else:
            mod_res = OrderedDict([('held', True)])

        ret['result'] = mod_res['held']
        if ret['result']:
            ret['changes'] = OrderedDict([
                (snapshot, OrderedDict([
                    (name, 'held'),
                ])),
            ])
            ret['comment'] = 'hold {0} added to {1}'.format(
                name,
                snapshot,
            )
        else:
            ret['comment'] = 'failed to add hold {0} to {1}'.format(
                name,
                snapshot,
            )
            if 'error' in mod_res:
                ret['comment'] = mod_res['error']

    return ret
Example #23
 def test_include(self):
     ret = self.render(include_template)
     self.assertEqual(ret, OrderedDict([("include", ["http"])]))
Example #24
 def test_check_state_result(self):
     self.assertFalse(utils.check_state_result(None),
                      "Failed to handle None as an invalid data type.")
     self.assertFalse(utils.check_state_result([]),
                      "Failed to handle an invalid data type.")
     self.assertFalse(utils.check_state_result({}),
                      "Failed to handle an empty dictionary.")
     self.assertFalse(utils.check_state_result({'host1': []}),
                      "Failed to handle an invalid host data structure.")
     test_valid_state = {
         'host1': {
             'test_state': {
                 'result': 'We have liftoff!'
             }
         }
     }
     self.assertTrue(utils.check_state_result(test_valid_state))
     test_valid_false_states = {
         'test1':
         OrderedDict([
             ('host1',
              OrderedDict([
                  ('test_state0', {
                      'result': True
                  }),
                  ('test_state', {
                      'result': False
                  }),
              ])),
         ]),
         'test2':
         OrderedDict([
             ('host1',
              OrderedDict([
                  ('test_state0', {
                      'result': True
                  }),
                  ('test_state', {
                      'result': True
                  }),
              ])),
             ('host2',
              OrderedDict([
                  ('test_state0', {
                      'result': True
                  }),
                  ('test_state', {
                      'result': False
                  }),
              ])),
         ]),
         'test3': ['a'],
         'test4':
         OrderedDict([('asup',
                       OrderedDict([('host1',
                                     OrderedDict([
                                         ('test_state0', {
                                             'result': True
                                         }),
                                         ('test_state', {
                                             'result': True
                                         }),
                                     ])),
                                    ('host2',
                                     OrderedDict([
                                         ('test_state0', {
                                             'result': True
                                         }),
                                         ('test_state', {
                                             'result': False
                                         }),
                                     ]))]))]),
         'test5':
         OrderedDict([('asup',
                       OrderedDict([('host1',
                                     OrderedDict([
                                         ('test_state0', {
                                             'result': True
                                         }),
                                         ('test_state', {
                                             'result': True
                                         }),
                                     ])), ('host2', [])]))])
     }
     for test, data in six.iteritems(test_valid_false_states):
         self.assertFalse(utils.check_state_result(data),
                          msg='{0} failed'.format(test))
     test_valid_true_states = {
         'test1':
         OrderedDict([
             ('host1',
              OrderedDict([
                  ('test_state0', {
                      'result': True
                  }),
                  ('test_state', {
                      'result': True
                  }),
              ])),
         ]),
         'test3':
         OrderedDict([
             ('host1',
              OrderedDict([
                  ('test_state0', {
                      'result': True
                  }),
                  ('test_state', {
                      'result': True
                  }),
              ])),
             ('host2',
              OrderedDict([
                  ('test_state0', {
                      'result': True
                  }),
                  ('test_state', {
                      'result': True
                  }),
              ])),
         ]),
         'test4':
         OrderedDict([('asup',
                       OrderedDict([('host1',
                                     OrderedDict([
                                         ('test_state0', {
                                             'result': True
                                         }),
                                         ('test_state', {
                                             'result': True
                                         }),
                                     ])),
                                    ('host2',
                                     OrderedDict([
                                         ('test_state0', {
                                             'result': True
                                         }),
                                         ('test_state', {
                                             'result': True
                                         }),
                                     ]))]))]),
         'test2':
         OrderedDict([('host1',
                       OrderedDict([
                           ('test_state0', {
                               'result': None
                           }),
                           ('test_state', {
                               'result': True
                           }),
                       ])),
                      ('host2',
                       OrderedDict([
                           ('test_state0', {
                               'result': True
                           }),
                           ('test_state', {
                               'result': 'abc'
                           }),
                       ]))])
     }
     for test, data in six.iteritems(test_valid_true_states):
         self.assertTrue(utils.check_state_result(data),
                         msg='{0} failed'.format(test))
     test_valid_false_state = {'host1': {'test_state': {'result': False}}}
     self.assertFalse(utils.check_state_result(test_valid_false_state))
Example #25
    def process_results(self, rows):
        '''
            This function takes a list of database results and iterates over
            them, merging them into a dict form.
        '''
        listify = OrderedDict()
        listify_dicts = OrderedDict()
        for ret in rows:
            # crd is the Current Return Data level, to make this non-recursive.
            crd = self.focus
            # Walk and create dicts above the final layer
            for i in range(0, self.depth-1):
                # At the end we'll use listify to find values to make a list of
                if i+1 in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[i] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[i])
                if ret[i] not in crd:
                    # Key missing
                    crd[ret[i]] = {}
                    crd = crd[ret[i]]
                else:
                    # Check type of collision
                    ty = type(crd[ret[i]])
                    if ty is list:
                        # Already made list
                        temp = {}
                        crd[ret[i]].append(temp)
                        crd = temp
                    elif ty is not dict:
                        # Not a list, not a dict
                        if self.as_list:
                            # Make list
                            temp = {}
                            crd[ret[i]] = [crd[ret[i]], temp]
                            crd = temp
                        else:
                            # Overwrite
                            crd[ret[i]] = {}
                            crd = crd[ret[i]]
                    else:
                        # dict, descend.
                        crd = crd[ret[i]]

            # If this test is true, the penultimate field is the key
            if self.depth == self.num_fields - 1:
                nk = self.num_fields-2  # Aka, self.depth-1
                # Should we and will we have a list at the end?
                if ((self.as_list and (ret[nk] in crd)) or
                        (nk+1 in self.with_lists)):
                    if ret[nk] in crd:
                        if not isinstance(crd[ret[nk]], list):
                            crd[ret[nk]] = [crd[ret[nk]]]
                        # if it's already a list, do nothing
                    else:
                        crd[ret[nk]] = []
                    crd[ret[nk]].append(ret[self.num_fields-1])
                else:
                    if not self.ignore_null or ret[self.num_fields-1]:
                        crd[ret[nk]] = ret[self.num_fields-1]
            else:
                # Otherwise, the field name is the key but we have a spare.
                # The spare results because of {c: d} vs {c: {"d": d, "e": e }}
                # So, make that last dict
                if ret[self.depth-1] not in crd:
                    crd[ret[self.depth-1]] = {}
                # This bit doesn't escape listify
                if self.depth in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[self.depth-1] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[self.depth-1])
                crd = crd[ret[self.depth-1]]
                # Now for the remaining keys, we put them in to the dict
                for i in range(self.depth, self.num_fields):
                    nk = self.field_names[i]
                    # Listify
                    if i+1 in self.with_lists:
                        if id(crd) not in listify:
                            listify[id(crd)] = []
                            listify_dicts[id(crd)] = crd
                        if nk not in listify[id(crd)]:
                            listify[id(crd)].append(nk)
                    # Collision detection
                    if self.as_list and (nk in crd):
                        # Same as before...
                        if isinstance(crd[nk], list):
                            crd[nk].append(ret[i])
                        else:
                            crd[nk] = [crd[nk], ret[i]]
                    else:
                        if not self.ignore_null or ret[i]:
                            crd[nk] = ret[i]
        # Get key list and work backwards.  This is inner-out processing
        ks = list(listify_dicts.keys())
        ks.reverse()
        for i in ks:
            d = listify_dicts[i]
            for k in listify[i]:
                if isinstance(d[k], dict):
                    d[k] = list(d[k].values())
                elif isinstance(d[k], list):
                    d[k] = [d[k]]
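A standalone sketch of the basic nesting idea, without the list handling or collision logic above: each row's leading columns become nested dict keys and the final column the value:

from collections import OrderedDict

rows = [('db', 'host', 'localhost'), ('db', 'port', 5432)]
focus = OrderedDict()
for row in rows:
    crd = focus
    for key in row[:-2]:
        crd = crd.setdefault(key, {})
    crd[row[-2]] = row[-1]
print(focus)  # OrderedDict([('db', {'host': 'localhost', 'port': 5432})])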
Example #26
 def _repr(self, context=None):
     return OrderedDict(m._repr(context) for m in self)
Example #27
def pytest_sessionstart(session):
    session.stats_processes = OrderedDict((
        #('Log Server', test_daemon.log_server),
        ('    Test Suite Run', psutil.Process(os.getpid())), ))
Example #28
def diff(name_a, name_b=None, **kwargs):
    '''
    Display the difference between a snapshot of a given filesystem and
    another snapshot of that filesystem from a later time or the current
    contents of the filesystem.

    name_a : string
        name of snapshot
    name_b : string
        (optional) name of snapshot or filesystem
    show_changetime : boolean
        display the path's inode change time as the first column of output. (default = True)
    show_indication : boolean
        display an indication of the type of file. (default = True)
    parsable : boolean
        if true, the timestamp is left as a raw epoch value instead of being
        parsed into a more readable date (default = True)

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset

    '''
    ## Configure command
    # NOTE: initialize the defaults
    flags = ['-H']
    target = []

    # NOTE: set extra config from kwargs
    if kwargs.get('show_changetime', True):
        flags.append('-t')
    if kwargs.get('show_indication', True):
        flags.append('-F')

    # NOTE: update target
    target.append(name_a)
    if name_b:
        target.append(name_b)

    ## Diff filesystem/snapshot
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zfs_command'](
            command='diff',
            flags=flags,
            target=target,
        ),
        python_shell=False,
    )

    if res['retcode'] != 0:
        return __utils__['zfs.parse_command_result'](res)
    else:
        if not kwargs.get('parsable', True) and kwargs.get(
                'show_changetime', True):
            ret = OrderedDict()
            for entry in res['stdout'].splitlines():
                entry = entry.split()
                entry_timestamp = __utils__['dateutils.strftime'](
                    entry[0], '%Y-%m-%d.%H:%M:%S.%f')
                entry_data = "\t\t".join(entry[1:])
                ret[entry_timestamp] = entry_data
        else:
            ret = res['stdout'].splitlines()
        return ret
Example #29
def list_(name=None, **kwargs):
    '''
    Return a list of all datasets or a specified dataset on the system and the
    values of their used, available, referenced, and mountpoint properties.

    name : string
        name of dataset, volume, or snapshot
    recursive : boolean
        recursively list children
    depth : int
        limit recursion to depth
    properties : string
        comma-separated list of properties to list, the name property will always be added
    type : string
        comma-separated list of types to display, where type is one of
        filesystem, snapshot, volume, bookmark, or all.
    sort : string
        property to sort on (default = name)
    order : string [ascending|descending]
        sort order (default = ascending)
    parsable : boolean
        display numbers in parsable (exact) values
        .. versionadded:: 2018.3.0

    .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' zfs.list
        salt '*' zfs.list myzpool/mydataset [recursive=True|False]
        salt '*' zfs.list myzpool/mydataset properties="sharenfs,mountpoint"

    '''
    ret = OrderedDict()

    ## update properties
    # NOTE: properties should be a list
    properties = kwargs.get('properties', 'used,avail,refer,mountpoint')
    if not isinstance(properties, list):
        properties = properties.split(',')

    # NOTE: name should be first property
    #       we loop here because 'name' can appear in the list
    #       multiple times.
    while 'name' in properties:
        properties.remove('name')
    properties.insert(0, 'name')

    ## Configure command
    # NOTE: initialize the defaults
    flags = ['-H', '-p']
    opts = {}

    # NOTE: set extra config from kwargs
    if kwargs.get('recursive', False):
        flags.append('-r')
    if kwargs.get('recursive', False) and kwargs.get('depth', False):
        opts['-d'] = kwargs.get('depth')
    if kwargs.get('type', False):
        opts['-t'] = kwargs.get('type')
    kwargs_sort = kwargs.get('sort', False)
    if kwargs_sort and kwargs_sort in properties:
        if kwargs.get('order', 'ascending').startswith('a'):
            opts['-s'] = kwargs_sort
        else:
            opts['-S'] = kwargs_sort
    # NOTE: properties is always a list at this point (normalized above)
    # NOTE: There can be only one -o and it takes a comma-separated list
    opts['-o'] = ','.join(properties)

    ## parse zfs list
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zfs_command'](
            command='list',
            flags=flags,
            opts=opts,
            target=name,
        ),
        python_shell=False,
    )
    if res['retcode'] == 0:
        for ds in res['stdout'].splitlines():
            if kwargs.get('parsable', True):
                ds_data = __utils__['zfs.from_auto_dict'](OrderedDict(
                    list(zip(properties, ds.split("\t")))), )
            else:
                ds_data = __utils__['zfs.to_auto_dict'](
                    OrderedDict(list(zip(properties, ds.split("\t")))),
                    convert_to_human=True,
                )

            ret[ds_data['name']] = ds_data
            del ret[ds_data['name']]['name']
    else:
        return __utils__['zfs.parse_command_result'](res)

    return ret
Example #30
def get(*dataset, **kwargs):
    '''
    Displays properties for the given datasets.

    *dataset : string
        name of snapshot(s), filesystem(s), or volume(s)
    properties : string
        comma-separated list of properties to list, defaults to all
    recursive : boolean
        recursively list children
    depth : int
        recursively list children to depth
    fields : string
        comma-separated list of fields to include, the name and property field will always be added
    type : string
        comma-separated list of types to display, where type is one of
        filesystem, snapshot, volume, bookmark, or all.
    source : string
        comma-separated list of sources to display. Each source must be one of
        the following: local, default, inherited, temporary, or none. The
        default is to display all sources.
    parsable : boolean
        display numbers in parsable (exact) values (default = True)
        .. versionadded:: 2018.3.0

    .. note::

        If no datasets are specified, then the command displays properties
        for all datasets on the system.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' zfs.get
        salt '*' zfs.get myzpool/mydataset [recursive=True|False]
        salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False]
        salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1

    '''
    ## Configure command
    # NOTE: initialize the defaults
    flags = ['-H', '-p']
    opts = {}

    # NOTE: set extra config from kwargs
    if kwargs.get('depth', False):
        opts['-d'] = kwargs.get('depth')
    elif kwargs.get('recursive', False):
        flags.append('-r')
    fields = kwargs.get('fields', 'value,source').split(',')
    if 'name' in fields:  # ensure name is first
        fields.remove('name')
    if 'property' in fields:  # ensure property is second
        fields.remove('property')
    fields.insert(0, 'name')
    fields.insert(1, 'property')
    opts['-o'] = ",".join(fields)
    if kwargs.get('type', False):
        opts['-t'] = kwargs.get('type')
    if kwargs.get('source', False):
        opts['-s'] = kwargs.get('source')

    # NOTE: set property_name
    property_name = kwargs.get('properties', 'all')

    ## Get properties
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zfs_command'](
            command='get',
            flags=flags,
            opts=opts,
            property_name=property_name,
            target=list(dataset),
        ),
        python_shell=False,
    )

    ret = __utils__['zfs.parse_command_result'](res)
    if res['retcode'] == 0:
        for ds in res['stdout'].splitlines():
            ds_data = OrderedDict(list(zip(fields, ds.split("\t"))))

            if 'value' in ds_data:
                if kwargs.get('parsable', True):
                    ds_data['value'] = __utils__['zfs.from_auto'](
                        ds_data['property'],
                        ds_data['value'],
                    )
                else:
                    ds_data['value'] = __utils__['zfs.to_auto'](
                        ds_data['property'],
                        ds_data['value'],
                        convert_to_human=True,
                    )

            if ds_data['name'] not in ret:
                ret[ds_data['name']] = OrderedDict()

            ret[ds_data['name']][ds_data['property']] = ds_data
            del ds_data['name']
            del ds_data['property']

    return ret
Example #31
    def serialize(cls, id_=None):
        # The order matters
        serialized = OrderedDict()
        if id_ is not None:
            # This is meant as a configuration section, sub json schema
            serialized['id'] = '{0}/{1}.json#'.format(BASE_SCHEMA_URL, id_)
        else:
            # Main configuration block, json schema
            serialized['$schema'] = 'http://json-schema.org/draft-04/schema#'
        if cls.title is not None:
            serialized['title'] = cls.title
        if cls.description is not None:
            if cls.description == cls.__doc__:
                serialized['description'] = textwrap.dedent(cls.description).strip()
            else:
                serialized['description'] = cls.description

        required = []
        ordering = []
        serialized['type'] = 'object'
        properties = OrderedDict()
        cls.after_items_update = []
        for name in cls._order:
            skip_order = False
            if name in cls._sections:
                section = cls._sections[name]
                serialized_section = section.serialize(None if section.__flatten__ is True else name)
                if section.__flatten__ is True:
                    # Flatten the configuration section into the parent
                    # configuration
                    properties.update(serialized_section['properties'])
                    if 'x-ordering' in serialized_section:
                        ordering.extend(serialized_section['x-ordering'])
                    if 'required' in serialized_section:
                        required.extend(serialized_section['required'])
                    if hasattr(section, 'after_items_update'):
                        cls.after_items_update.extend(section.after_items_update)
                    skip_order = True
                else:
                    # Store it as a configuration section
                    properties[name] = serialized_section

            if name in cls._items:
                config = cls._items[name]
                # Handle the configuration items defined in the class instance
                if config.__flatten__ is True:
                    serialized_config = config.serialize()
                    cls.after_items_update.append(serialized_config)
                    skip_order = True
                else:
                    properties[name] = config.serialize()

                if config.required:
                    # If it's a required item, add it to the required list
                    required.append(name)

            if skip_order is False:
                # Store the order of the item
                if name not in ordering:
                    ordering.append(name)

        if properties:
            serialized['properties'] = properties

        # Update the serialized object with any items to include after properties
        if cls.after_items_update:
            after_items_update = {}
            for entry in cls.after_items_update:
                name, data = next(six.iteritems(entry))
                if name in after_items_update:
                    after_items_update[name].extend(data)
                else:
                    after_items_update[name] = data
            serialized.update(after_items_update)

        if required:
            # Only include required if not empty
            serialized['required'] = required
        if ordering:
            # Only include ordering if not empty
            serialized['x-ordering'] = ordering
        serialized['additionalProperties'] = cls.__allow_additional_items__
        return serialized
Example #32
def list_all(
    prefix=None,
    app=None,
    owner=None,
    description_contains=None,
    name_not_contains=None,
    profile="splunk",
):
    """
    Get all splunk search details. Produces results that can be used to create
    an sls file.

    If app or owner is specified, results will be limited to matching saved
    searches.

    If description_contains is specified, results will be limited to those
    where "description_contains in description" is true. If name_not_contains
    is specified, results will be limited to those where "name_not_contains
    not in name" is true.

    If the prefix parameter is given, search names in the output will be
    prepended with the prefix; searches that already have the prefix will be
    skipped. This can be used to convert existing searches to be managed by
    salt, as follows:

    CLI example:

            1. Make a "backup" of all existing searches
                $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls

            2. Get all searches with new prefixed names
                $ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls

            3. Insert the managed searches into splunk
                $ salt-call state.sls managed_searches.sls

            4.  Manually verify that the new searches look right

            5.  Delete the original searches
                $ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls
                $ salt-call state.sls remove_legacy_searches.sls

            6.  Get all searches again, verify no changes
                $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls
                $ diff final_searches.sls managed_searches.sls
    """
    client = _get_splunk(profile)

    # splunklib doesn't provide the default settings for saved searches.
    # so, in order to get the defaults, we create a search with no
    # configuration, get that search, and then delete it. We use its contents
    # as the default settings
    name = "splunk_search.list_all get defaults"
    try:
        client.saved_searches.delete(name)
    except Exception:  # pylint: disable=broad-except
        pass
    search = client.saved_searches.create(name, search="nothing")
    defaults = dict(search.content)
    client.saved_searches.delete(name)

    # stuff that splunk returns but that you should not attempt to set.
    # cf http://dev.splunk.com/view/python-sdk/SP-CAAAEK2
    readonly_keys = (
        "triggered_alert_count",
        "action.email",
        "action.populate_lookup",
        "action.rss",
        "action.script",
        "action.summary_index",
        "qualifiedSearch",
        "next_scheduled_time",
    )

    results = OrderedDict()
    # sort the splunk searches by name, so we get consistent output
    searches = sorted([(s.name, s) for s in client.saved_searches])
    for name, search in searches:
        if app and search.access.app != app:
            continue
        if owner and search.access.owner != owner:
            continue
        if name_not_contains and name_not_contains in name:
            continue
        if prefix:
            if name.startswith(prefix):
                continue
            name = prefix + name
        # put name in the OrderedDict first
        d = [{"name": name}]
        # add the rest of the splunk settings, ignoring any defaults
        description = ""
        for (k, v) in sorted(search.content.items()):
            if k in readonly_keys:
                continue
            if k.startswith("display."):
                continue
            if not v:
                continue
            if k in defaults and defaults[k] == v:
                continue
            d.append({k: v})
            if k == "description":
                description = v
        if description_contains and description_contains not in description:
            continue
        results["manage splunk search " + name] = {"splunk_search.present": d}

    return salt.utils.yaml.safe_dump(results,
                                     default_flow_style=False,
                                     width=120)
Example #33
def _dataset_present(dataset_type,
                     name,
                     volume_size=None,
                     sparse=False,
                     create_parent=False,
                     properties=None,
                     cloned_from=None):
    '''
    internal handler for filesystem_present/volume_present

    dataset_type : string
        volume or filesystem
    name : string
        name of volume
    volume_size : string
        size of volume
    sparse : boolean
        create sparse volume
    create_parent : boolean
        creates all the non-existing parent datasets.
        any property specified on the command line using the -o option is ignored.
    cloned_from : string
        name of snapshot to clone
    properties : dict
        additional zfs properties (-o)

    .. note::
        ``cloned_from`` is only used if the volume does not exist yet;
        if ``cloned_from`` is set after the volume exists, it will be ignored.

    .. note::
        Properties do not get cloned; if you specify properties in the state
        file, they will be applied on a subsequent run.

        ``volume_size`` is considered a property, so the volume's size will be
        corrected when the properties get updated if it differs from the
        original volume.

        The sparse parameter is ignored when using ``cloned_from``.

    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    ## fallback dataset_type to filesystem if out of range
    if dataset_type not in ['filesystem', 'volume']:
        dataset_type = 'filesystem'

    ## ensure properties are zfs values
    if volume_size:
        volume_size = __utils__['zfs.from_size'](volume_size)
    if properties:
        properties = __utils__['zfs.from_auto_dict'](properties)
    elif properties is None:
        properties = {}

    ## log configuration
    log.debug('zfs.%s_present::%s::config::volume_size = %s', dataset_type,
              name, volume_size)
    log.debug('zfs.%s_present::%s::config::sparse = %s', dataset_type, name,
              sparse)
    log.debug('zfs.%s_present::%s::config::create_parent = %s', dataset_type,
              name, create_parent)
    log.debug('zfs.%s_present::%s::config::cloned_from = %s', dataset_type,
              name, cloned_from)
    log.debug('zfs.%s_present::%s::config::properties = %s', dataset_type,
              name, properties)

    ## check we have valid filesystem name/volume name/clone snapshot
    if not __utils__['zfs.is_dataset'](name):
        ret['result'] = False
        ret['comment'] = 'invalid dataset name: {0}'.format(name)
        return ret

    if cloned_from and not __utils__['zfs.is_snapshot'](cloned_from):
        ret['result'] = False
        ret['comment'] = '{0} is not a snapshot'.format(cloned_from)
        return ret

    ## ensure dataset is in correct state
    ## NOTE: update the dataset
    if __salt__['zfs.exists'](name, **{'type': dataset_type}):
        ## NOTE: fetch current volume properties
        properties_current = __salt__['zfs.get'](
            name,
            type=dataset_type,
            fields='value',
            depth=0,
            parsable=True,
        ).get(name, OrderedDict())

        ## NOTE: add volsize to properties
        if volume_size:
            properties['volsize'] = volume_size

        ## NOTE: build list of properties to update
        properties_update = []
        for prop in properties:
            ## NOTE: skip nonexistent properties
            if prop not in properties_current:
                log.warning(
                    'zfs.%s_present::%s::update - unknown property: %s',
                    dataset_type, name, prop)
                continue

            ## NOTE: compare current and wanted value
            if properties_current[prop]['value'] != properties[prop]:
                properties_update.append(prop)

        ## NOTE: update pool properties
        for prop in properties_update:
            if not __opts__['test']:
                mod_res = __salt__['zfs.set'](name, **{prop: properties[prop]})
            else:
                mod_res = OrderedDict([('set', True)])

            if mod_res['set']:
                if name not in ret['changes']:
                    ret['changes'][name] = {}
                ret['changes'][name][prop] = properties[prop]
            else:
                ret['result'] = False
                if ret['comment'] == '':
                    ret['comment'] = 'The following properties were not updated:'
                ret['comment'] = '{0} {1}'.format(ret['comment'], prop)

        ## NOTE: update comment
        if ret['result'] and name in ret['changes']:
            ret['comment'] = '{0} {1} was updated'.format(dataset_type, name)
        elif ret['result']:
            ret['comment'] = '{0} {1} is uptodate'.format(dataset_type, name)
        else:
            ret['comment'] = '{0} {1} failed to be updated'.format(
                dataset_type, name)

    ## NOTE: create or clone the dataset
    else:
        mod_res_action = 'cloned' if cloned_from else 'created'
        if __opts__['test']:
            ## NOTE: pretend to create/clone
            mod_res = OrderedDict([
                (mod_res_action, True),
            ])
        elif cloned_from:
            ## NOTE: add volsize to properties
            if volume_size:
                properties['volsize'] = volume_size

            ## NOTE: clone the dataset
            mod_res = __salt__['zfs.clone'](cloned_from, name, **{
                'create_parent': create_parent,
                'properties': properties,
            })
        else:
            ## NOTE: create the dataset
            mod_res = __salt__['zfs.create'](name, **{
                'create_parent': create_parent,
                'properties': properties,
                'volume_size': volume_size,
                'sparse': sparse,
            })

        ret['result'] = mod_res[mod_res_action]
        if ret['result']:
            ret['changes'][name] = mod_res_action
            if properties:
                ret['changes'][name] = properties
            ret['comment'] = '{0} {1} was {2}'.format(
                dataset_type,
                name,
                mod_res_action,
            )
        else:
            ret['comment'] = 'failed to {0} {1} {2}'.format(
                mod_res_action[:-1],
                dataset_type,
                name,
            )
            if 'error' in mod_res:
                ret['comment'] = mod_res['error']

    return ret
Example #34
    def test_present_create_passthrough_fail(self):
        """
        Test zpool present with a non-existing pool where creation fails
        """
        ret = {
            "name": "myzpool",
            "result": False,
            "comment": "\n".join([
                "invalid vdev specification",
                "use 'force=True' to override the following errors:",
                "/data/salt/vdisk0 is part of exported pool 'zsalt'",
                "/data/salt/vdisk1 is part of exported pool 'zsalt'",
            ]),
            "changes": {},
        }

        config = {
            "force": False,
            "import": False,
        }
        layout = [
            OrderedDict([("mirror", ["disk0", "disk1"])]),
            OrderedDict([("mirror", ["disk2", "disk3"])]),
        ]
        properties = {
            "autoexpand": True,
        }
        filesystem_properties = {
            "quota": "5G",
        }

        mock_exists = MagicMock(return_value=False)
        mock_create = MagicMock(return_value=OrderedDict([
            ("created", False),
            (
                "error",
                "\n".join([
                    "invalid vdev specification",
                    "use 'force=True' to override the following errors:",
                    "/data/salt/vdisk0 is part of exported pool 'zsalt'",
                    "/data/salt/vdisk1 is part of exported pool 'zsalt'",
                ]),
            ),
        ]))
        with patch.dict(zpool.__salt__,
                        {"zpool.exists": mock_exists}), patch.dict(
                            zpool.__salt__,
                            {"zpool.create": mock_create}), patch.dict(
                                zpool.__utils__, self.utils_patch):
            self.assertEqual(
                zpool.present(
                    "myzpool",
                    config=config,
                    layout=layout,
                    properties=properties,
                    filesystem_properties=filesystem_properties,
                ),
                ret,
            )
Example #35
            "mode": "0755"
        },
        {
            "require": [{
                "file": "/usr/local/bin"
            }]
        },
        {
            "source": "salt://debian/files/pydmesg.py"
        },
        {
            "user": "******"
        },
    ]
}
pydmesg_salt_expected = OrderedDict([("/usr/local/bin/pydmesg",
                                      pydmesg_expected)])
pydmesg_kwargs = dict(user="******",
                      group="root",
                      mode="0755",
                      source="salt://debian/files/pydmesg.py")

basic_template = """#!pyobjects
File.directory('/tmp', mode='1777', owner='root', group='root')
"""

invalid_template = """#!pyobjects
File.fail('/tmp')
"""

include_template = """#!pyobjects
include('http')
"""
Example #36
def managed(name, entries, connect_spec=None):
    '''Ensure the existence (or not) of LDAP entries and their attributes

    Example:

    .. code-block:: yaml

        ldapi:///:
          ldap.managed:
            - connect_spec:
                bind:
                  method: sasl

            - entries:

              # make sure the entry doesn't exist
              - cn=foo,ou=users,dc=example,dc=com:
                - delete_others: True

              # make sure the entry exists with only the specified
              # attribute values
              - cn=admin,dc=example,dc=com:
                - delete_others: True
                - replace:
                    cn:
                      - admin
                    description:
                      - LDAP administrator
                    objectClass:
                      - simpleSecurityObject
                      - organizationalRole
                    userPassword:
                      - {{pillar.ldap_admin_password}}

              # make sure the entry exists, its olcRootDN attribute
              # has only the specified value, the olcRootPW attribute
              # doesn't exist, and all other attributes are ignored
              - 'olcDatabase={1}hdb,cn=config':
                - replace:
                    olcRootDN:
                      - cn=admin,dc=example,dc=com
                    # the admin entry has its own password attribute
                    olcRootPW: []

              # note the use of 'default'.  also note how you don't
              # have to use list syntax if there is only one attribute
              # value
              - cn=foo,ou=users,dc=example,dc=com:
                - delete_others: True
                - default:
                    userPassword: changeme
                    shadowLastChange: 0
                    # keep sshPublicKey if present, but don't create
                    # the attribute if it is missing
                    sshPublicKey: []
                - replace:
                    cn: foo
                    uid: foo
                    uidNumber: 1000
                    gidNumber: 1000
                    gecos: Foo Bar
                    givenName: Foo
                    sn: Bar
                    homeDirectory: /home/foo
                    loginShell: /bin/bash
                    objectClass:
                      - inetOrgPerson
                      - posixAccount
                      - top
                      - ldapPublicKey
                      - shadowAccount

    :param name:
        The URL of the LDAP server.  This is ignored if
        ``connect_spec`` is either a connection object or a dict with
        a ``'url'`` entry.

    :param entries:
        A description of the desired state of zero or more LDAP
        entries.

        ``entries`` is an iterable of dicts.  Each of these dict's
        keys are the distinguished names (DNs) of LDAP entries to
        manage.  Each of these dicts is processed in order.  A later
        dict can reference an LDAP entry that was already mentioned in
        an earlier dict, which makes it possible for later dicts to
        enhance or alter the desired state of an LDAP entry.

        The DNs are mapped to a description of the LDAP entry's
        desired state.  These LDAP entry descriptions are themselves
        iterables of dicts.  Each dict in the iterable is processed in
        order.  They contain directives controlling the entry's state.
        The key names the directive type and the value is state
        information for the directive.  The specific structure of the
        state information depends on the directive type.

        The structure of ``entries`` looks like this::

            [{dn1: [{directive1: directive1_state,
                     directive2: directive2_state},
                    {directive3: directive3_state}],
              dn2: [{directive4: directive4_state,
                     directive5: directive5_state}]},
             {dn3: [{directive6: directive6_state}]}]

        These are the directives:

        * ``'delete_others'``
            Boolean indicating whether to delete attributes not
            mentioned in this dict or any of the other directive
            dicts for this DN.  Defaults to ``False``.

            If you don't want to delete an attribute if present, but
            you also don't want to add it if it is missing or modify
            it if it is present, you can use either the ``'default'``
            directive or the ``'add'`` directive with an empty value
            list.

        * ``'default'``
            A dict mapping an attribute name to an iterable of default
            values for that attribute.  If the attribute already
            exists, it is left alone.  If not, it is created using the
            given list of values.

            An empty value list is useful when you don't want to
            create an attribute if it is missing but you do want to
            preserve it if the ``'delete_others'`` key is ``True``.

        * ``'add'``
            Attribute values to add to the entry.  This is a dict
            mapping an attribute name to an iterable of values to add.

            An empty value list is useful when you don't want to
            create an attribute if it is missing but you do want to
            preserve it if the ``'delete_others'`` key is ``True``.

        * ``'delete'``
            Attribute values to remove from the entry.  This is a dict
            mapping an attribute name to an iterable of values to
            delete from the attribute.  If the iterable is empty, all
            of the attribute's values are deleted.

        * ``'replace'``
            Attributes to replace.  This is a dict mapping an
            attribute name to an iterable of values.  Any existing
            values for the attribute are deleted, then the given
            values are added.  The iterable may be empty.

        In the above directives, the iterables of attribute values may
        instead be ``None``, in which case an empty list is used, or a
        scalar such as a string or number, in which case a new list
        containing the scalar is used.

        Note that if all attribute values are removed from an entry,
        the entire entry is deleted.

    :param connect_spec:
        See the description of the ``connect_spec`` parameter of the
        :py:func:`ldap3.connect <salt.modules.ldap3.connect>` function
        in the :py:mod:`ldap3 <salt.modules.ldap3>` execution module.
        If this is a dict and the ``'url'`` entry is not specified,
        the ``'url'`` entry is set to the value of the ``name``
        parameter.

    :returns:
        A dict with the following keys:

        * ``'name'``
            This is the same object passed to the ``name`` parameter.

        * ``'changes'``
            This is a dict describing the changes made (or, in test
            mode, the changes that would have been attempted).  If no
            changes were made (or no changes would have been
            attempted), then this dict is empty.  Only successful
            changes are included.

            Each key is a DN of an entry that was changed (or would
            have been changed).  Entries that were not changed (or
            would not have been changed) are not included.  The value
            is a dict with two keys:

            * ``'old'``
                The state of the entry before modification.  If the
                entry did not previously exist, this key maps to
                ``None``.  Otherwise, the value is a dict mapping each
                of the old entry's attributes to a list of its values
                before any modifications were made.  Unchanged
                attributes are excluded from this dict.

            * ``'new'``
                The state of the entry after modification.  If the
                entry was deleted, this key maps to ``None``.
                Otherwise, the value is a dict mapping each of the
                entry's attributes to a list of its values after the
                modifications were made.  Unchanged attributes are
                excluded from this dict.

            Example ``'changes'`` dict where a new entry was created
            with a single attribute containing two values::

                {'dn1': {'old': None,
                         'new': {'attr1': ['val1', 'val2']}}}

            Example ``'changes'`` dict where a new attribute was added
            to an existing entry::

                {'dn1': {'old': {},
                         'new': {'attr2': ['val3']}}}

        * ``'result'``
            One of the following values:

            * ``True`` if no changes were necessary or if all changes
              were applied successfully.
            * ``False`` if at least one change was unable to be applied.
            * ``None`` if changes would be applied but it is in test
              mode.
    '''
    if connect_spec is None:
        connect_spec = {}
    try:
        connect_spec.setdefault('url', name)
    except AttributeError:
        # already a connection object
        pass

    connect = __salt__['ldap3.connect']

    # hack to get at the ldap3 module to access the ldap3.LDAPError
    # exception class.  https://github.com/saltstack/salt/issues/27578
    ldap3 = inspect.getmodule(connect)

    with connect(connect_spec) as l:

        old, new = _process_entries(l, entries)

        # collect all of the affected entries (only the key is
        # important in this dict; would have used an OrderedSet if
        # there was one)
        dn_set = OrderedDict()
        dn_set.update(old)
        dn_set.update(new)

        # do some cleanup
        dn_to_delete = set()
        for dn in dn_set:
            o = old.get(dn, {})
            n = new.get(dn, {})
            for x in o, n:
                to_delete = set()
                for attr, vals in six.iteritems(x):
                    if not len(vals):
                        # clean out empty attribute lists
                        to_delete.add(attr)
                for attr in to_delete:
                    del x[attr]
            if o == n:
                # clean out unchanged entries
                dn_to_delete.add(dn)
        for dn in dn_to_delete:
            for x in old, new:
                x.pop(dn, None)
            del dn_set[dn]

        ret = {
            'name': name,
            'changes': {},
            'result': None,
            'comment': '',
        }

        if old == new:
            ret['comment'] = 'LDAP entries already set'
            ret['result'] = True
            return ret

        if __opts__['test']:
            ret['comment'] = 'Would change LDAP entries'
            changed_old = old
            changed_new = new
            success_dn_set = dn_set
        else:
            # execute the changes
            changed_old = OrderedDict()
            changed_new = OrderedDict()
            # assume success; these will be changed on error
            ret['result'] = True
            ret['comment'] = 'Successfully updated LDAP entries'
            errs = []
            success_dn_set = OrderedDict()
            for dn in dn_set:
                o = old.get(dn, {})
                n = new.get(dn, {})

                try:
                    # perform the operation
                    if len(o):
                        if len(n):
                            op = 'modify'
                            assert o != n
                            __salt__['ldap3.change'](l, dn, o, n)
                        else:
                            op = 'delete'
                            __salt__['ldap3.delete'](l, dn)
                    else:
                        op = 'add'
                        assert len(n)
                        __salt__['ldap3.add'](l, dn, n)

                    # update these after the op in case an exception
                    # is raised
                    changed_old[dn] = o
                    changed_new[dn] = n
                    success_dn_set[dn] = True
                except ldap3.LDAPError:
                    log.exception('failed to %s entry %s', op, dn)
                    errs.append((op, dn))
                    continue

            if len(errs):
                ret['result'] = False
                ret['comment'] = 'failed to ' \
                                 + ', '.join((op + ' entry ' + dn
                                              for op, dn in errs))

    # set ret['changes'].  filter out any unchanged attributes, and
    # convert the value sets to lists before returning them to the
    # user (sorted for easier comparisons)
    for dn in success_dn_set:
        o = changed_old.get(dn, {})
        n = changed_new.get(dn, {})
        changes = {}
        ret['changes'][dn] = changes
        for x, xn in ((o, 'old'), (n, 'new')):
            if not len(x):
                changes[xn] = None
                continue
            changes[xn] = dict(((attr, sorted(vals))
                                for attr, vals in six.iteritems(x)
                                if o.get(attr, ()) != n.get(attr, ())))

    return ret
Example #37
def ovh_set_nss(domain, dnss, dnssec=False):
    register_checks()
    log.info('{0}: Setting up DNS'.format(domain))
    if not dnss:
        raise ValueError('no dns for {0}'.format(domain))
    _s = __salt__
    crets = OrderedDict()
    client = __salt__['mc_provider.ovh_client'](domain=domain)
    dnssec_ep = '/domain/zone/{0}/dnssec'.format(domain)
    dnssec_status = client.get(dnssec_ep)['status'] != 'disabled'
    if dnssec:
        if not dnssec_status:
            client.post(dnssec_ep)
            log.info('{0}: activated dnssec'.format(domain))
    else:
        if dnssec_status:
            client.delete(dnssec_ep)
            log.info('{0}: deactivated dnssec'.format(domain))
    hosted_status = client.get('/domain/{0}'.format(domain))
    if hosted_status['nameServerType'] == 'hosted':
        client.put('/domain/{0}'.format(domain), nameServerType='external')
    current_nss = {}
    todelete = set()
    skip = []
    crets['skipped'] = OrderedDict()
    for nsid in client.get('/domain/{0}/nameServer'.format(domain)):
        ns = client.get('/domain/{0}/nameServer/{1}'.format(domain, nsid))
        host = ns['host']
        dns = current_nss.setdefault(host, {})
        cns = current_nss[host] = __salt__['mc_utils.dictupdate'](dns, ns)
        if host not in dnss:
            todelete.add(nsid)
        if host in dnss and cns['ip']:
            if cns['ip'] == dnss[host][0]:
                crets['skipped'][host] = cns
                skip.append(host)
            else:
                todelete.add(nsid)

    def remove_remaining(remaining, verbose=False):
        can_delete = True
        for ns in [a for a in todelete]:
            if not can_delete:
                continue
            ret = client.get('/domain/{0}/nameServer/{1}'.format(domain, ns))
            if ret['toDelete']:
                continue
            try:
                client.delete('/domain/{0}/nameServer/{1}'.format(domain, ns))
                log.info('{1}: deleted ns: {0}'.format(ns, domain))
                todelete.remove(ns)
            except (Exception, ) as exc:
                if verbose:
                    print(exc)
                    log.error(traceback.format_exc())
                can_delete = False
                continue
        return todelete

    todelete = remove_remaining(todelete)
    for ns, data in six.iteritems(dnss):
        if ns in skip:
            continue
        nameservers = [{'host': ns, 'ip': a} for a in data]
        crets['ns_{0}'.format(ns)] = ret = client.post(
            '/domain/{0}/nameServer'.format(domain), nameServer=nameservers)
        if ret['status'] != 'todo':
            log.error("{0} unexpected result".format(ns))
            log.error(pprint.pformat(ret))
        else:
            log.info('{1}: Created ns: {0}'.format(ns, domain))
        # try to delete remaining deleted servers
        todelete = remove_remaining(todelete)
    todelete = remove_remaining(todelete, verbose=True)
    if todelete:
        log.error('{0}: {1} were not deleted'.format(domain, todelete))
    return crets
Example #38
def _absent(name,
            dataset_type,
            force=False,
            recursive=False,
            recursive_all=False):
    '''
    internal shared function for *_absent

    name : string
        name of dataset
    dataset_type : string [filesystem, volume, snapshot, or bookmark]
        type of dataset to remove
    force : boolean
        try harder to destroy the dataset
    recursive : boolean
        also destroy all the child datasets
    recursive_all : boolean
        recursively destroy all dependents, including cloned file systems
        outside the target hierarchy. (-R)

    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    ## log configuration
    dataset_type = dataset_type.lower()
    log.debug('zfs.%s_absent::%s::config::force = %s', dataset_type, name,
              force)
    log.debug('zfs.%s_absent::%s::config::recursive = %s', dataset_type, name,
              recursive)

    ## destroy dataset if needed
    if __salt__['zfs.exists'](name, **{'type': dataset_type}):
        ## NOTE: dataset found with the name and dataset_type
        if not __opts__['test']:
            mod_res = __salt__['zfs.destroy'](name, **{
                'force': force,
                'recursive': recursive,
                'recursive_all': recursive_all
            })
        else:
            mod_res = OrderedDict([('destroyed', True)])

        ret['result'] = mod_res['destroyed']
        if ret['result']:
            ret['changes'][name] = 'destroyed'
            ret['comment'] = '{0} {1} was destroyed'.format(
                dataset_type,
                name,
            )
        else:
            ret['comment'] = 'failed to destroy {0} {1}'.format(
                dataset_type,
                name,
            )
            if 'error' in mod_res:
                ret['comment'] = mod_res['error']
    else:
        ## NOTE: no dataset found with name of the dataset_type
        ret['comment'] = '{0} {1} is absent'.format(dataset_type, name)

    return ret
Example #39
def gandi_glues(domain, dnss):
    register_checks()
    _s = __salt__
    api, apikey = _s['mc_provider.gandi_client'](domain=domain)
    current_glues = api.domain.host.list(apikey, domain)
    ret = OrderedDict()
    ret['add'] = OrderedDict()
    ret['update'] = OrderedDict()
    ret['delete'] = OrderedDict()
    ret['ns'] = OrderedDict()
    toadd = {}
    toupdate = {}
    todelete = {}
    for a in current_glues:
        if a['name'] not in dnss:
            ips = todelete.setdefault(a['name'], [])
            for ip in a['ips']:
                if ip not in ips:
                    ips.append(ip)
    for a in current_glues:
        if a['name'] in dnss:
            aips = dnss[a['name']][:]
            aips.sort()
            bips = a['ips'][:]
            bips.sort()
            if aips != bips:
                toupdate[a['name']] = aips
    for dns in dnss:
        if dns not in [a['name'] for a in current_glues]:
            toadd[dns] = dnss[dns][:]
    error = False
    for ns, ips in six.iteritems(toadd):
        try:
            ret['add'][ns] = api.domain.host.create(apikey, ns, ips)
            assert ret['add'][ns]['errortype'] is None
        except KeyboardInterrupt:
            raise
        except Exception:
            print(traceback.format_exc())
            error = True
    if not error:
        for ns, ips in six.iteritems(toupdate):
            try:
                ret['update'][ns] = api.domain.host.update(apikey, ns, ips)
                assert ret['update'][ns]['errortype'] is None
            except KeyboardInterrupt:
                raise
            except Exception:
                print(traceback.format_exc())
                error = True
    if not error:
        try:
            ret['ns'] = api.domain.nameservers.set(apikey, domain,
                                                   [a for a in dnss])
            assert ret['ns']['errortype'] is None
        except KeyboardInterrupt:
            raise
        except Exception:
            print(traceback.format_exc())
            error = True
    if not error:
        for ns, ips in six.iteritems(todelete):
            try:
                ret['delete'][ns] = api.domain.host.delete(apikey, ns)
                assert ret['delete'][ns]['errortype'] is None
            except KeyboardInterrupt:
                raise
            except Exception:
                print(traceback.format_exc())
                error = True
    ret['error'] = error
    return ret
Example #40
class PathInfoDict(fileinfo.FileInfo):
    def __init__(self, match_each=True, **patterns):
        '''
        match_each:
            If True, each file path is matched as it is added, which uses
            less memory but sacrifices a little performance.  If False, the complete
            list is matched after all the file information has been added to
            pathinfo.

        patterns:
            Contains the patterns to match.
            Example:
                { 'saltenv': 'base', 'relpath': ['*.sls'] }
        '''
        super(PathInfoDict, self).__init__(
            fields=PATHINFO_FIELDS,
            match_each=match_each,
            **patterns
        )
        self._elements = OrderedDict()

    @property
    def as_sequence(self):
        if self.pattern and not self.match_each:
            return list(
                matcher.ifilter(
                    self._elements.values(),
                    _pattern=self.pattern
                )
            )
        return self._elements.values()

    def element(self, root=None, abspath=None, **kwargs):
        '''
        kwargs contain extra information for custom methods.

        This method must return a valid empty object if no vars are passed
        to allow introspection to create patterns.

        :param root:
        :param abspath:
        '''
        if root is None and abspath is None:
            root = os.path.abspath('.')
            abspath = os.path.abspath('.')
        relpath = os.path.relpath(abspath, root)

        try:
            element = self._elements.get(relpath, OrderedDict())
        except AttributeError:
            element = OrderedDict()

        if not element:
            for field in PATHINFO_FIELDS:
                element.setdefault(field, '')
            element['saltenv'] = kwargs.get('saltenv', 'base')
            element['relpath'] = relpath
            element['abspath'] = abspath
            element['is_pillar'] = kwargs.get('is_pillar', False)

        cachedir = kwargs.get('cachedir', '')
        if cachedir and os.path.commonprefix([abspath, cachedir]) == cachedir:
            element['cache_root'] = root
        else:
            element['file_root'] = root

        element_hook = kwargs.get('_element_hook', None)
        if element_hook:
            element = element_hook(self, element, **kwargs)

        return element

    def add_element(self, element, **kwargs):
        add_hook = kwargs.get('_add_hook', None)
        if add_hook:
            element = add_hook(self, element, **kwargs)

        if element['relpath'] not in self._elements:
            self._elements[element['relpath']] = element

    def filelist(self, roots, **kwargs):
        '''
        :param roots:
            file_roots, pillar_roots, cache_roots, etc to walk.

        kwargs:
            Contains any extra variables to pass to element.

        '''
        for env, destdirs in six.iteritems(roots):
            kwargs['saltenv'] = env
            super(PathInfoDict, self).filelist(destdirs, **kwargs)

        return self.as_sequence
Example #41
    def test_present_update_nochange_success(self):
        """
        Test zpool present with an existing pool that needs no update
        """
        ret = {
            "name": "myzpool",
            "result": True,
            "comment": "no update needed",
            "changes": {},
        }

        config = {
            "import": False,
        }
        layout = [
            OrderedDict([("mirror", ["disk0", "disk1"])]),
            OrderedDict([("mirror", ["disk2", "disk3"])]),
        ]
        properties = {
            "autoexpand": True,
        }

        mock_exists = MagicMock(return_value=True)
        mock_get = MagicMock(return_value=OrderedDict([
            ("comment", "salt managed pool"),
            ("freeing", 0),
            ("listsnapshots", False),
            ("leaked", 0),
            ("feature@obsolete_counts", "enabled"),
            ("feature@sha512", "enabled"),
            ("delegation", True),
            ("dedupditto", "0"),
            ("dedupratio", "1.00x"),
            ("autoexpand", True),
            ("feature@bookmarks", "enabled"),
            ("allocated", 115712),
            ("guid", 1591906802560842214),
            ("feature@large_blocks", "enabled"),
            ("size", 2113929216),
            ("feature@enabled_txg", "active"),
            ("feature@hole_birth", "active"),
            ("capacity", 0),
            ("feature@multi_vdev_crash_dump", "enabled"),
            ("feature@extensible_dataset", "enabled"),
            ("cachefile", "-"),
            ("bootfs", "-"),
            ("autoreplace", True),
            ("readonly", False),
            ("version", "-"),
            ("health", "ONLINE"),
            ("expandsize", "-"),
            ("feature@embedded_data", "active"),
            ("feature@lz4_compress", "active"),
            ("feature@async_destroy", "enabled"),
            ("feature@skein", "enabled"),
            ("feature@empty_bpobj", "enabled"),
            ("feature@spacemap_histogram", "active"),
            ("bootsize", "-"),
            ("free", 2113813504),
            ("feature@device_removal", "enabled"),
            ("failmode", "wait"),
            ("feature@filesystem_limits", "enabled"),
            ("feature@edonr", "enabled"),
            ("altroot", "-"),
            ("fragmentation", "0%"),
        ]))
        with patch.dict(zpool.__salt__,
                        {"zpool.exists": mock_exists}), patch.dict(
                            zpool.__salt__,
                            {"zpool.get": mock_get}), patch.dict(
                                zpool.__utils__, self.utils_patch):
            self.assertEqual(
                zpool.present(
                    "myzpool",
                    config=config,
                    layout=layout,
                    properties=properties,
                ),
                ret,
            )

        # Run state with test=true
        ret = {
            "name": "myzpool",
            "result": True,
            "comment": "storage pool myzpool is uptodate",
            "changes": {},
        }

        with patch.dict(zpool.__salt__,
                        {"zpool.exists": mock_exists}), patch.dict(
                            zpool.__salt__,
                            {"zpool.get": mock_get}), patch.dict(
                                zpool.__utils__, self.utils_patch), patch.dict(
                                    zpool.__opts__, {"test": True}):
            self.assertEqual(
                zpool.present(
                    "myzpool",
                    config=config,
                    layout=layout,
                    properties=properties,
                ),
                ret,
            )
Example #42
def scheduled_snapshot(name, prefix, recursive=True, schedule=None):
    '''
    maintain a set of snapshots based on a schedule

    name : string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    recursive : boolean
        create snapshots for all children also
    schedule : dict
        dict holding the schedule; the following keys are available (minute,
        hour, day, month, and year). By default all are set to 0; the value
        indicates the number of snapshots of that type to keep around.

    .. warning::

        snapshots will only be created and pruned every time the state runs.
        a schedule must be set up to automatically run the state. this means
        that if you run the state daily, the hourly snapshot will only be made
        once per day!

    .. versionchanged:: 2018.3.0

        switched to localtime from gmtime so times now take into account timezones.

    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    ## initialize defaults
    if schedule is None:
        schedule = {}
    schedule_holds = ['minute', 'hour', 'day', 'month', 'year']
    snapshots = OrderedDict([
        ('_create', OrderedDict()),
        ('_prunable', []),
        ('_schedule', OrderedDict()),
    ])

    ## strict configuration validation
    ## NOTE: we need a valid dataset
    if not __utils__['zfs.is_dataset'](name):
        ret['result'] = False
        ret['comment'] = 'invalid dataset name: {0}'.format(name)

    if not __salt__['zfs.exists'](name, **{'type': 'filesystem,volume'}):
        ret['comment'] = 'dataset {0} does not exist'.format(name)
        ret['result'] = False

    ## NOTE: prefix must be 4 or longer
    if not prefix or len(prefix) < 4:
        ret['comment'] = 'prefix ({0}) must be at least 4 characters long'.format(prefix)
        ret['result'] = False

    ## NOTE: validate schedule
    total_count = 0
    for hold in schedule_holds:
        snapshots[hold] = []
        if hold not in schedule:
            snapshots['_schedule'][hold] = 0
        elif isinstance(schedule[hold], int):
            snapshots['_schedule'][hold] = schedule[hold]
        else:
            ret['result'] = False
            ret['comment'] = 'schedule value for {0} is not an integer'.format(hold)
            break
        total_count += snapshots['_schedule'][hold]
    if ret['result'] and total_count == 0:
        ret['result'] = False
        ret['comment'] = 'schedule is not valid, you need to keep at least 1 snapshot'

    ## NOTE: return if configuration is not valid
    if not ret['result']:
        return ret

    ## retrieve existing snapshots
    snapshots = _schedule_snapshot_retrieve(name, prefix, snapshots)

    ## prepare snapshot
    snapshots = _schedule_snapshot_prepare(name, prefix, snapshots)

    ## log configuration
    log.debug('zfs.scheduled_snapshot::%s::config::recursive = %s', name,
              recursive)
    log.debug('zfs.scheduled_snapshot::%s::config::prefix = %s', name, prefix)
    log.debug('zfs.scheduled_snapshot::%s::snapshots = %s', name, snapshots)

    ## create snapshot(s)
    for snapshot_name, snapshot_holds in snapshots['_create'].items():
        ## NOTE: skip if new snapshot has no holds
        if not snapshot_holds:
            continue

        ## NOTE: create snapshot
        if not __opts__['test']:
            mod_res = __salt__['zfs.snapshot'](snapshot_name, **{
                'recursive': recursive
            })
        else:
            mod_res = OrderedDict([('snapshotted', True)])

        if not mod_res['snapshotted']:
            ret['result'] = False
            ret['comment'] = 'error creating snapshot ({0})'.format(
                snapshot_name)
        else:
            ## NOTE: create holds (if we have a snapshot)
            for hold in snapshot_holds:
                if not __opts__['test']:
                    mod_res = __salt__['zfs.hold'](hold, snapshot_name, **{
                        'recursive': recursive
                    })
                else:
                    mod_res = OrderedDict([('held', True)])

                if not mod_res['held']:
                    ret['result'] = False
                    ret['comment'] = "error adding hold ({0}) to snapshot ({1})".format(
                        hold,
                        snapshot_name,
                    )
                    break

                snapshots[hold].append(snapshot_name)

        if ret['result']:
            ret['comment'] = 'scheduled snapshots updated'
            if 'created' not in ret['changes']:
                ret['changes']['created'] = []
            ret['changes']['created'].append(snapshot_name)

    ## prune hold(s)
    for hold, hold_count in snapshots['_schedule'].items():
        while ret['result'] and len(snapshots[hold]) > hold_count:
            ## NOTE: pop oldest snapshot
            snapshot_name = snapshots[hold].pop(0)

            ## NOTE: release hold for snapshot
            if not __opts__['test']:
                mod_res = __salt__['zfs.release'](hold, snapshot_name, **{
                    'recursive': recursive
                })
            else:
                mod_res = OrderedDict([('released', True)])

            if not mod_res['released']:
                ret['result'] = False
                ret['comment'] = "error adding hold ({0}) to snapshot ({1})".format(
                    hold,
                    snapshot_name,
                )

            ## NOTE: mark as prunable
            if not __salt__['zfs.holds'](snapshot_name):
                snapshots['_prunable'].append(snapshot_name)

    ## prune snapshot(s)
    for snapshot_name in snapshots['_prunable']:
        ## NOTE: destroy snapshot
        if not __opts__['test']:
            mod_res = __salt__['zfs.destroy'](snapshot_name, **{
                'recursive': recursive
            })
        else:
            mod_res = OrderedDict([('destroyed', True)])

        if not mod_res['destroyed']:
            ret['result'] = False
            ret['comment'] = "error prunding snapshot ({1})".format(
                snapshot_name, )
            break

    if ret['result'] and snapshots['_prunable']:
        ret['comment'] = 'scheduled snapshots updated'
        ret['changes']['pruned'] = snapshots['_prunable']

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'scheduled snapshots are up to date'

    return ret
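A standalone sketch of the schedule validation performed above, with no Salt
dependencies; the helper name and return shape are illustrative only, not part
of the original state module:

from collections import OrderedDict

def validate_schedule(schedule, holds=('minute', 'hour', 'day', 'month', 'year')):
    '''Mirror the checks above: every hold must be an integer count,
    and at least one snapshot must be kept overall.'''
    normalized = OrderedDict()
    for hold in holds:
        value = schedule.get(hold, 0)
        if not isinstance(value, int):
            return None, 'schedule value for {0} is not an integer'.format(hold)
        normalized[hold] = value
    if sum(normalized.values()) == 0:
        return None, 'schedule is not valid, you need to keep at least 1 snapshot'
    return normalized, None

print(validate_schedule({'hour': 24, 'day': 7}))
# (OrderedDict([('minute', 0), ('hour', 24), ('day', 7), ('month', 0), ('year', 0)]), None)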
Beispiel #43
0
def _process_entries(l, entries):
    '''Helper for managed() to process entries and return before/after views

    Collect the current database state and update it according to the
    data in :py:func:`managed`'s ``entries`` parameter.  Return the
    current database state and what it will look like after
    modification.

    :param l:
        the LDAP connection object

    :param entries:
        the same object passed to the ``entries`` parameter of
        :py:func:`managed`

    :return:
        an ``(old, new)`` tuple that describes the current state of
        the entries and what they will look like after modification.
        Each item in the tuple is an OrderedDict that maps an entry DN
        to another dict that maps an attribute name to a set of its
        values (it's a set because according to the LDAP spec,
        attribute value ordering is unspecified and there can't be
        duplicates).  The structure looks like this:

            {dn1: {attr1: set([val1])},
             dn2: {attr1: set([val2]), attr2: set([val3, val4])}}

        All of an entry's attributes and values will be included, even
        if they will not be modified.  If an entry mentioned in the
        entries variable doesn't yet exist in the database, the DN in
        ``old`` will be mapped to an empty dict.  If an entry in the
        database will be deleted, the DN in ``new`` will be mapped to
        an empty dict.  All value sets are non-empty:  An attribute
        that will be added to an entry is not included in ``old``, and
        an attribute that will be deleted from an entry is not included
        in ``new``.

        These are OrderedDicts to ensure that the user-supplied
        entries are processed in the user-specified order (in case
        there are dependencies, such as ACL rules specified in an
        early entry that make it possible to modify a later entry).
    '''

    old = OrderedDict()
    new = OrderedDict()

    for entries_dict in entries:
        for dn, directives_seq in six.iteritems(entries_dict):
            # get the old entry's state.  first check to see if we've
            # previously processed the entry.
            olde = new.get(dn, None)
            if olde is None:
                # next check the database
                results = __salt__['ldap3.search'](l, dn, 'base')
                if len(results) == 1:
                    attrs = results[dn]
                    olde = dict(((attr, set(attrs[attr]))
                                 for attr in attrs
                                 if len(attrs[attr])))
                else:
                    # nothing, so it must be a brand new entry
                    assert len(results) == 0
                    olde = {}
                old[dn] = olde
            # copy the old entry to create the new (don't do a simple
            # assignment or else modifications to newe will affect
            # olde)
            newe = copy.deepcopy(olde)
            new[dn] = newe

            # process the directives
            entry_status = {
                'delete_others': False,
                'mentioned_attributes': set(),
            }
            for directives in directives_seq:
                _update_entry(newe, entry_status, directives)
            if entry_status['delete_others']:
                to_delete = set()
                for attr in newe:
                    if attr not in entry_status['mentioned_attributes']:
                        to_delete.add(attr)
                for attr in to_delete:
                    del newe[attr]
    return old, new
Beispiel #44
0
def _process_entries(l, entries):
    '''Helper for managed() to process entries and return before/after views

    Collect the current database state and update it according to the
    data in :py:func:`managed`'s ``entries`` parameter.  Return the
    current database state and what it will look like after
    modification.

    :param l:
        the LDAP connection object

    :param entries:
        the same object passed to the ``entries`` parameter of
        :py:func:`managed`

    :return:
        an ``(old, new)`` tuple that describes the current state of
        the entries and what they will look like after modification.
        Each item in the tuple is an OrderedDict that maps an entry DN
        to another dict that maps an attribute name to a set of its
        values (it's a set because according to the LDAP spec,
        attribute value ordering is unspecified and there can't be
        duplicates).  The structure looks like this:

            {dn1: {attr1: set([val1])},
             dn2: {attr1: set([val2]), attr2: set([val3, val4])}}

        All of an entry's attributes and values will be included, even
        if they will not be modified.  If an entry mentioned in the
        entries variable doesn't yet exist in the database, the DN in
        ``old`` will be mapped to an empty dict.  If an entry in the
        database will be deleted, the DN in ``new`` will be mapped to
        an empty dict.  All value sets are non-empty:  An attribute
        that will be added to an entry is not included in ``old``, and
        an attribute that will be deleted from an entry is not included
        in ``new``.

        These are OrderedDicts to ensure that the user-supplied
        entries are processed in the user-specified order (in case
        there are dependencies, such as ACL rules specified in an
        early entry that make it possible to modify a later entry).
    '''

    old = OrderedDict()
    new = OrderedDict()

    for entries_dict in entries:
        for dn, directives_seq in six.iteritems(entries_dict):
            # get the old entry's state.  first check to see if we've
            # previously processed the entry.
            olde = new.get(dn, None)
            if olde is None:
                # next check the database
                results = __salt__['ldap3.search'](l, dn, 'base')
                if len(results) == 1:
                    attrs = results[dn]
                    olde = dict(((attr, OrderedSet(attrs[attr]))
                                 for attr in attrs if len(attrs[attr])))
                else:
                    # nothing, so it must be a brand new entry
                    assert not results
                    olde = {}
                old[dn] = olde
            # copy the old entry to create the new (don't do a simple
            # assignment or else modifications to newe will affect
            # olde)
            newe = copy.deepcopy(olde)
            new[dn] = newe

            # process the directives
            entry_status = {
                'delete_others': False,
                'mentioned_attributes': set(),
            }
            for directives in directives_seq:
                _update_entry(newe, entry_status, directives)
            if entry_status['delete_others']:
                to_delete = set()
                for attr in newe:
                    if attr not in entry_status['mentioned_attributes']:
                        to_delete.add(attr)
                for attr in to_delete:
                    del newe[attr]
    return old, new
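To make the documented ``(old, new)`` shape concrete, here is a hypothetical
before/after pair and a tiny diff over it; the DNs and attributes are made up
for illustration and are not part of the original module:

from collections import OrderedDict

old = OrderedDict([
    ('cn=admin,dc=example,dc=com', {'cn': {'admin'}}),
    ('cn=foo,ou=users,dc=example,dc=com', {}),   # entry does not exist yet
])
new = OrderedDict([
    ('cn=admin,dc=example,dc=com', {'cn': {'admin'},
                                    'description': {'LDAP administrator'}}),
    ('cn=foo,ou=users,dc=example,dc=com', {'cn': {'foo'}}),
])

# An attribute being added appears only in ``new`` (and vice versa for deletes).
for dn in new:
    added = sorted(set(new[dn]) - set(old.get(dn, {})))
    print('{0} adds attributes: {1}'.format(dn, added))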
Beispiel #45
0
# -*- coding: utf-8 -*-
'''
Application Kinds of Salt apps.
These are used to indicate what kind of Application is using RAET
'''
from __future__ import absolute_import
from collections import namedtuple
from salt.utils.odict import OrderedDict

# Python equivalent of an enum
APPL_KINDS = OrderedDict([('master', 0),
                          ('minion', 1),
                          ('syndic', 2),
                          ('caller', 3)])
APPL_KIND_NAMES = OrderedDict((v, k) for k, v in list(APPL_KINDS.items()))  # inverse map
ApplKind = namedtuple('ApplKind', list(APPL_KINDS.keys()))
applKinds = ApplKind(**APPL_KINDS)
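The same enum idiom works with only the standard library; a self-contained
equivalent using ``collections.OrderedDict`` instead of Salt's:

from collections import OrderedDict, namedtuple

KINDS = OrderedDict([('master', 0), ('minion', 1), ('syndic', 2), ('caller', 3)])
KIND_NAMES = OrderedDict((v, k) for k, v in KINDS.items())  # inverse map
Kind = namedtuple('Kind', list(KINDS.keys()))
kinds = Kind(**KINDS)

print(kinds.minion)     # 1
print(KIND_NAMES[2])    # 'syndic'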
Beispiel #46
0
def get_local_registry(name,
                       cached=True,
                       cachetime=60,
                       registry_format='yaml'):
    '''Get local registry
    Masteralt & Salt share the local registries
    unless for the main ones:

        - controllers
        - services
        - nodetypes
        - localsettings
        - cloud

    For backward compatibility, we take care to load and merge
    shared registries in mastersalt & salt prefix if any is found.
    '''
    not_shared = ['controllers', 'services', 'nodetypes',
                  'localsettings', 'cloud']
    mastersalt_registryf = '{0}/makina-states/{1}.{2}'.format(
        '/etc/mastersalt',  name, registry_format)
    salt_registryf = '{0}/makina-states/{1}.{2}'.format(
        '/etc/salt',  name, registry_format)
    shared_registryf = os.path.join(
        '/etc/makina-states/{0}.{1}'.format(name, registry_format))
    registry = OrderedDict()
    # cache local registries one minute
    pkey = '{0}____'.format(name)
    key = '{0}{1}'.format(pkey, time.time() // cachetime)
    if name not in not_shared:
        to_load = [mastersalt_registryf,
                   salt_registryf,
                   shared_registryf]
    else:
        to_load = [
            '{0}/makina-states/{1}.{2}'.format(
                __opts__['config_dir'], name, registry_format)
        ]
    if (key not in _LOCAL_REG_CACHE) or (not cached):
        invalidate_cached_registry(name)
        for registryf in to_load:
            dregistry = os.path.dirname(registryf)
            if not os.path.exists(dregistry):
                os.makedirs(dregistry)
            if os.path.exists(registryf):
                _LOCAL_REG_CACHE[key] = registry = __salt__[
                    'mc_utils.dictupdate'](
                        registry,
                        __salt__[
                            'mc_macros.{0}_load_local_registry'.format(
                                registry_format)](name, registryf))
                # unprefix local simple registries
                loc_k = DEFAULT_LOCAL_REG_NAME.format(name)
                for k in [t for t in registry if t.startswith(loc_k)]:
                    spl = loc_k + '.'
                    nk = spl.join(k.split(spl)[1:])
                    registry[nk] = registry[k]
                    registry.pop(k)
    elif cached:
        registry = _LOCAL_REG_CACHE[key]
    return registry
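The cache key above embeds ``time.time() // cachetime``, so entries silently
expire once the interval rolls over. A minimal standalone sketch of that trick;
the function and cache names are illustrative:

import time

_CACHE = {}

def cached_load(name, loader, cachetime=60):
    '''Cache loader() results; the key changes every cachetime seconds,
    so stale buckets are simply never hit again.'''
    key = '{0}____{1}'.format(name, time.time() // cachetime)
    if key not in _CACHE:
        _CACHE[key] = loader()
    return _CACHE[key]

print(cached_load('demo', lambda: {'loaded': True}))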
Beispiel #47
0
    def empty(self):
        self.states = OrderedDict()
        self.requisites = []
Beispiel #48
0
def managed(name, entries, connect_spec=None):
    '''Ensure the existence (or not) of LDAP entries and their attributes

    Example:

    .. code-block:: yaml

        ldapi:///:
          ldap.managed:
            - connect_spec:
                bind:
                  method: sasl

            - entries:

              # make sure the entry doesn't exist
              - cn=foo,ou=users,dc=example,dc=com:
                - delete_others: True

              # make sure the entry exists with only the specified
              # attribute values
              - cn=admin,dc=example,dc=com:
                - delete_others: True
                - replace:
                    cn:
                      - admin
                    description:
                      - LDAP administrator
                    objectClass:
                      - simpleSecurityObject
                      - organizationalRole
                    userPassword:
                      - {{pillar.ldap_admin_password}}

              # make sure the entry exists, its olcRootDN attribute
              # has only the specified value, the olcRootDN attribute
              # doesn't exist, and all other attributes are ignored
              - 'olcDatabase={1}hdb,cn=config':
                - replace:
                    olcRootDN:
                      - cn=admin,dc=example,dc=com
                    # the admin entry has its own password attribute
                    olcRootPW: []

              # note the use of 'default'.  also note how you don't
              # have to use list syntax if there is only one attribute
              # value
              - cn=foo,ou=users,dc=example,dc=com:
                - delete_others: True
                - default:
                    userPassword: changeme
                    shadowLastChange: 0
                    # keep sshPublicKey if present, but don't create
                    # the attribute if it is missing
                    sshPublicKey: []
                - replace:
                    cn: foo
                    uid: foo
                    uidNumber: 1000
                    gidNumber: 1000
                    gecos: Foo Bar
                    givenName: Foo
                    sn: Bar
                    homeDirectory: /home/foo
                    loginShell: /bin/bash
                    objectClass:
                      - inetOrgPerson
                      - posixAccount
                      - top
                      - ldapPublicKey
                      - shadowAccount

    :param name:
        The URL of the LDAP server.  This is ignored if
        ``connect_spec`` is either a connection object or a dict with
        a ``'url'`` entry.

    :param entries:
        A description of the desired state of zero or more LDAP
        entries.

        ``entries`` is an iterable of dicts.  Each of these dict's
        keys are the distinguished names (DNs) of LDAP entries to
        manage.  Each of these dicts is processed in order.  A later
        dict can reference an LDAP entry that was already mentioned in
        an earlier dict, which makes it possible for later dicts to
        enhance or alter the desired state of an LDAP entry.

        The DNs are mapped to a description of the LDAP entry's
        desired state.  These LDAP entry descriptions are themselves
        iterables of dicts.  Each dict in the iterable is processed in
        order.  They contain directives controlling the entry's state.
        The key names the directive type and the value is state
        information for the directive.  The specific structure of the
        state information depends on the directive type.

        The structure of ``entries`` looks like this::

            [{dn1: [{directive1: directive1_state,
                     directive2: directive2_state},
                    {directive3: directive3_state}],
              dn2: [{directive4: directive4_state,
                     directive5: directive5_state}]},
             {dn3: [{directive6: directive6_state}]}]

        These are the directives:

        * ``'delete_others'``
            Boolean indicating whether to delete attributes not
            mentioned in this dict or any of the other directive
            dicts for this DN.  Defaults to ``False``.

            If you don't want to delete an attribute if present, but
            you also don't want to add it if it is missing or modify
            it if it is present, you can use either the ``'default'``
            directive or the ``'add'`` directive with an empty value
            list.

        * ``'default'``
            A dict mapping an attribute name to an iterable of default
            values for that attribute.  If the attribute already
            exists, it is left alone.  If not, it is created using the
            given list of values.

            An empty value list is useful when you don't want to
            create an attribute if it is missing but you do want to
            preserve it if the ``'delete_others'`` key is ``True``.

        * ``'add'``
            Attribute values to add to the entry.  This is a dict
            mapping an attribute name to an iterable of values to add.

            An empty value list is useful when you don't want to
            create an attribute if it is missing but you do want to
            preserve it if the ``'delete_others'`` key is ``True``.

        * ``'delete'``
            Attribute values to remove from the entry.  This is a dict
            mapping an attribute name to an iterable of values to
            delete from the attribute.  If the iterable is empty, all
            of the attribute's values are deleted.

        * ``'replace'``
            Attributes to replace.  This is a dict mapping an
            attribute name to an iterable of values.  Any existing
            values for the attribute are deleted, then the given
            values are added.  The iterable may be empty.

        In the above directives, the iterables of attribute values may
        instead be ``None``, in which case an empty list is used, or a
        scalar such as a string or number, in which case a new list
        containing the scalar is used.

        Note that if all attribute values are removed from an entry,
        the entire entry is deleted.

    :param connect_spec:
        See the description of the ``connect_spec`` parameter of the
        :py:func:`ldap3.connect <salt.modules.ldap3.connect>` function
        in the :py:mod:`ldap3 <salt.modules.ldap3>` execution module.
        If this is a dict and the ``'url'`` entry is not specified,
        the ``'url'`` entry is set to the value of the ``name``
        parameter.

    :returns:
        A dict with the following keys:

        * ``'name'``
            This is the same object passed to the ``name`` parameter.

        * ``'changes'``
            This is a dict describing the changes made (or, in test
            mode, the changes that would have been attempted).  If no
            changes were made (or no changes would have been
            attempted), then this dict is empty.  Only successful
            changes are included.

            Each key is a DN of an entry that was changed (or would
            have been changed).  Entries that were not changed (or
            would not have been changed) are not included.  The value
            is a dict with two keys:

            * ``'old'``
                The state of the entry before modification.  If the
                entry did not previously exist, this key maps to
                ``None``.  Otherwise, the value is a dict mapping each
                of the old entry's attributes to a list of its values
                before any modifications were made.  Unchanged
                attributes are excluded from this dict.

            * ``'new'``
                The state of the entry after modification.  If the
                entry was deleted, this key maps to ``None``.
                Otherwise, the value is a dict mapping each of the
                entry's attributes to a list of its values after the
                modifications were made.  Unchanged attributes are
                excluded from this dict.

            Example ``'changes'`` dict where a new entry was created
            with a single attribute containing two values::

                {'dn1': {'old': None,
                         'new': {'attr1': ['val1', 'val2']}}}

            Example ``'changes'`` dict where a new attribute was added
            to an existing entry::

                {'dn1': {'old': {},
                         'new': {'attr2': ['val3']}}}

        * ``'result'``
            One of the following values:

            * ``True`` if no changes were necessary or if all changes
              were applied successfully.
            * ``False`` if at least one change was unable to be applied.
            * ``None`` if changes would be applied but it is in test
              mode.
    '''
    if connect_spec is None:
        connect_spec = {}
    try:
        connect_spec.setdefault('url', name)
    except AttributeError:
        # already a connection object
        pass

    connect = __salt__['ldap3.connect']

    # hack to get at the ldap3 module to access the ldap3.LDAPError
    # exception class.  https://github.com/saltstack/salt/issues/27578
    ldap3 = inspect.getmodule(connect)

    with connect(connect_spec) as l:

        old, new = _process_entries(l, entries)

        # collect all of the affected entries (only the key is
        # important in this dict; would have used an OrderedSet if
        # there was one)
        dn_set = OrderedDict()
        dn_set.update(old)
        dn_set.update(new)

        # do some cleanup
        dn_to_delete = set()
        for dn in dn_set:
            o = old.get(dn, {})
            n = new.get(dn, {})
            for x in o, n:
                to_delete = set()
                for attr, vals in six.iteritems(x):
                    if not vals:
                        # clean out empty attribute lists
                        to_delete.add(attr)
                for attr in to_delete:
                    del x[attr]
            if o == n:
                # clean out unchanged entries
                dn_to_delete.add(dn)
        for dn in dn_to_delete:
            for x in old, new:
                x.pop(dn, None)
            del dn_set[dn]

        ret = {
            'name': name,
            'changes': {},
            'result': None,
            'comment': '',
        }

        if old == new:
            ret['comment'] = 'LDAP entries already set'
            ret['result'] = True
            return ret

        if __opts__['test']:
            ret['comment'] = 'Would change LDAP entries'
            changed_old = old
            changed_new = new
            success_dn_set = dn_set
        else:
            # execute the changes
            changed_old = OrderedDict()
            changed_new = OrderedDict()
            # assume success; these will be changed on error
            ret['result'] = True
            ret['comment'] = 'Successfully updated LDAP entries'
            errs = []
            success_dn_set = OrderedDict()
            for dn in dn_set:
                o = old.get(dn, {})
                n = new.get(dn, {})

                try:
                    # perform the operation
                    if o:
                        if n:
                            op = 'modify'
                            assert o != n
                            __salt__['ldap3.change'](l, dn, o, n)
                        else:
                            op = 'delete'
                            __salt__['ldap3.delete'](l, dn)
                    else:
                        op = 'add'
                        assert n
                        __salt__['ldap3.add'](l, dn, n)

                    # update these after the op in case an exception
                    # is raised
                    changed_old[dn] = o
                    changed_new[dn] = n
                    success_dn_set[dn] = True
                except ldap3.LDAPError as err:
                    log.exception('failed to %s entry %s (%s)', op, dn, err)
                    errs.append((op, dn, err))
                    continue

            if errs:
                ret['result'] = False
                ret['comment'] = 'failed to ' \
                                 + ', '.join((op + ' entry ' + dn + ' (' + six.text_type(err) + ')'
                                              for op, dn, err in errs))

    # set ret['changes'].  filter out any unchanged attributes, and
    # convert the value sets to lists before returning them to the
    # user (sorted for easier comparisons)
    for dn in success_dn_set:
        o = changed_old.get(dn, {})
        n = changed_new.get(dn, {})
        changes = {}
        ret['changes'][dn] = changes
        for x, xn in ((o, 'old'), (n, 'new')):
            if not x:
                changes[xn] = None
                continue
            changes[xn] = dict(((attr, sorted(vals))
                                for attr, vals in six.iteritems(x)
                                if o.get(attr, ()) != n.get(attr, ())))

    return ret
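The cleanup pass in ``managed()`` (drop empty attribute value sets, then drop
entries that end up unchanged) can be exercised on plain dicts; a standalone
sketch:

def cleanup(old, new):
    '''Drop empty attribute value sets, then drop entries that are
    identical in the before and after views.'''
    for view in (old, new):
        for dn in view:
            attrs = view[dn]
            for attr in [a for a in attrs if not attrs[a]]:
                del attrs[attr]
    for dn in [dn for dn in set(old) | set(new)
               if old.get(dn, {}) == new.get(dn, {})]:
        old.pop(dn, None)
        new.pop(dn, None)

old = {'dn1': {'attr1': {'val1'}}, 'dn2': {'attr2': set()}}
new = {'dn1': {'attr1': {'val1', 'val2'}}, 'dn2': {'attr2': set()}}
cleanup(old, new)
print(old)  # {'dn1': {'attr1': {'val1'}}} -- dn2 was unchanged once empty sets were removed
print(new)  # {'dn1': {'attr1': {'val1', 'val2'}}}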
Beispiel #49
0
    def handle_func(self, func, data):
        '''
        Execute this method in a multiprocess or thread
        '''
        if salt.utils.is_windows():
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        ret = {'id': self.opts.get('id', 'master'),
               'fun': func,
               'schedule': data['name'],
               'jid': '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())}

        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
                ret['metadata']['_TOS'] = self.time_offset
                ret['metadata']['_TS'] = time.ctime()
                ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
            else:
                log.warning('schedule: The metadata parameter must be '
                            'specified as a dictionary.  Ignoring.')

        salt.utils.appendproctitle(ret['jid'])

        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
                fn_ = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
                if not os.path.exists(fn_):
                    log.debug('schedule.handle_func: {0} was processed '
                              'in another thread, skipping.'.format(
                                  basefilename))
                    continue
                with salt.utils.fopen(fn_, 'rb') as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    if job:
                        if 'schedule' in job:
                            log.debug('schedule.handle_func: Checking job against '
                                      'fun {0}: {1}'.format(ret['fun'], job))
                            if ret['schedule'] == job['schedule'] and os_is_running(job['pid']):
                                jobcount += 1
                                log.debug(
                                    'schedule.handle_func: Incrementing jobcount, now '
                                    '{0}, maxrunning is {1}'.format(
                                        jobcount, data['maxrunning']))
                                if jobcount >= data['maxrunning']:
                                    log.debug(
                                        'schedule.handle_func: The scheduled job {0} '
                                        'was not started, {1} already running'.format(
                                            ret['schedule'], data['maxrunning']))
                                    return False
                    else:
                        try:
                            log.info('Invalid job file found.  Removing.')
                            os.remove(fn_)
                        except OSError:
                            log.info('Unable to remove file: {0}.'.format(fn_))

        salt.utils.daemonize_if(self.opts)

        ret['pid'] = os.getpid()

        if 'jid_include' not in data or data['jid_include']:
            log.debug('schedule.handle_func: adding this job to the jobcache '
                      'with data {0}'.format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, 'wb+') as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in ret.iteritems():
                kwargs['__pub_{0}'.format(key)] = val

        try:
            ret['return'] = self.functions[func](*args, **kwargs)

            data_returner = data.get('returner', None)
            if data_returner or self.schedule_returner:
                if 'returner_config' in data:
                    ret['ret_config'] = data['returner_config']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, str):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        ret['success'] = True
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job {0} using invalid returner: {1}. Ignoring.'.format(
                                func, returner
                            )
                        )

            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                mret['jid'] = 'req'
                channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in mret.items():
                    load[key] = value
                channel.send(load)

        except Exception:
            log.exception("Unhandled exception running {0}".format(ret['fun']))
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
        finally:
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno))
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
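The returner fan-out above relies on ``OrderedDict.fromkeys`` for
order-preserving de-duplication; in isolation:

from collections import OrderedDict

returners = ['mysql', 'smtp', 'mysql', 'local', 'smtp']
print(list(OrderedDict.fromkeys(returners)))  # ['mysql', 'smtp', 'local'] -- first occurrence wins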
Beispiel #50
0
def get(zpool, prop=None, show_source=False, parsable=True):
    """
    .. versionadded:: 2016.3.0

    Retrieves the given list of properties

    zpool : string
        Name of storage pool

    prop : string
        Optional name of property to retrieve

    show_source : boolean
        Show source of property

    parsable : boolean
        Display numbers in parsable (exact) values

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.get myzpool

    """
    ret = OrderedDict()
    value_properties = ["name", "property", "value", "source"]

    ## collect get output
    res = __salt__["cmd.run_all"](
        __utils__["zfs.zpool_command"](
            command="get",
            flags=["-H"],
            property_name=prop if prop else "all",
            target=zpool,
        ),
        python_shell=False,
    )

    if res["retcode"] != 0:
        return __utils__["zfs.parse_command_result"](res)

    # NOTE: command output for reference
    # ========================================================================
    # ...
    # data  mountpoint  /data   local
    # data  compression off     default
    # ...
    # =========================================================================

    # parse get output
    for line in res["stdout"].splitlines():
        # NOTE: transform data into dict
        prop_data = OrderedDict(
            list(
                zip(
                    value_properties,
                    [x for x in line.strip().split("\t") if x not in [""]],
                )))

        # NOTE: older zfs does not have -o, fall back to manually stripping the name field
        del prop_data["name"]

        # NOTE: normalize values
        if parsable:
            # NOTE: raw numbers and pythonic types
            prop_data["value"] = __utils__["zfs.from_auto"](
                prop_data["property"], prop_data["value"])
        else:
            # NOTE: human readable zfs types
            prop_data["value"] = __utils__["zfs.to_auto"](
                prop_data["property"], prop_data["value"])

        # NOTE: show source if requested
        if show_source:
            ret[prop_data["property"]] = prop_data
            del ret[prop_data["property"]]["property"]
        else:
            ret[prop_data["property"]] = prop_data["value"]

    return ret
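The row parsing in ``get()`` is just a ``zip`` of a fixed header against the
tab-split line; standalone, with a sample row in place of real
``zpool get -H`` output:

from collections import OrderedDict

line = 'data\tcompression\toff\tdefault'  # sample 'zpool get -H' row
value_properties = ['name', 'property', 'value', 'source']

prop_data = OrderedDict(
    zip(value_properties, [x for x in line.strip().split('\t') if x != '']))
del prop_data['name']  # the pool name is the caller's key, not a value
print(prop_data)
# OrderedDict([('property', 'compression'), ('value', 'off'), ('source', 'default')])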
Beispiel #51
0
def status(zpool=None):
    """
    Return the status of the named zpool

    zpool : string
        optional name of storage pool

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.status myzpool

    """
    ret = OrderedDict()

    ## collect status output
    res = __salt__["cmd.run_all"](
        __utils__["zfs.zpool_command"]("status", target=zpool),
        python_shell=False,
    )

    if res["retcode"] != 0:
        return __utils__["zfs.parse_command_result"](res)

    # NOTE: command output for reference
    # =====================================================================
    #   pool: data
    #  state: ONLINE
    #   scan: scrub repaired 0 in 2h27m with 0 errors on Mon Jan  8 03:27:25 2018
    # config:
    #
    #     NAME                       STATE     READ WRITE CKSUM
    #     data                       ONLINE       0     0     0
    #       mirror-0                 ONLINE       0     0     0
    #         c0tXXXXCXXXXXXXXXXXd0  ONLINE       0     0     0
    #         c0tXXXXCXXXXXXXXXXXd0  ONLINE       0     0     0
    #         c0tXXXXCXXXXXXXXXXXd0  ONLINE       0     0     0
    #
    # errors: No known data errors
    # =====================================================================

    ## parse status output
    # NOTE: output is 'key: value' except for the 'config' key.
    #       multiple pools will repeat the output, so we switch pools
    #       when we see 'pool:'
    current_pool = None
    current_prop = None
    for zpd in res["stdout"].splitlines():
        if zpd.strip() == "":
            continue
        if ":" in zpd and zpd[0] != "\t":
            # NOTE: line is 'key: value' format, we just update a dict
            prop = zpd.split(":")[0].strip()
            value = ":".join(zpd.split(":")[1:]).strip()
            if prop == "pool" and current_pool != value:
                current_pool = value
                ret[current_pool] = OrderedDict()
            if prop != "pool":
                ret[current_pool][prop] = value

            current_prop = prop
        else:
            # NOTE: we append the line output to the last property
            #       this should only happen once we hit the config
            #       section
            ret[current_pool][current_prop] = "{}\n{}".format(
                ret[current_pool][current_prop], zpd)

    ## parse config property for each pool
    # NOTE: the config property has some structured data
    #       sadly this data is in a different format than
    #       the rest and it needs further processing
    for pool in ret:
        if "config" not in ret[pool]:
            continue
        header = None
        root_vdev = None
        vdev = None
        dev = None
        rdev = None
        config = ret[pool]["config"]
        config_data = OrderedDict()
        for line in config.splitlines():
            # NOTE: the first line is the header
            #       we grab all the non-whitespace values
            if not header:
                header = line.strip().lower()
                header = [x for x in header.split(" ") if x not in [""]]
                continue

            # NOTE: data is indented by 1 tab, then multiples of 2 spaces
            #       to differentiate root vdev, vdev, and dev
            #
            #       we just strip the initial tab (can't use .strip() here)
            if line[0] == "\t":
                line = line[1:]

            # NOTE: transform data into dict
            stat_data = OrderedDict(
                list(
                    zip(
                        header,
                        [x for x in line.strip().split(" ") if x not in [""]],
                    )))

            # NOTE: decode the zfs values properly
            stat_data = __utils__["zfs.from_auto_dict"](stat_data)

            # NOTE: store stat_data in the proper location
            if line.startswith(" " * 6):
                rdev = stat_data["name"]
                config_data[root_vdev][vdev][dev][rdev] = stat_data
            elif line.startswith(" " * 4):
                rdev = None
                dev = stat_data["name"]
                config_data[root_vdev][vdev][dev] = stat_data
            elif line.startswith(" " * 2):
                rdev = dev = None
                vdev = stat_data["name"]
                config_data[root_vdev][vdev] = stat_data
            else:
                rdev = dev = vdev = None
                root_vdev = stat_data["name"]
                config_data[root_vdev] = stat_data

            # NOTE: name already used as identifier, drop duplicate data
            del stat_data["name"]

        ret[pool]["config"] = config_data

    return ret
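The indentation dispatch for the ``config`` section (multiples of two spaces
once the leading tab is stripped) reduces to a few lines; a standalone sketch
over canned output:

config = (
    'data                       ONLINE\n'
    '  mirror-0                 ONLINE\n'
    '    c0t0d0                 ONLINE\n'
)

tree = {}
root = vdev = None
for line in config.splitlines():
    name = line.split()[0]
    if line.startswith(' ' * 4):    # device level
        tree[root][vdev][name] = {}
    elif line.startswith(' ' * 2):  # vdev level
        vdev = name
        tree[root][vdev] = {}
    else:                           # root vdev
        root = name
        tree[root] = {}
print(tree)  # {'data': {'mirror-0': {'c0t0d0': {}}}}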
Beispiel #52
0
import salt.exceptions
from salt.utils.odict import OrderedDict
from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin

log = logging.getLogger(__name__)

EXT_NAME_MAPPINGS = OrderedDict([
    ('basicConstraints', 'X509v3 Basic Constraints'),
    ('keyUsage', 'X509v3 Key Usage'),
    ('extendedKeyUsage', 'X509v3 Extended Key Usage'),
    ('subjectKeyIdentifier', 'X509v3 Subject Key Identifier'),
    ('authorityKeyIdentifier', 'X509v3 Authority Key Identifier'),
    ('issuerAltName', 'X509v3 Issuer Alternative Name'),
    ('authorityInfoAccess', 'X509v3 Authority Info Access'),
    ('subjectAltName', 'X509v3 Subject Alternative Name'),
    ('crlDistributionPoints', 'X509v3 CRL Distribution Points'),
    ('issuingDistributionPoint', 'X509v3 Issuing Distribution Point'),
    ('certificatePolicies', 'X509v3 Certificate Policies'),
    ('policyConstraints', 'X509v3 Policy Constraints'),
    ('inhibitAnyPolicy', 'X509v3 Inhibit Any Policy'),
    ('nameConstraints', 'X509v3 Name Constraints'),
    ('noCheck', 'X509v3 OCSP No Check'),
    ('nsComment', 'Netscape Comment'),
    ('nsCertType', 'Netscape Certificate Type'),
])

CERT_DEFAULTS = {
    'days_valid': 365,
    'version': 3,
    'serial_bits': 64,
    'algorithm': 'sha256'
}
Beispiel #53
0
    def handle_func(self, func, data):
        '''
        Execute this method in a multiprocess or thread
        '''
        if salt.utils.is_windows():
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        ret = {'id': self.opts.get('id', 'master'),
               'fun': func,
               'jid': '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())}

        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
                fn = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
                with salt.utils.fopen(fn, 'r') as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    log.debug('schedule.handle_func: Checking job against '
                              'fun {0}: {1}'.format(ret['fun'], job))
                    if ret['fun'] == job['fun'] and os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, now '
                            '{0}, maxrunning is {1}'.format(
                                      jobcount, data['maxrunning']))
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job {0} '
                                'was not started, {1} already running'.format(
                                    func, data['maxrunning']))
                            return False

        salt.utils.daemonize_if(self.opts)

        ret['pid'] = os.getpid()

        if 'jid_include' not in data or data['jid_include']:
            log.debug('schedule.handle_func: adding this job to the jobcache '
                      'with data {0}'.format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, 'w+') as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = None
        if 'args' in data:
            args = data['args']

        kwargs = None
        if 'kwargs' in data:
            kwargs = data['kwargs']

        try:
            if args and kwargs:
                ret['return'] = self.functions[func](*args, **kwargs)

            if args and not kwargs:
                ret['return'] = self.functions[func](*args)

            if kwargs and not args:
                ret['return'] = self.functions[func](**kwargs)

            if not kwargs and not args:
                ret['return'] = self.functions[func]()

            data_returner = data.get('returner', None)
            if data_returner or self.schedule_returner:
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, str):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                rets = OrderedDict.fromkeys(rets).keys()
                for returner in rets:
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        ret['success'] = True
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job {0} using invalid returner: {1}. Ignoring.'.format(
                                func, returner
                            )
                        )
        except Exception:
            log.exception("Unhandled exception running {0}".format(ret['fun']))
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
        finally:
            try:
                os.unlink(proc_fn)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # EEXIST is OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '{0}': {1}".format(proc_fn, e.errno))
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
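The four-way call branching above can be collapsed into a single dispatch,
since empty args/kwargs behave the same as omitting them; a standalone sketch:

def dispatch(func, args=None, kwargs=None):
    '''Equivalent to the if args/kwargs branches above.'''
    return func(*(args or ()), **(kwargs or {}))

print(dispatch(sorted, args=([3, 1, 2],)))                            # [1, 2, 3]
print(dispatch(sorted, args=([3, 1, 2],), kwargs={'reverse': True}))  # [3, 2, 1]
print(dispatch(int))                                                  # 0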
Beispiel #54
0
def read_certificate(certificate):
    '''
    Returns a dict containing details of a certificate. Input can be a PEM string or file path.

    certificate:
        The certificate to be read. Can be a path to a certificate file, or a string containing
        the PEM formatted text of the certificate.

    CLI Example:

    .. code-block:: bash

        salt '*' x509.read_certificate /etc/pki/mycert.crt
    '''
    if isinstance(certificate, M2Crypto.X509.X509):
        cert = certificate
    else:
        cert = _get_certificate_obj(certificate)

    ret = {
        # X509 Version 3 has a value of 2 in the field.
        # Version 2 has a value of 1.
        # https://tools.ietf.org/html/rfc5280#section-4.1.2.1
        'Version':
        cert.get_version() + 1,
        # Get size returns in bytes. The world thinks of key sizes in bits.
        'Key Size':
        cert.get_pubkey().size() * 8,
        'Serial Number':
        _dec2hex(cert.get_serial_number()),
        'SHA-256 Finger Print':
        _pretty_hex(cert.get_fingerprint(md='sha256')),
        'MD5 Finger Print':
        _pretty_hex(cert.get_fingerprint(md='md5')),
        'SHA1 Finger Print':
        _pretty_hex(cert.get_fingerprint(md='sha1')),
        'Subject':
        _parse_subject(cert.get_subject()),
        'Subject Hash':
        _dec2hex(cert.get_subject().as_hash()),
        'Issuer':
        _parse_subject(cert.get_issuer()),
        'Issuer Hash':
        _dec2hex(cert.get_issuer().as_hash()),
        'Not Before':
        cert.get_not_before().get_datetime().strftime('%Y-%m-%d %H:%M:%S'),
        'Not After':
        cert.get_not_after().get_datetime().strftime('%Y-%m-%d %H:%M:%S'),
        'Public Key':
        get_public_key(cert.as_pem())
    }

    exts = OrderedDict()
    for ext_index in range(0, cert.get_ext_count()):
        ext = cert.get_ext_at(ext_index)
        name = ext.get_name()
        val = ext.get_value()
        if ext.get_critical():
            val = 'critical ' + val
        exts[name] = val

    if exts:
        ret['X509v3 Extensions'] = exts

    return ret
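The ``_dec2hex`` / ``_pretty_hex`` helpers are not shown in this snippet; a
plausible standalone equivalent that renders an integer (such as a serial
number) as colon-separated hex octets, offered as an assumption rather than
the actual implementation:

def dec2hex(decval):
    '''Render an integer as colon-separated hex octets (illustrative only;
    not necessarily how the real helpers behave).'''
    hexval = '{0:X}'.format(decval)
    if len(hexval) % 2:
        hexval = '0' + hexval
    return ':'.join(hexval[i:i + 2] for i in range(0, len(hexval), 2))

print(dec2hex(65537))  # '01:00:01'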
Beispiel #55
0
def history(zpool=None, internal=False, verbose=False):
    """
    .. versionadded:: 2016.3.0

    Displays the command history of the specified pools, or all pools if no
    pool is specified

    zpool : string
        Optional storage pool

    internal : boolean
        Toggle display of internally logged ZFS events

    verbose : boolean
        Toggle display of the user name, the hostname, and the zone in which
        the operation was performed

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.history myzpool

    """
    ret = OrderedDict()

    ## Configure pool
    # NOTE: initialize the defaults
    flags = []

    # NOTE: set extra config
    if verbose:
        flags.append("-l")
    if internal:
        flags.append("-i")

    ## Lookup history
    res = __salt__["cmd.run_all"](
        __utils__["zfs.zpool_command"](
            command="history",
            flags=flags,
            target=zpool,
        ),
        python_shell=False,
    )

    if res["retcode"] != 0:
        return __utils__["zfs.parse_command_result"](res)
    else:
        pool = "unknown"
        for line in res["stdout"].splitlines():
            if line.startswith("History for"):
                pool = line[13:-2]
                ret[pool] = OrderedDict()
            else:
                if line == "":
                    continue
                log_timestamp = line[0:19]
                log_command = line[20:]
                ret[pool][log_timestamp] = log_command

    return ret
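The fixed-width slicing used on ``zpool history`` output, exercised against
canned text; the sample lines are invented but match the documented format:

from collections import OrderedDict

stdout = (
    "History for 'data':\n"
    '2018-01-06.23:04:12 zpool create data mirror c0t0d0 c0t1d0\n'
    '2018-01-07.03:00:01 zfs snapshot data@backup\n'
)

ret = OrderedDict()
pool = 'unknown'
for line in stdout.splitlines():
    if line.startswith('History for'):
        pool = line[13:-2]                 # strip "History for '" and "':"
        ret[pool] = OrderedDict()
    elif line:
        ret[pool][line[0:19]] = line[20:]  # timestamp is a fixed-width prefix
print(ret['data'])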
Beispiel #56
0
    def test_include(self):
        ret = self.render(include_template)
        self.assertEqual(ret, OrderedDict([
            ('include', ['http']),
        ]))
Beispiel #57
0
def iostat(zpool=None, sample_time=5, parsable=True):
    """
    Display I/O statistics for the given pools

    zpool : string
        optional name of storage pool

    sample_time : int
        seconds to capture data before output;
        by default a sample of 5 seconds is used

    parsable : boolean
        display data in pythonic values (True, False, Bytes, ...)

    .. versionadded:: 2016.3.0
    .. versionchanged:: 2018.3.1

        Added ``parsable`` parameter that defaults to True

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.iostat myzpool

    """
    ret = OrderedDict()

    ## get iostat output
    res = __salt__["cmd.run_all"](
        __utils__["zfs.zpool_command"](command="iostat",
                                       flags=["-v"],
                                       target=[zpool, sample_time, 2]),
        python_shell=False,
    )

    if res["retcode"] != 0:
        return __utils__["zfs.parse_command_result"](res)

    # NOTE: command output for reference
    # =====================================================================
    #                               capacity     operations    bandwidth
    # pool                       alloc   free   read  write   read  write
    # -------------------------  -----  -----  -----  -----  -----  -----
    # mypool                      648G  1.18T     10      6  1.30M   817K
    #   mirror                    648G  1.18T     10      6  1.30M   817K
    #     c0tXXXXCXXXXXXXXXXXd0      -      -      9      5  1.29M   817K
    #     c0tXXXXCXXXXXXXXXXXd0      -      -      9      5  1.29M   817K
    #     c0tXXXXCXXXXXXXXXXXd0      -      -      9      5  1.29M   817K
    # -------------------------  -----  -----  -----  -----  -----  -----
    # =====================================================================

    ## parse iostat output
    # NOTE: hardcode the header
    #       the double header line is hard to parse, we opt to
    #       hardcode the header fields
    header = [
        "name",
        "capacity-alloc",
        "capacity-free",
        "operations-read",
        "operations-write",
        "bandwidth-read",
        "bandwidth-write",
    ]
    root_vdev = None
    vdev = None
    dev = None
    current_data = OrderedDict()
    for line in res["stdout"].splitlines():
        # NOTE: skip header
        if line.strip() == "" or line.strip().split()[-1] in [
                "write", "bandwidth"
        ]:
            continue

        # NOTE: reset pool on line separator
        if line.startswith("-") and line.endswith("-"):
            ret.update(current_data)
            current_data = OrderedDict()
            continue

        # NOTE: transform data into dict
        io_data = OrderedDict(
            list(
                zip(
                    header,
                    [x for x in line.strip().split(" ") if x not in [""]],
                )))

        # NOTE: normalize values
        if parsable:
            # NOTE: raw numbers and pythonic types
            io_data = __utils__["zfs.from_auto_dict"](io_data)
        else:
            # NOTE: human readable zfs types
            io_data = __utils__["zfs.to_auto_dict"](io_data)

        # NOTE: store io_data in the proper location
        if line.startswith(" " * 4):
            dev = io_data["name"]
            current_data[root_vdev][vdev][dev] = io_data
        elif line.startswith(" " * 2):
            dev = None
            vdev = io_data["name"]
            current_data[root_vdev][vdev] = io_data
        else:
            dev = vdev = None
            root_vdev = io_data["name"]
            current_data[root_vdev] = io_data

        # NOTE: name already used as identifier, drop duplicate data
        del io_data["name"]

    return ret
Beispiel #58
0
def list_(properties="size,alloc,free,cap,frag,health",
          zpool=None,
          parsable=True):
    """
    .. versionadded:: 2015.5.0

    Return information about (all) storage pools

    zpool : string
        optional name of storage pool

    properties : string
        comma-separated list of properties to list

    parsable : boolean
        display numbers in parsable (exact) values

        .. versionadded:: 2018.3.0

    .. note::

        The ``name`` property will always be included, while the ``frag``
        property will get removed if not available

    .. note::

        Multiple storage pools can be provided as a space-separated list

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.list
        salt '*' zpool.list zpool=tank
        salt '*' zpool.list 'size,free'
        salt '*' zpool.list 'size,free' tank

    """
    ret = OrderedDict()

    ## update properties
    # NOTE: properties should be a list
    if not isinstance(properties, list):
        properties = properties.split(",")

    # NOTE: name should be first property
    while "name" in properties:
        properties.remove("name")
    properties.insert(0, "name")

    # NOTE: remove 'frags' if we don't have feature flags
    if not __utils__["zfs.has_feature_flags"]():
        while "frag" in properties:
            properties.remove("frag")

    ## collect list output
    res = __salt__["cmd.run_all"](
        __utils__["zfs.zpool_command"](
            command="list",
            flags=["-H"],
            opts={
                "-o": ",".join(properties)
            },
            target=zpool,
        ),
        python_shell=False,
    )

    if res["retcode"] != 0:
        return __utils__["zfs.parse_command_result"](res)

    # NOTE: command output for reference
    # ========================================================================
    # data  1992864825344   695955501056    1296909324288   34  11%     ONLINE
    # =========================================================================

    ## parse list output
    for line in res["stdout"].splitlines():
        # NOTE: transform data into dict
        zpool_data = OrderedDict(
            list(zip(
                properties,
                line.strip().split("\t"),
            )))

        # NOTE: normalize values
        if parsable:
            # NOTE: raw numbers and pythonic types
            zpool_data = __utils__["zfs.from_auto_dict"](zpool_data)
        else:
            # NOTE: human readable zfs types
            zpool_data = __utils__["zfs.to_auto_dict"](zpool_data)

        ret[zpool_data["name"]] = zpool_data
        del ret[zpool_data["name"]]["name"]

    return ret
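Because `-H` makes `zpool list` emit one tab-separated row per pool, zipping
the row against the property list is enough to build the dict. A quick
illustration with a made-up row (all values hypothetical):

from collections import OrderedDict

properties = ["name", "size", "alloc", "free", "cap", "frag", "health"]
line = "data\t1.81T\t648G\t1.18T\t34%\t11%\tONLINE"

zpool_data = OrderedDict(zip(properties, line.strip().split("\t")))
# OrderedDict([('name', 'data'), ('size', '1.81T'), ('alloc', '648G'),
#              ('free', '1.18T'), ('cap', '34%'), ('frag', '11%'),
#              ('health', 'ONLINE')])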
Example #59
    def process_results(self, rows):
        '''
            Take a list of database results and iterate over them,
            merging them into a nested dict.
        '''
        listify = OrderedDict()
        listify_dicts = OrderedDict()
        for ret in rows:
            # crd is the Current Return Data level, to make this non-recursive.
            crd = self.focus
            # Walk and create dicts above the final layer
            for i in range(0, self.depth-1):
                # At the end we'll use listify to find values to make a list of
                if i+1 in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[i] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[i])
                if ret[i] not in crd:
                    # Key missing
                    crd[ret[i]] = {}
                    crd = crd[ret[i]]
                else:
                    # Check type of collision
                    ty = type(crd[ret[i]])
                    if ty is list:
                        # Already made list
                        temp = {}
                        crd[ret[i]].append(temp)
                        crd = temp
                    elif ty is not dict:
                        # Not a list, not a dict
                        if self.as_list:
                            # Make list
                            temp = {}
                            crd[ret[i]] = [crd[ret[i]], temp]
                            crd = temp
                        else:
                            # Overwrite
                            crd[ret[i]] = {}
                            crd = crd[ret[i]]
                    else:
                        # dict, descend.
                        crd = crd[ret[i]]

            # If this test is true, the penultimate field is the key
            if self.depth == self.num_fields - 1:
                nk = self.num_fields-2  # Aka, self.depth-1
                # Should we and will we have a list at the end?
                if ((self.as_list and (ret[nk] in crd)) or
                        (nk+1 in self.with_lists)):
                    if ret[nk] in crd:
                        if type(crd[ret[nk]]) is not list:
                            crd[ret[nk]] = [crd[ret[nk]]]
                        # if it's already a list, do nothing
                    else:
                        crd[ret[nk]] = []
                    crd[ret[nk]].append(ret[self.num_fields-1])
                else:
                    # No clobber checks then
                    crd[ret[nk]] = ret[self.num_fields-1]
            else:
                # Otherwise, the field name is the key but we have a spare.
                # The spare results because of {c: d} vs {c: {"d": d, "e": e }}
                # So, make that last dict
                if ret[self.depth-1] not in crd:
                    crd[ret[self.depth-1]] = {}
                # This bit doesn't escape listify
                if self.depth in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[self.depth-1] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[self.depth-1])
                crd = crd[ret[self.depth-1]]
                # Now for the remaining keys, we put them in to the dict
                for i in range(self.depth, self.num_fields):
                    nk = self.field_names[i]
                    # Listify
                    if i+1 in self.with_lists:
                        if id(crd) not in listify:
                            listify[id(crd)] = []
                            listify_dicts[id(crd)] = crd
                        if nk not in listify[id(crd)]:
                            listify[id(crd)].append(nk)
                    # Collision detection
                    if self.as_list and (nk in crd):
                        # Same as before...
                        if type(crd[nk]) is list:
                            crd[nk].append(ret[i])
                        else:
                            crd[nk] = [crd[nk], ret[i]]
                    else:
                        crd[nk] = ret[i]
        # Get key list and work backwards.  This is inner-out processing
        ks = list(listify_dicts.keys())
        ks.reverse()
        for i in ks:
            d = listify_dicts[i]
            for k in listify[i]:
                if type(d[k]) is dict:
                    d[k] = list(d[k].values())
                elif type(d[k]) is not list:
                    d[k] = [d[k]]
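Stripped of the list handling and collision cases, the core of the walk above
is a fold of each row's leading fields into nested dict keys, with the last
field as the value. A simplified, self-contained sketch (not the class
itself; `depth` and the sample rows are assumptions):

from collections import OrderedDict

def merge_rows(rows, depth=2):
    # Fields before `depth` become nested keys; the final field
    # becomes the value, mirroring the non-list happy path above.
    focus = OrderedDict()
    for row in rows:
        crd = focus
        for key in row[:depth - 1]:
            crd = crd.setdefault(key, OrderedDict())
        crd[row[depth - 1]] = row[depth]
    return focus

rows = [
    ("minion1", "role", "web"),
    ("minion1", "env", "prod"),
    ("minion2", "role", "db"),
]
# merge_rows(rows) yields (roughly):
# {'minion1': {'role': 'web', 'env': 'prod'}, 'minion2': {'role': 'db'}}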
Example #60
    def empty(self):
        self.states = OrderedDict()
        self.requisites = []
        self.includes = []
        self.extends = OrderedDict()
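A reset method like this typically pairs with a constructor that delegates to
it, so freshly built and cleared instances share one code path. A minimal
sketch (the host class name and call site are assumptions, not shown in the
source):

from collections import OrderedDict

class Accumulator:  # hypothetical host class for empty() above
    def __init__(self):
        # A fresh instance is just an emptied one.
        self.empty()

    def empty(self):
        # Drop all collected state so the instance can be reused.
        self.states = OrderedDict()
        self.requisites = []
        self.includes = []
        self.extends = OrderedDict()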