Example #1
from collections import OrderedDict

import click


def _prompt_choice(var_name, options):
    """
    Prompt the user to choose from a list of options, indexing each one with an enumerator.
    Based on https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py#L51

    :param var_name: The question to ask the user
    :type  var_name: ``str``

    :param options: A list of options
    :type  options: ``list`` of ``tuple``

    :rtype: ``tuple``
    :returns: The selected option
    """
    choice_map = OrderedDict((u"{0}".format(i), value) for i, value in enumerate(options, 1) if value[0] != "test")
    choices = choice_map.keys()
    default = u"1"

    choice_lines = [u"{0} - {1} - {2}".format(c[0], c[1][0], c[1][1]) for c in choice_map.items()]
    prompt = u"\n".join(
        (u"Select {0}:".format(var_name), u"\n".join(choice_lines), u"Choose from {0}".format(u", ".join(choices)))
    )

    user_choice = click.prompt(prompt, type=click.Choice(choices), default=default)
    return choice_map[user_choice]
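A minimal usage sketch; the option tuples below are invented for illustration, and the click package is assumed to be installed (note that options whose first element is "test" are filtered out by the comprehension above):

if __name__ == "__main__":
    selected = _prompt_choice(
        "template", [("flask", "Minimal Flask app"), ("django", "Full Django project")]
    )
    print("You chose: {0} ({1})".format(selected[0], selected[1]))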
Example #2
    def merge_tops(self, tops):
        """
        Cleanly merge the top files
        """
        top = collections.defaultdict(dict)
        orders = collections.defaultdict(dict)
        for ctops in tops.values():
            for ctop in ctops:
                for saltenv, targets in ctop.items():
                    if saltenv == "include":
                        continue
                    for tgt in targets:
                        matches = []
                        states = OrderedDict()
                        orders[saltenv][tgt] = 0
                        for comp in ctop[saltenv][tgt]:
                            if isinstance(comp, dict):
                                if "match" in comp:
                                    matches.append(comp)
                                if "order" in comp:
                                    order = comp["order"]
                                    if not isinstance(order, int):
                                        try:
                                            order = int(order)
                                        except ValueError:
                                            order = 0
                                    orders[saltenv][tgt] = order
                            if isinstance(comp, string_types):
                                states[comp] = True
                        top[saltenv][tgt] = matches
                        top[saltenv][tgt].extend(list(states.keys()))
        return self.sort_top_targets(top, orders)
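A hedged sketch of the input this method expects: `tops` maps each top-file source to a list of parsed top files, keyed by saltenv and then by target (values invented for illustration):

# tops = {
#     "base": [
#         {"base": {"*": [{"match": "glob"}, {"order": 10}, "core", "edit.vim"]}}
#     ]
# }
# Dict components feed `matches` (and `order`); string components become the
# state names, de-duplicated in order via the OrderedDict before
# sort_top_targets runs.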
Example #3
class StateRegistry(object):
    """
    The StateRegistry holds all of the states that have been created.
    """

    def __init__(self):
        self.empty()

    def empty(self):
        self.states = OrderedDict()
        self.requisites = []
        self.includes = []
        self.extends = OrderedDict()

    def include(self, *args):
        self.includes += args

    def salt_data(self):
        states = OrderedDict([(id_, state()) for id_, state in self.states.iteritems()])

        if self.includes:
            states["include"] = self.includes

        if self.extends:
            states["extend"] = OrderedDict([(id_, state()) for id_, state in self.extends.iteritems()])

        self.empty()

        return states

    def add(self, id_, state, extend=False):
        if extend:
            attr = self.extends
        else:
            attr = self.states

        if id_ in attr:
            raise DuplicateState("A state with id '%s' already exists" % id_)

        # if we have requisites in our stack then add them to the state
        if len(self.requisites) > 0:
            for req in self.requisites:
                if req.requisite not in state.kwargs:
                    state.kwargs[req.requisite] = []
                state.kwargs[req.requisite].append(req())

        attr[id_] = state

    def extend(self, id_, state):
        self.add(id_, state, extend=True)

    def make_extend(self, name):
        return StateExtend(name)

    def push_requisite(self, requisite):
        self.requisites.append(requisite)

    def pop_requisite(self):
        del self.requisites[-1]
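A hedged sketch of driving the registry (Python 2, to match the iteritems usage above); DuplicateState normally comes from the surrounding salt module, so a stand-in is defined here, and the fake state merely mimics the callable-with-kwargs interface the registry expects:

class DuplicateState(Exception):  # stand-in; salt defines this elsewhere
    pass

class FakeState(object):
    def __init__(self):
        self.kwargs = {}

    def __call__(self):
        return {"pkg.installed": []}

registry = StateRegistry()
registry.add("apache", FakeState())
try:
    registry.add("apache", FakeState())
except DuplicateState as err:
    print(err)  # A state with id 'apache' already exists
print(registry.salt_data())  # OrderedDict([('apache', {'pkg.installed': []})])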
Example #4
def list_vms(search=None, sort=None, order="uuid,type,ram,state,alias", keyed=True):
    """
    Return a list of VMs

    search : string
        vmadm filter property
    sort : string
        vmadm sort (-s) property
    order : string
        vmadm order (-o) property -- Default: uuid,type,ram,state,alias
    keyed : boolean
        specifies whether the output should be a list (False) or a dict (True)
            For a dict the key is the first item from the order parameter.
            Note: if the key is not unique, the last VM wins.

    CLI Example:

    .. code-block:: bash

        salt '*' vmadm.list
        salt '*' vmadm.list order=alias,ram,cpu_cap sort=-ram,-cpu_cap
        salt '*' vmadm.list search='type=KVM'
    """
    ret = {}
    vmadm = _check_vmadm()
    # vmadm list [-p] [-H] [-o field,...] [-s field,...] [field=value ...]
    cmd = "{vmadm} list -p -H {order} {sort} {search}".format(
        vmadm=vmadm,
        order="-o {0}".format(order) if order else "",
        sort="-s {0}".format(sort) if sort else "",
        search=search if search else "",
    )
    res = __salt__["cmd.run_all"](cmd)
    retcode = res["retcode"]
    result = OrderedDict() if keyed else []
    if retcode != 0:
        ret["Error"] = res["stderr"] if "stderr" in res else _exit_status(retcode)
        return ret

    fields = order.split(",")

    for vm in res["stdout"].splitlines():
        vm_data = OrderedDict()
        vm = vm.split(":")
        if keyed:
            for field in fields:
                if fields.index(field) == 0:
                    continue
                vm_data[field.strip()] = vm[fields.index(field)].strip()
            result[vm[0]] = vm_data
        else:
            if len(vm) > 1:
                for field in fields:
                    vm_data[field.strip()] = vm[fields.index(field)].strip()
            else:
                vm_data = vm[0]
            result.append(vm_data)
    return result
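The shape of the keyed result, with invented values; the uuid (the first field in `order`) becomes the dict key and is skipped inside each record:

# result (keyed=True):
# OrderedDict([('f7e2c5ba-...', OrderedDict([('type', 'OS'),
#                                            ('ram', '1024'),
#                                            ('state', 'running'),
#                                            ('alias', 'web01')]))])
# With keyed=False a list of OrderedDicts is returned instead, uuid included.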
Example #5
def list_vms(search=None, verbose=False):
    """
    List all vms

    search : string
        filter vms, see the execution module
    verbose : boolean
        print additional information about the vm

    CLI Example:

    .. code-block:: bash

        salt-run vmadm.list
        salt-run vmadm.list search='type=KVM'
        salt-run vmadm.list verbose=True
    """
    ret = OrderedDict() if verbose else []
    client = salt.client.get_local_client(__opts__["conf_file"])
    try:
        vmadm_args = {}
        vmadm_args["order"] = "uuid,alias,hostname,state,type,cpu_cap,vcpus,ram"
        if search:
            vmadm_args["search"] = search
        for cn in client.cmd_iter(
            "G@virtual:physical and G@os:smartos", "vmadm.list", kwarg=vmadm_args, tgt_type="compound"
        ):
            if not cn:
                continue
            node = next(cn.iterkeys())
            if not isinstance(cn[node], dict) or "ret" not in cn[node] or not isinstance(cn[node]["ret"], dict):
                continue
            for vm in cn[node]["ret"].keys():
                vmcfg = cn[node]["ret"][vm]
                if verbose:
                    ret[vm] = OrderedDict()
                    ret[vm]["hostname"] = vmcfg["hostname"]
                    ret[vm]["alias"] = vmcfg["alias"]
                    ret[vm]["computenode"] = node
                    ret[vm]["state"] = vmcfg["state"]
                    ret[vm]["resources"] = OrderedDict()
                    ret[vm]["resources"]["memory"] = vmcfg["ram"]
                    if vmcfg["type"] == "KVM":
                        ret[vm]["resources"]["cpu"] = "{0:.2f}".format(int(vmcfg["vcpus"]))
                    else:
                        if vmcfg["cpu_cap"] != "":
                            ret[vm]["resources"]["cpu"] = "{0:.2f}".format(int(vmcfg["cpu_cap"]) / 100)
                else:
                    ret.append(vm)
    except SaltClientError as client_error:
        return "{0}".format(client_error)

    if not verbose:
        ret = sorted(ret)

    return ret
Example #6
    def gen_ini(self):
        yield "{0}[{1}]{0}".format(os.linesep, self.name)
        sections_dict = OrderedDict()
        for name, value in six.iteritems(self):
            if com_regx.match(name):
                yield "{0}{1}".format(value, os.linesep)
            elif isinstance(value, _Section):
                sections_dict.update({name: value})
            else:
                yield "{0}{1}{2}{3}".format(
                    name, (" {0} ".format(self.sep) if self.sep != " " else self.sep), value, os.linesep
                )
        for name, value in six.iteritems(sections_dict):
            for line in value.gen_ini():
                yield line
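Roughly what the generator yields for a section named "main" holding one option {"user": "salt"} with sep "=" (illustrative; os.linesep shown as "\n"):

# "\n[main]\n"
# "user = salt\n"
# ...followed by the lines of any nested _Section objects, which are
# deliberately emitted last.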
Example #7
    def test_compare_sls_vs_yaml_with_jinja(self):
        tpl = "{{ data }}"
        env = jinja2.Environment()
        src = "{foo: 1, bar: 2, baz: {qux: true}}"

        sls_src = env.from_string(tpl).render(data=yamlex.deserialize(src))
        yml_src = env.from_string(tpl).render(data=yaml.deserialize(src))

        sls_data = yamlex.deserialize(sls_src)
        yml_data = yaml.deserialize(yml_src)

        # ensure that sls & yaml have the same base
        assert isinstance(sls_data, dict)
        assert isinstance(yml_data, dict)
        # The below has been commented out because something in the loader test
        # is modifying the yaml renderer to render things to unicode. Without
        # running the loader test, the below passes. Even reloading the module
        # from disk does not reset its internal state (per the Python docs).
        ##
        # assert sls_data == yml_data

        # ensure that sls is ordered, while yaml is not
        assert isinstance(sls_data, OrderedDict)
        assert not isinstance(yml_data, OrderedDict)

        # prove that yaml does not handle OrderedDict well,
        # while sls is jinja friendly.
        obj = OrderedDict([("foo", 1), ("bar", 2), ("baz", {"qux": True})])

        sls_obj = yamlex.deserialize(yamlex.serialize(obj))
        try:
            yml_obj = yaml.deserialize(yaml.serialize(obj))
        except SerializationError:
            # BLAAM! yaml was unable to serialize OrderedDict,
            # but it's not the purpose of the current test.
            yml_obj = obj.copy()

        sls_src = env.from_string(tpl).render(data=sls_obj)
        yml_src = env.from_string(tpl).render(data=yml_obj)

        final_obj = yaml.deserialize(sls_src)
        assert obj == final_obj

        # BLAAM! yml_src is not valid!
        final_obj = OrderedDict(yaml.deserialize(yml_src))
        assert obj != final_obj
Example #8
    def test_compare_sls_vs_yaml_with_jinja(self):
        tpl = "{{ data }}"
        env = jinja2.Environment()
        src = "{foo: 1, bar: 2, baz: {qux: true}}"

        sls_src = env.from_string(tpl).render(data=sls.deserialize(src))
        yml_src = env.from_string(tpl).render(data=yaml.deserialize(src))

        sls_data = sls.deserialize(sls_src)
        yml_data = yaml.deserialize(yml_src)

        # ensure that sls & yaml have the same base
        assert isinstance(sls_data, dict)
        assert isinstance(yml_data, dict)
        assert sls_data == yml_data

        # ensure that sls is ordered, while yaml is not
        assert isinstance(sls_data, OrderedDict)
        assert not isinstance(yml_data, OrderedDict)

        # prove that yaml does not handle OrderedDict well,
        # while sls is jinja friendly.
        obj = OrderedDict([("foo", 1), ("bar", 2), ("baz", {"qux": True})])

        sls_obj = sls.deserialize(sls.serialize(obj))
        try:
            yml_obj = yaml.deserialize(yaml.serialize(obj))
        except SerializationError:
            # BLAAM! yaml was unable to serialize OrderedDict,
            # but it's not the purpose of the current test.
            yml_obj = obj.copy()

        sls_src = env.from_string(tpl).render(data=sls_obj)
        yml_src = env.from_string(tpl).render(data=yml_obj)

        final_obj = yaml.deserialize(sls_src)
        assert obj == final_obj

        # BLAAM! yml_src is not valid!
        final_obj = yaml.deserialize(yml_src)
        assert obj != final_obj
Example #9
    def element(self, root=None, abspath=None, **kwargs):
        """
        kwargs contain extra information for custom methods.

        This method must return a valid empty object if no vars are passed
        to allow introspection to create patterns.

        :param root: the root directory that ``abspath`` is relative to
        :param abspath: the absolute path of the file
        """
        if root is None and abspath is None:
            root = os.path.abspath(".")
            abspath = os.path.abspath(".")
        relpath = os.path.relpath(abspath, root)

        try:
            element = self._elements.get(relpath, OrderedDict())
        except AttributeError:
            element = OrderedDict()

        if not element:
            for field in PATHINFO_FIELDS:
                element.setdefault(field, "")
            element["saltenv"] = kwargs.get("saltenv", "base")
            element["relpath"] = relpath
            element["abspath"] = abspath
            element["is_pillar"] = kwargs.get("is_pillar", False)

        cachedir = kwargs.get("cachedir", "")
        if cachedir and os.path.commonprefix([abspath, cachedir]) == cachedir:
            element["cache_root"] = root
        else:
            element["file_root"] = root

        element_hook = kwargs.get("_element_hook", None)
        if element_hook:
            element = element_hook(self, element, **kwargs)

        return element
Example #10
    def merge_tops(self, tops):
        """
        Cleanly merge the top files
        """
        top = collections.defaultdict(OrderedDict)
        orders = collections.defaultdict(OrderedDict)
        for ctops in six.itervalues(tops):
            for ctop in ctops:
                for saltenv, targets in six.iteritems(ctop):
                    if saltenv == "include":
                        continue
                    for tgt in targets:
                        matches = []
                        states = OrderedDict()
                        orders[saltenv][tgt] = 0
                        ignore_missing = False
                        for comp in ctop[saltenv][tgt]:
                            if isinstance(comp, dict):
                                if "match" in comp:
                                    matches.append(comp)
                                if "order" in comp:
                                    order = comp["order"]
                                    if not isinstance(order, int):
                                        try:
                                            order = int(order)
                                        except ValueError:
                                            order = 0
                                    orders[saltenv][tgt] = order
                                if comp.get("ignore_missing", False):
                                    ignore_missing = True
                            if isinstance(comp, six.string_types):
                                states[comp] = True
                        if ignore_missing:
                            if saltenv not in self.ignored_pillars:
                                self.ignored_pillars[saltenv] = []
                            self.ignored_pillars[saltenv].extend(states.keys())
                        top[saltenv][tgt] = matches
                        top[saltenv][tgt].extend(states)
        return self.sort_top_targets(top, orders)
Example #11
    def _uncomment_if_commented(self, opt_key):
        # should be called only if opt_key is not already present
        # will uncomment the key if commented and create a placeholder
        # for the key where the correct value can be updated later
        # used to preserve the ordering of comments and commented options
        # and to make sure options without sections go above any section
        options_backup = OrderedDict()
        comment_index = None
        for key, value in six.iteritems(self):
            if comment_index is not None:
                options_backup.update({key: value})
                continue
            if "#comment" not in key:
                continue
            opt_match = self.opt_regx.match(value.lstrip("#"))
            if opt_match and opt_match.group(2) == opt_key:
                comment_index = key
        for key in options_backup:
            self.pop(key)
        self.pop(comment_index, None)
        super(_Section, self).update({opt_key: None})
        for key, value in six.iteritems(options_backup):
            super(_Section, self).update({key: value})
Example #12
    def __init__(self, match_each=True, **patterns):
        """
        match_each:
            If True, each file path is matched as it is added, which uses less
            memory but sacrifices a little performance.  If False, the complete
            list is matched after all the file information has been added to
            pathinfo.

        patterns:
            Contains the patterns to match.
            Example:
                { 'saltenv': 'base', 'relpath': ['*.sls'] }
        """
        super(PathInfoDict, self).__init__(fields=PATHINFO_FIELDS, match_each=match_each, **patterns)
        self._elements = OrderedDict()
Example #13
def compile(labels, **patterns):  # pylint: disable=W0622
    """
    Compile patterns.

    Args:
        labels: iterable of field labels to build the match pattern from.
        patterns: per-label patterns; the special keys ``_pattern``,
            ``_regex`` and ``_escape`` tweak how the pattern is compiled.
    """
    pattern = patterns.pop("_pattern", None)
    if pattern:
        return pattern

    regex = patterns.pop("_regex", False)
    escape = patterns.pop("_escape", [])

    if not patterns or not labels:
        return None

    for pattern in list(patterns.keys()):
        if pattern not in labels:
            patterns.pop(pattern)

    default_pattern = get_default_pattern(regex)
    escape = escape if escape else []
    _escape_text = functools.partial(escape_text, regex=regex)

    # Set default values and join patterns for each field
    pattern = OrderedDict.fromkeys(labels, None)
    for label in labels:
        if label in patterns and patterns[label]:
            field = patterns[label]
            if isinstance(field, re._pattern_type):  # pylint: disable=W0212
                field = [field.pattern]
            if isinstance(field, six.string_types):
                field = [field]
            if label in escape or not regex:
                field = [_escape_text(text) for text in field]
        else:
            field = default_pattern
        pattern[label] = r"(?:{0})".format(r"|".join(field))

    try:
        return re.compile(r"\n".join(six.itervalues(pattern)), re.MULTILINE | re.DOTALL)
    except NameError:
        raise
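A hypothetical call, assuming pathinfo-style labels; the compiled regex matches the labelled fields joined by newlines, one line per label in label order (how escape_text treats the glob depends on its implementation, which is not shown here):

# pattern = compile(("saltenv", "relpath"), saltenv="base", relpath=["*.sls"])
# if pattern:
#     match = pattern.match("base\ntop.sls")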
Example #14
class StateRegistry(object):
    """
    The StateRegistry holds all of the states that have been created.
    """

    def __init__(self):
        self.empty()

    def empty(self):
        self.states = OrderedDict()
        self.requisites = []

    def salt_data(self):
        states = OrderedDict([(id_, state()) for id_, state in self.states.iteritems()])

        self.empty()

        return states

    def add(self, id_, state):
        if id_ in self.states:
            raise DuplicateState("A state with id '%s' already exists" % id_)

        # if we have requisites in our stack then add them to the state
        if len(self.requisites) > 0:
            for req in self.requisites:
                if req.requisite not in state.kwargs:
                    state.kwargs[req.requisite] = []
                state.kwargs[req.requisite].append(req())

        self.states[id_] = state

    def push_requisite(self, requisite):
        self.requisites.append(requisite)

    def pop_requisite(self):
        del self.requisites[-1]
Example #15
    def empty(self):
        self.states = OrderedDict()
        self.requisites = []
        self.includes = []
        self.extends = OrderedDict()
Example #16
class PathInfoDict(fileinfo.FileInfo):
    def __init__(self, match_each=True, **patterns):
        """
        match_each:
            If True, each file path is matched as it is added, which uses less
            memory but sacrifices a little performance.  If False, the complete
            list is matched after all the file information has been added to
            pathinfo.

        patterns:
            Contains the patterns to match.
            Example:
                { 'saltenv': 'base', 'relpath': ['*.sls'] }
        """
        super(PathInfoDict, self).__init__(fields=PATHINFO_FIELDS, match_each=match_each, **patterns)
        self._elements = OrderedDict()

    @property
    def as_sequence(self):
        if self.pattern and not self.match_each:
            return list(matcher.ifilter(self._elements.values(), _pattern=self.pattern))
        return self._elements.values()

    def element(self, root=None, abspath=None, **kwargs):
        """
        kwargs contain extra information for custom methods.

        This method must return a valid empty object if no vars are passed
        to allow introspection to create patterns.

        :param root: the root directory that ``abspath`` is relative to
        :param abspath: the absolute path of the file
        """
        if root is None and abspath is None:
            root = os.path.abspath(".")
            abspath = os.path.abspath(".")
        relpath = os.path.relpath(abspath, root)

        try:
            element = self._elements.get(relpath, OrderedDict())
        except AttributeError:
            element = OrderedDict()

        if not element:
            for field in PATHINFO_FIELDS:
                element.setdefault(field, "")
            element["saltenv"] = kwargs.get("saltenv", "base")
            element["relpath"] = relpath
            element["abspath"] = abspath
            element["is_pillar"] = kwargs.get("is_pillar", False)

        cachedir = kwargs.get("cachedir", "")
        if cachedir and os.path.commonprefix([abspath, cachedir]) == cachedir:
            element["cache_root"] = root
        else:
            element["file_root"] = root

        element_hook = kwargs.get("_element_hook", None)
        if element_hook:
            element = element_hook(self, element, **kwargs)

        return element

    def add_element(self, element, **kwargs):
        add_hook = kwargs.get("_add_hook", None)
        if add_hook:
            element = add_hook(self, element, **kwargs)

        if element["relpath"] not in self._elements:
            self._elements[element["relpath"]] = element

    def filelist(self, roots, **kwargs):
        """
        :param roots:
            file_roots, pillar_roots, cache_roots, etc. to walk.

        kwargs:
            Contains any extra variables to pass to element.

        """
        for env, destdirs in six.iteritems(roots):
            kwargs["saltenv"] = env
            super(PathInfoDict, self).filelist(destdirs, **kwargs)

        return self.as_sequence
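A hypothetical driver, assuming file_roots-style input; PATHINFO_FIELDS and the fileinfo base class come from the surrounding module:

# pathinfo = PathInfoDict(saltenv="base", relpath=["*.sls"])
# for element in pathinfo.filelist({"base": ["/srv/salt"]}):
#     print(element["saltenv"], element["relpath"])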
Example #17
    def process_results(self, rows):
        """
            This function takes a list of database results and iterates over
            them, merging them into a dict form.
        """
        listify = OrderedDict()
        listify_dicts = OrderedDict()
        for ret in rows:
            # crd is the Current Return Data level, to make this non-recursive.
            crd = self.focus
            # Walk and create dicts above the final layer
            for i in range(0, self.depth - 1):
                # At the end we'll use listify to find values to make a list of
                if i + 1 in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[i] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[i])
                if ret[i] not in crd:
                    # Key missing
                    crd[ret[i]] = {}
                    crd = crd[ret[i]]
                else:
                    # Check type of collision
                    ty = type(crd[ret[i]])
                    if ty is list:
                        # Already made list
                        temp = {}
                        crd[ret[i]].append(temp)
                        crd = temp
                    elif ty is not dict:
                        # Not a list, not a dict
                        if self.as_list:
                            # Make list
                            temp = {}
                            crd[ret[i]] = [crd[ret[i]], temp]
                            crd = temp
                        else:
                            # Overwrite
                            crd[ret[i]] = {}
                            crd = crd[ret[i]]
                    else:
                        # dict, descend.
                        crd = crd[ret[i]]

            # If this test is true, the penultimate field is the key
            if self.depth == self.num_fields - 1:
                nk = self.num_fields - 2  # Aka, self.depth-1
                # Should we and will we have a list at the end?
                if (self.as_list and (ret[nk] in crd)) or (nk + 1 in self.with_lists):
                    if ret[nk] in crd:
                        if type(crd[ret[nk]]) is not list:
                            crd[ret[nk]] = [crd[ret[nk]]]
                        # if it's already a list, do nothing
                    else:
                        crd[ret[nk]] = []
                    crd[ret[nk]].append(ret[self.num_fields - 1])
                else:
                    # No clobber checks then
                    crd[ret[nk]] = ret[self.num_fields - 1]
            else:
                # Otherwise, the field name is the key but we have a spare.
                # The spare results because of {c: d} vs {c: {"d": d, "e": e }}
                # So, make that last dict
                if ret[self.depth - 1] not in crd:
                    crd[ret[self.depth - 1]] = {}
                # This bit doesn't escape listify
                if self.depth in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[self.depth - 1] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[self.depth - 1])
                crd = crd[ret[self.depth - 1]]
                # Now for the remaining keys, we put them in to the dict
                for i in range(self.depth, self.num_fields):
                    nk = self.field_names[i]
                    # Listify
                    if i + 1 in self.with_lists:
                        if id(crd) not in listify:
                            listify[id(crd)] = []
                            listify_dicts[id(crd)] = crd
                        if nk not in listify[id(crd)]:
                            listify[id(crd)].append(nk)
                    # Collision detection
                    if self.as_list and (nk in crd):
                        # Same as before...
                        if type(crd[nk]) is list:
                            crd[nk].append(ret[i])
                        else:
                            crd[nk] = [crd[nk], ret[i]]
                    else:
                        crd[nk] = ret[i]
        # Get key list and work backwards.  This is inner-out processing
        ks = list(listify_dicts.keys())
        ks.reverse()
        for i in ks:
            d = listify_dicts[i]
            for k in listify[i]:
                if type(d[k]) is dict:
                    d[k] = list(d[k].values())
                elif type(d[k]) is not list:
                    d[k] = [d[k]]
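A worked illustration under assumed settings (depth=2, num_fields=3, as_list=False, empty with_lists), showing how flat rows merge into the nested dict rooted at self.focus:

# rows = [("minion1", "role", "web"), ("minion1", "env", "prod")]
# After process_results(rows), self.focus looks like:
#     {"minion1": {"role": "web", "env": "prod"}}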
Example #18
    def handle_func(self, func, data):
        """
        Execute this method in a multiprocess or thread
        """
        if salt.utils.is_windows():
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        ret = {
            "id": self.opts.get("id", "master"),
            "fun": func,
            "jid": "{0:%Y%m%d%H%M%S%f}".format(datetime.datetime.now()),
        }

        proc_fn = os.path.join(salt.minion.get_proc_dir(self.opts["cachedir"]), ret["jid"])

        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        if "jid_include" not in data or data["jid_include"]:
            jobcount = 0
            for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts["cachedir"])):
                fn = os.path.join(salt.minion.get_proc_dir(self.opts["cachedir"]), basefilename)
                with salt.utils.fopen(fn, "r") as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    log.debug("schedule.handle_func: Checking job against " "fun {0}: {1}".format(ret["fun"], job))
                    if ret["fun"] == job["fun"] and os_is_running(job["pid"]):
                        jobcount += 1
                        log.debug(
                            "schedule.handle_func: Incrementing jobcount, now "
                            "{0}, maxrunning is {1}".format(jobcount, data["maxrunning"])
                        )
                        if jobcount >= data["maxrunning"]:
                            log.debug(
                                "schedule.handle_func: The scheduled job {0} "
                                "was not started, {1} already running".format(func, data["maxrunning"])
                            )
                            return False

        salt.utils.daemonize_if(self.opts)

        ret["pid"] = os.getpid()

        if "jid_include" not in data or data["jid_include"]:
            log.debug("schedule.handle_func: adding this job to the jobcache " "with data {0}".format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, "w+") as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = None
        if "args" in data:
            args = data["args"]

        kwargs = None
        if "kwargs" in data:
            kwargs = data["kwargs"]

        try:
            if args and kwargs:
                ret["return"] = self.functions[func](*args, **kwargs)

            if args and not kwargs:
                ret["return"] = self.functions[func](*args)

            if kwargs and not args:
                ret["return"] = self.functions[func](**kwargs)

            if not kwargs and not args:
                ret["return"] = self.functions[func]()

            data_returner = data.get("returner", None)
            if data_returner or self.schedule_returner:
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, str):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                rets = OrderedDict.fromkeys(rets).keys()
                for returner in rets:
                    ret_str = "{0}.returner".format(returner)
                    if ret_str in self.returners:
                        ret["success"] = True
                        self.returners[ret_str](ret)
                    else:
                        log.info("Job {0} using invalid returner: {1} Ignoring.".format(func, returner))
        except Exception:
            log.exception("Unhandled exception running {0}".format(ret["fun"]))
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
        finally:
            try:
                os.unlink(proc_fn)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # ENOENT is OK because the file is already gone, which is
                    # what we wanted
                    pass
                else:
                    log.error("Failed to delete '{0}': {1}".format(proc_fn, e.errno))
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
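The order-preserving de-duplication applied to `rets` above, as a self-contained snippet:

from collections import OrderedDict

returners = ["mysql", "redis", "mysql", "smtp", "redis"]
deduped = list(OrderedDict.fromkeys(returners))
assert deduped == ["mysql", "redis", "smtp"]  # first occurrence wins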
Example #19
def iostat(zpool=None, sample_time=0):
    """
    .. versionchanged:: 2016.3.0

    Display I/O statistics for the given pools

    zpool : string
        optional name of storage pool
    sample_time : int
        seconds to capture data before output

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.iostat myzpool
    """
    ret = OrderedDict()

    # get zpool list data
    zpool_cmd = _check_zpool()
    cmd = "{zpool_cmd} iostat -v{zpool}{sample_time}".format(
        zpool_cmd=zpool_cmd,
        zpool=" {0}".format(zpool) if zpool else "",
        sample_time=" {0} 2".format(sample_time) if sample_time else "",
    )
    res = __salt__["cmd.run_all"](cmd, python_shell=False)
    if res["retcode"] != 0:
        ret["error"] = res["stderr"] if "stderr" in res else res["stdout"]
        return ret

    # note: hardcoded header fields, the double header is hard to parse
    #                                capacity     operations    bandwidth
    # pool                         alloc   free   read  write   read  write
    header = [
        "pool",
        "capacity-alloc",
        "capacity-free",
        "operations-read",
        "operations-write",
        "bandwith-read",
        "bandwith-write",
    ]
    root_vdev = None
    vdev = None
    dev = None
    config_data = None
    current_pool = None
    for line in res["stdout"].splitlines():
        if line.strip() == "":
            continue

        # ignore header
        if line.startswith("pool") and line.endswith("write"):
            continue
        if line.endswith("bandwidth"):
            continue

        if line.startswith("-") and line.endswith("-"):
            if config_data:
                ret[current_pool] = config_data
            config_data = OrderedDict()
            current_pool = None
        else:
            if not isinstance(config_data, salt.utils.odict.OrderedDict):
                continue

            stat_data = OrderedDict()
            stats = [x for x in line.strip().split(" ") if x not in [""]]
            for prop in header:
                if header.index(prop) < len(stats):
                    if prop == "pool":
                        if not current_pool:
                            current_pool = stats[header.index(prop)]
                        continue
                    if stats[header.index(prop)] == "-":
                        continue
                    stat_data[prop] = stats[header.index(prop)]

            dev = line.strip().split()[0]

            if line[0:4] != "    ":
                if line[0:2] == "  ":
                    vdev = line.strip().split()[0]
                    dev = None
                else:
                    root_vdev = line.strip().split()[0]
                    vdev = None
                    dev = None

            if root_vdev:
                if not config_data.get(root_vdev):
                    config_data[root_vdev] = {}
                    if len(stat_data) > 0:
                        config_data[root_vdev] = stat_data
                if vdev:
                    if vdev not in config_data[root_vdev]:
                        config_data[root_vdev][vdev] = {}
                        if len(stat_data) > 0:
                            config_data[root_vdev][vdev] = stat_data
                    if dev and dev not in config_data[root_vdev][vdev]:
                        config_data[root_vdev][vdev][dev] = {}
                        if len(stat_data) > 0:
                            config_data[root_vdev][vdev][dev] = stat_data

    return ret
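The nested shape of the return value, with invented values; per-pool stats sit on the root vdev entry, and vdev/device stats nest beneath it:

# {'mypool':                       # pool name, keyed in ret
#     {'mypool':                   # root vdev line
#         {'capacity-alloc': '1.95G', 'capacity-free': '42.1G',
#          'operations-read': '0', 'operations-write': '1',
#          'bandwidth-read': '1.14K', 'bandwidth-write': '5.43K',
#          'mirror': {'c0t0d0': {'...': '...'}, 'c0t1d0': {'...': '...'}}}}}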
Example #20
def _process_entries(l, entries):
    """Helper for managed() to process entries and return before/after views

    Collect the current database state and update it according to the
    data in :py:func:`managed`'s ``entries`` parameter.  Return the
    current database state and what it will look like after
    modification.

    :param l:
        the LDAP connection object

    :param entries:
        the same object passed to the ``entries`` parameter of
        :py:func:`managed`

    :return:
        an ``(old, new)`` tuple that describes the current state of
        the entries and what they will look like after modification.
        Each item in the tuple is an OrderedDict that maps an entry DN
        to another dict that maps an attribute name to a set of its
        values (it's a set because according to the LDAP spec,
        attribute value ordering is unspecified and there can't be
        duplicates).  The structure looks like this:

            {dn1: {attr1: set([val1])},
             dn2: {attr1: set([val2]), attr2: set([val3, val4])}}

        All of an entry's attributes and values will be included, even
        if they will not be modified.  If an entry mentioned in the
        entries variable doesn't yet exist in the database, the DN in
        ``old`` will be mapped to an empty dict.  If an entry in the
        database will be deleted, the DN in ``new`` will be mapped to
        an empty dict.  All value sets are non-empty:  An attribute
        that will be added to an entry is not included in ``old``, and
        an attribute that will be deleted from an entry is not included
        in ``new``.

        These are OrderedDicts to ensure that the user-supplied
        entries are processed in the user-specified order (in case
        there are dependencies, such as ACL rules specified in an
        early entry that make it possible to modify a later entry).
    """

    old = OrderedDict()
    new = OrderedDict()

    for entries_dict in entries:
        for dn, directives_seq in six.iteritems(entries_dict):
            # get the old entry's state.  first check to see if we've
            # previously processed the entry.
            olde = new.get(dn, None)
            if olde is None:
                # next check the database
                results = __salt__["ldap3.search"](l, dn, "base")
                if len(results) == 1:
                    attrs = results[dn]
                    olde = dict(((attr, set(attrs[attr])) for attr in attrs if len(attrs[attr])))
                else:
                    # nothing, so it must be a brand new entry
                    assert len(results) == 0
                    olde = {}
                old[dn] = olde
            # copy the old entry to create the new (don't do a simple
            # assignment or else modifications to newe will affect
            # olde)
            newe = copy.deepcopy(olde)
            new[dn] = newe

            # process the directives
            entry_status = {"delete_others": False, "mentioned_attributes": set()}
            for directives in directives_seq:
                _update_entry(newe, entry_status, directives)
            if entry_status["delete_others"]:
                to_delete = set()
                for attr in newe:
                    if attr not in entry_status["mentioned_attributes"]:
                        to_delete.add(attr)
                for attr in to_delete:
                    del newe[attr]
    return old, new
Example #21
def format_resolve(value, original_dict=None, global_tries=50, this_call=0, topdb=False):
    """Resolve a dict of formatted strings, mappings & list to a valued dict
    Please also read the associated test::

        {"a": ["{b}", "{c}", "{e}"],
         "b": 1,
         "c": "{d}",
         "d": "{b}",
         "e": "{d}",
        }

        ====>
        {"a": ["1", "1", "{e}"],
         "b": 1,
         "c": "{d}",
         "d": "{b}",
         "e": "{d}",
        }

    """
    if not original_dict:
        original_dict = OrderedDict()
    if this_call == 0 and not original_dict and isinstance(value, dict):
        original_dict = value
    left = False
    cycle = False  # set to True when a substitution cycle is detected

    if isinstance(value, dict):
        new = OrderedDict()
        for key, val in value.items():
            val = format_resolve(val, original_dict, this_call=this_call + 1, topdb=topdb)
            new[key] = val
    elif isinstance(value, (list, tuple)):
        new = type(value)()
        for v in value:
            val = format_resolve(v, original_dict, this_call=this_call + 1, topdb=topdb)
            new = new + type(value)([val])
    elif isinstance(value, basestring):
        new = value
        if "/downloads" in new:
            topdb = True
        # do not directly call format to handle keyerror in original mapping
        # where we may have yet keyerrors
        if isinstance(original_dict, dict):
            for k in original_dict:
                reprk = k
                if not isinstance(reprk, basestring):
                    reprk = "{0}".format(k)
                subst = "{" + reprk + "}"
                subst_val = original_dict[k]
                if subst in new:
                    if isinstance(subst_val, (list, dict)):
                        inner_new = format_resolve(subst_val, original_dict, this_call=this_call + 1, topdb=topdb)
                        # composed, we take the repr
                        if new != subst:
                            new = new.replace(subst, str(inner_new))
                        # no composed value, take the original list
                        else:
                            new = inner_new
                    else:
                        if new != subst_val:
                            new = new.replace(subst, str(subst_val))
        if ("{" in new) and ("}" in new):
            i = 0
            while True:
                try:
                    this_call += 1
                    if this_call > 1000:
                        raise _CycleError("cycle")
                    new_val = format_resolve(new, original_dict, this_call=this_call + 1, topdb=topdb)
                    new_braces = new.count("{"), new.count("}")
                    newval_braces = new_val.count("{"), new_val.count("}")
                    if new_braces == newval_braces:
                        break
                    else:
                        new = new_val
                except _CycleError:
                    cycle = True
                    break
            if ("{" in new) and ("}" in new):
                left = True
    else:
        new = value
    if left:
        if this_call == 0:
            for i in range(global_tries):
                new_val = format_resolve(new, original_dict, this_call=this_call + 1, topdb=topdb)
                if (new == new_val) or cycle:
                    break
                else:
                    new = new_val
        else:
            while not cycle:
                new_val = format_resolve(new, original_dict, this_call=this_call + 1, topdb=topdb)
                if (new == new_val) or (cycle):
                    break
                else:
                    new = new_val
    return new
Example #22
def managed(name, entries, connect_spec=None):
    """Ensure the existence (or not) of LDAP entries and their attributes

    Example:

    .. code-block:: yaml

        ldapi:///:
          ldap.managed:
            - connect_spec:
                bind:
                  method: sasl

            - entries:

              # make sure the entry doesn't exist
              - cn=foo,ou=users,dc=example,dc=com:
                - delete_others: True

              # make sure the entry exists with only the specified
              # attribute values
              - cn=admin,dc=example,dc=com:
                - delete_others: True
                - replace:
                    cn:
                      - admin
                    description:
                      - LDAP administrator
                    objectClass:
                      - simpleSecurityObject
                      - organizationalRole
                    userPassword:
                      - {{pillar.ldap_admin_password}}

              # make sure the entry exists, its olcRootDN attribute
              # has only the specified value, the olcRootDN attribute
              # doesn't exist, and all other attributes are ignored
              - 'olcDatabase={1}hdb,cn=config':
                - replace:
                    olcRootDN:
                      - cn=admin,dc=example,dc=com
                    # the admin entry has its own password attribute
                    olcRootPW: []

              # note the use of 'default'.  also note how you don't
              # have to use list syntax if there is only one attribute
              # value
              - cn=foo,ou=users,dc=example,dc=com:
                - delete_others: True
                - default:
                    userPassword: changeme
                    shadowLastChange: 0
                    # keep sshPublicKey if present, but don't create
                    # the attribute if it is missing
                    sshPublicKey: []
                - replace:
                    cn: foo
                    uid: foo
                    uidNumber: 1000
                    gidNumber: 1000
                    gecos: Foo Bar
                    givenName: Foo
                    sn: Bar
                    homeDirectory: /home/foo
                    loginShell: /bin/bash
                    objectClass:
                      - inetOrgPerson
                      - posixAccount
                      - top
                      - ldapPublicKey
                      - shadowAccount

    :param name:
        The URL of the LDAP server.  This is ignored if
        ``connect_spec`` is either a connection object or a dict with
        a ``'url'`` entry.

    :param entries:
        A description of the desired state of zero or more LDAP
        entries.

        ``entries`` is an iterable of dicts.  The keys of each dict
        are the distinguished names (DNs) of the LDAP entries to
        manage.  Each of these dicts is processed in order.  A later
        dict can reference an LDAP entry that was already mentioned in
        an earlier dict, which makes it possible for later dicts to
        enhance or alter the desired state of an LDAP entry.

        The DNs are mapped to a description of the LDAP entry's
        desired state.  These LDAP entry descriptions are themselves
        iterables of dicts.  Each dict in the iterable is processed in
        order.  They contain directives controlling the entry's state.
        The key names the directive type and the value is state
        information for the directive.  The specific structure of the
        state information depends on the directive type.

        The structure of ``entries`` looks like this::

            [{dn1: [{directive1: directive1_state,
                     directive2: directive2_state},
                    {directive3: directive3_state}],
              dn2: [{directive4: directive4_state,
                     directive5: directive5_state}]},
             {dn3: [{directive6: directive6_state}]}]

        These are the directives:

        * ``'delete_others'``
            Boolean indicating whether to delete attributes not
            mentioned in this dict or any of the other directive
            dicts for this DN.  Defaults to ``False``.

            If you don't want to delete an attribute if present, but
            you also don't want to add it if it is missing or modify
            it if it is present, you can use either the ``'default'``
            directive or the ``'add'`` directive with an empty value
            list.

        * ``'default'``
            A dict mapping an attribute name to an iterable of default
            values for that attribute.  If the attribute already
            exists, it is left alone.  If not, it is created using the
            given list of values.

            An empty value list is useful when you don't want to
            create an attribute if it is missing but you do want to
            preserve it if the ``'delete_others'`` key is ``True``.

        * ``'add'``
            Attribute values to add to the entry.  This is a dict
            mapping an attribute name to an iterable of values to add.

            An empty value list is useful when you don't want to
            create an attribute if it is missing but you do want to
            preserve it if the ``'delete_others'`` key is ``True``.

        * ``'delete'``
            Attribute values to remove from the entry.  This is a dict
            mapping an attribute name to an iterable of values to
            delete from the attribute.  If the iterable is empty, all
            of the attribute's values are deleted.

        * ``'replace'``
            Attributes to replace.  This is a dict mapping an
            attribute name to an iterable of values.  Any existing
            values for the attribute are deleted, then the given
            values are added.  The iterable may be empty.

        In the above directives, the iterables of attribute values may
        instead be ``None``, in which case an empty list is used, or a
        scalar such as a string or number, in which case a new list
        containing the scalar is used.

        Note that if all attribute values are removed from an entry,
        the entire entry is deleted.

    :param connect_spec:
        See the description of the ``connect_spec`` parameter of the
        :py:func:`ldap3.connect <salt.modules.ldap3.connect>` function
        in the :py:mod:`ldap3 <salt.modules.ldap3>` execution module.
        If this is a dict and the ``'url'`` entry is not specified,
        the ``'url'`` entry is set to the value of the ``name``
        parameter.

    :returns:
        A dict with the following keys:

        * ``'name'``
            This is the same object passed to the ``name`` parameter.

        * ``'changes'``
            This is a dict describing the changes made (or, in test
            mode, the changes that would have been attempted).  If no
            changes were made (or no changes would have been
            attempted), then this dict is empty.  Only successful
            changes are included.

            Each key is a DN of an entry that was changed (or would
            have been changed).  Entries that were not changed (or
            would not have been changed) are not included.  The value
            is a dict with two keys:

            * ``'old'``
                The state of the entry before modification.  If the
                entry did not previously exist, this key maps to
                ``None``.  Otherwise, the value is a dict mapping each
                of the old entry's attributes to a list of its values
                before any modifications were made.  Unchanged
                attributes are excluded from this dict.

            * ``'new'``
                The state of the entry after modification.  If the
                entry was deleted, this key maps to ``None``.
                Otherwise, the value is a dict mapping each of the
                entry's attributes to a list of its values after the
                modifications were made.  Unchanged attributes are
                excluded from this dict.

            Example ``'changes'`` dict where a new entry was created
            with a single attribute containing two values::

                {'dn1': {'old': None,
                         'new': {'attr1': ['val1', 'val2']}}}

            Example ``'changes'`` dict where a new attribute was added
            to an existing entry::

                {'dn1': {'old': {},
                         'new': {'attr2': ['val3']}}}

        * ``'result'``
            One of the following values:

            * ``True`` if no changes were necessary or if all changes
              were applied successfully.
            * ``False`` if at least one change was unable to be applied.
            * ``None`` if changes would have been applied but Salt is
              running in test mode.
    """
    if connect_spec is None:
        connect_spec = {}
    try:
        connect_spec.setdefault("url", name)
    except AttributeError:
        # already a connection object
        pass

    connect = __salt__["ldap3.connect"]

    # hack to get at the ldap3 module to access the ldap3.LDAPError
    # exception class.  https://github.com/saltstack/salt/issues/27578
    ldap3 = inspect.getmodule(connect)

    with connect(connect_spec) as l:

        old, new = _process_entries(l, entries)

        # collect all of the affected entries (only the key is
        # important in this dict; would have used an OrderedSet if
        # there was one)
        dn_set = OrderedDict()
        dn_set.update(old)
        dn_set.update(new)

        # do some cleanup
        dn_to_delete = set()
        for dn in dn_set:
            o = old.get(dn, {})
            n = new.get(dn, {})
            for x in o, n:
                to_delete = set()
                for attr, vals in six.iteritems(x):
                    if not len(vals):
                        # clean out empty attribute lists
                        to_delete.add(attr)
                for attr in to_delete:
                    del x[attr]
            if o == n:
                # clean out unchanged entries
                dn_to_delete.add(dn)
        for dn in dn_to_delete:
            for x in old, new:
                x.pop(dn, None)
            del dn_set[dn]

        ret = {"name": name, "changes": {}, "result": None, "comment": ""}

        if old == new:
            ret["comment"] = "LDAP entries already set"
            ret["result"] = True
            return ret

        if __opts__["test"]:
            ret["comment"] = "Would change LDAP entries"
            changed_old = old
            changed_new = new
            success_dn_set = dn_set
        else:
            # execute the changes
            changed_old = OrderedDict()
            changed_new = OrderedDict()
            # assume success; these will be changed on error
            ret["result"] = True
            ret["comment"] = "Successfully updated LDAP entries"
            errs = []
            success_dn_set = OrderedDict()
            for dn in dn_set:
                o = old.get(dn, {})
                n = new.get(dn, {})

                try:
                    # perform the operation
                    if len(o):
                        if len(n):
                            op = "modify"
                            assert o != n
                            __salt__["ldap3.change"](l, dn, o, n)
                        else:
                            op = "delete"
                            __salt__["ldap3.delete"](l, dn)
                    else:
                        op = "add"
                        assert len(n)
                        __salt__["ldap3.add"](l, dn, n)

                    # update these after the op in case an exception
                    # is raised
                    changed_old[dn] = o
                    changed_new[dn] = n
                    success_dn_set[dn] = True
                except ldap3.LDAPError:
                    log.exception("failed to %s entry %s", op, dn)
                    errs.append((op, dn))
                    continue

            if len(errs):
                ret["result"] = False
                ret["comment"] = "failed to " + ", ".join((op + " entry " + dn for op, dn in errs))

    # set ret['changes'].  filter out any unchanged attributes, and
    # convert the value sets to lists before returning them to the
    # user (sorted for easier comparisons)
    for dn in success_dn_set:
        o = changed_old.get(dn, {})
        n = changed_new.get(dn, {})
        changes = {}
        ret["changes"][dn] = changes
        for x, xn in ((o, "old"), (n, "new")):
            if not len(x):
                changes[xn] = None
                continue
            changes[xn] = dict(
                ((attr, sorted(vals)) for attr, vals in six.iteritems(x) if o.get(attr, ()) != n.get(attr, ()))
            )

    return ret
Example #23
# -*- coding: utf-8 -*-
"""
Application Kinds of Salt apps.
These are used to indicate what kind of Application is using RAET
"""
from __future__ import absolute_import
from collections import namedtuple
from salt.utils.odict import OrderedDict

# Python equivalent of an enum
APPL_KINDS = OrderedDict([("master", 0), ("minion", 1), ("syndic", 2), ("caller", 3)])
APPL_KIND_NAMES = OrderedDict((v, k) for k, v in list(APPL_KINDS.items()))  # inverse map
ApplKind = namedtuple("ApplKind", list(APPL_KINDS.keys()))
applKinds = ApplKind(**APPL_KINDS)
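
A minimal usage sketch of the namedtuple-based enum above; run directly after the definitions, these assertions (illustrative, not from the source) hold:

# Forward lookup: kind name -> numeric value, via attribute access.
assert applKinds.minion == 1
# Reverse lookup: numeric value -> kind name, via the inverse map.
assert APPL_KIND_NAMES[2] == "syndic"
# Membership test against the declared kinds.
assert "caller" in APPL_KINDS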
Beispiel #24
    def gen_functions(self, pack=None, virtual_enable=True, whitelist=None, provider_overrides=False):
        """
        Return a dict of functions found in the defined module_dirs
        """
        funcs = OrderedDict()
        self.load_modules()
        for mod in self.modules:
            # If this is a proxy minion then MOST modules cannot work.  Therefore, require that
            # any module that does work with salt-proxy-minion define __proxyenabled__ as a list
            # containing the names of the proxy types that the module supports.
            if not hasattr(mod, "render") and "proxy" in self.opts:
                if not hasattr(mod, "__proxyenabled__"):
                    # This is a proxy minion but this module doesn't support proxy
                    # minions at all
                    continue
                if not (self.opts["proxy"]["proxytype"] in mod.__proxyenabled__ or "*" in mod.__proxyenabled__):
                    # This is a proxy minion, this module supports proxy
                    # minions, but not this particular minion
                    log.debug(mod)
                    continue

            if hasattr(mod, "__opts__"):
                mod.__opts__.update(self.opts)
            else:
                mod.__opts__ = self.opts

            mod.__grains__ = self.grains
            mod.__pillar__ = self.pillar

            if pack:
                if isinstance(pack, list):
                    for chunk in pack:
                        if not isinstance(chunk, dict):
                            continue
                        try:
                            setattr(mod, chunk["name"], chunk["value"])
                        except KeyError:
                            pass
                else:
                    setattr(mod, pack["name"], pack["value"])

            # Call a module's initialization method if it exists
            if hasattr(mod, "__init__"):
                if callable(mod.__init__):
                    try:
                        mod.__init__(self.opts)
                    except TypeError:
                        pass

            # Trim the full pathname to just the module name;
            # this is the short name that other Salt modules and states
            # will use to refer to it.
            module_name = mod.__name__.rsplit(".", 1)[-1]

            if virtual_enable:
                # if virtual modules are enabled, we need to look for the
                # __virtual__() function inside that module and run it.
                (virtual_ret, virtual_name) = self.process_virtual(mod, module_name)

                # if process_virtual returned a non-True value then we are
                # supposed to not process this module
                if virtual_ret is not True:
                    continue

                # update our module name to reflect the virtual name
                module_name = virtual_name

            if whitelist:
                # If a whitelist is defined then only load the module if it is
                # in the whitelist
                if module_name not in whitelist:
                    continue

            # load the functions from the module and update our dict
            funcs.update(self.load_functions(mod, module_name))

        # Handle provider overrides
        if provider_overrides and self.opts.get("providers", False):
            if isinstance(self.opts["providers"], dict):
                for mod, provider in self.opts["providers"].items():
                    newfuncs = raw_mod(self.opts, provider, funcs)
                    if newfuncs:
                        for newfunc in newfuncs:
                            f_key = "{0}{1}".format(mod, newfunc[newfunc.rindex(".") :])
                            funcs[f_key] = newfuncs[newfunc]

        # now that all the functions have been collected, iterate back over
        # the available modules and inject the special __salt__ namespace that
        # contains these functions.
        for mod in self.modules:
            if not hasattr(mod, "__salt__") or (
                not in_pack(pack, "__salt__")
                and (
                    not str(mod.__name__).startswith("salt.loaded.int.grain")
                    and not str(mod.__name__).startswith("salt.loaded.ext.grain")
                )
            ):
                mod.__salt__ = funcs
            elif not in_pack(pack, "__salt__") and (
                str(mod.__name__).startswith("salt.loaded.int.grain")
                or str(mod.__name__).startswith("salt.loaded.ext.grain")
            ):
                mod.__salt__.update(funcs)
        return funcs
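
The namespace-injection loop above calls an in_pack() helper that is not part of this snippet. A minimal sketch of what it might look like, inferred only from how pack is consumed earlier in gen_functions (a single {"name": ..., "value": ...} dict, or a list of such chunks); this is an assumption, not the actual Salt implementation:

def in_pack(pack, name):
    """Return True if `name` is among the names injected via `pack`."""
    if not pack:
        return False
    if isinstance(pack, list):
        # pack is a list of {"name": ..., "value": ...} chunks
        return any(isinstance(chunk, dict) and chunk.get("name") == name for chunk in pack)
    # pack is a single {"name": ..., "value": ...} dict
    return isinstance(pack, dict) and pack.get("name") == name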
Beispiel #26
    def handle_func(self, func, data):
        """
        Execute this method in a multiprocess or thread
        """
        if salt.utils.is_windows():
            # Since function references can't be pickled and pickling
            # is required when spawning new processes on Windows, regenerate
            # the functions and returners.
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        ret = {
            "id": self.opts.get("id", "master"),
            "fun": func,
            "schedule": data["name"],
            "jid": salt.utils.jid.gen_jid(),
        }

        if "metadata" in data:
            if isinstance(data["metadata"], dict):
                ret["metadata"] = data["metadata"]
                ret["metadata"]["_TOS"] = self.time_offset
                ret["metadata"]["_TS"] = time.ctime()
                ret["metadata"]["_TT"] = time.strftime("%Y %B %d %a %H %m", time.gmtime())
            else:
                log.warning("schedule: The metadata parameter must be specified as a dictionary. Ignoring.")

        salt.utils.appendproctitle(ret["jid"])

        proc_fn = os.path.join(salt.minion.get_proc_dir(self.opts["cachedir"]), ret["jid"])

        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all of this.
        # NOTE: jid_include defaults to True, so if it is missing from the
        # data dict we treat it as present and True.
        if "jid_include" not in data or data["jid_include"]:
            jobcount = 0
            for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts["cachedir"])):
                fn_ = os.path.join(salt.minion.get_proc_dir(self.opts["cachedir"]), basefilename)
                if not os.path.exists(fn_):
                    log.debug("schedule.handle_func: {0} was processed in another thread, skipping.".format(basefilename))
                    continue
                with salt.utils.fopen(fn_, "rb") as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    if job:
                        if "schedule" in job:
                            log.debug("schedule.handle_func: Checking job against fun {0}: {1}".format(ret["fun"], job))
                            if ret["schedule"] == job["schedule"] and os_is_running(job["pid"]):
                                jobcount += 1
                                log.debug(
                                    "schedule.handle_func: Incrementing jobcount, now "
                                    "{0}, maxrunning is {1}".format(jobcount, data["maxrunning"])
                                )
                                if jobcount >= data["maxrunning"]:
                                    log.debug(
                                        "schedule.handle_func: The scheduled job {0} "
                                        "was not started, {1} already running".format(
                                            ret["schedule"], data["maxrunning"]
                                        )
                                    )
                                    return False
                    else:
                        try:
                            log.info("Invalid job file found.  Removing.")
                            os.remove(fn_)
                        except OSError:
                            log.info("Unable to remove file: {0}.".format(fn_))

        salt.utils.daemonize_if(self.opts)

        ret["pid"] = os.getpid()

        if "jid_include" not in data or data["jid_include"]:
            log.debug("schedule.handle_func: adding this job to the jobcache with data {0}".format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, "w+b") as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if "args" in data:
            args = data["args"]

        kwargs = {}
        if "kwargs" in data:
            kwargs = data["kwargs"]
        # if the func supports **kwargs, let's pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                kwargs["__pub_{0}".format(key)] = val

        try:
            ret["return"] = self.functions[func](*args, **kwargs)

            data_returner = data.get("returner", None)
            if data_returner or self.schedule_returner:
                if "returner_config" in data:
                    ret["ret_config"] = data["returner_config"]
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, str):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = "{0}.returner".format(returner)
                    if ret_str in self.returners:
                        ret["success"] = True
                        self.returners[ret_str](ret)
                    else:
                        log.info("Job {0} using invalid returner: {1}. Ignoring.".format(func, returner))

            if "return_job" in data and not data["return_job"]:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                mret["jid"] = "req"
                channel = salt.transport.Channel.factory(self.opts, usage="salt_schedule")
                load = {"cmd": "_return", "id": self.opts["id"]}
                for key, value in six.iteritems(mret):
                    load[key] = value
                channel.send(load)

        except Exception:
            log.exception("Unhandled exception running {0}".format(ret["fun"]))
            # Although catch-all exception handlers are generally bad practice,
            # the alternative here is to let the exception bubble up to the top
            # of the thread context, where the thread would die silently, which
            # is worse.
        finally:
            try:
                log.debug("schedule.handle_func: Removing {0}".format(proc_fn))
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno))
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
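
For context, a hypothetical data dict as handle_func might receive it, restricted to the keys the method actually reads above (all values are illustrative):

data = {
    "name": "job1",             # becomes ret["schedule"]
    "jid_include": True,        # opt into the maxrunning/proc-dir bookkeeping
    "maxrunning": 1,            # allow at most one concurrent run of this job
    "args": ["echo hello"],     # positional args for the scheduled function
    "kwargs": {},               # keyword args for the scheduled function
    "returner": "local_cache",  # hypothetical returner name
    "return_job": True,         # also report the result back to the master
}
# self.handle_func("cmd.run", data)  # hypothetical invocation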
Beispiel #27
    def serialize(cls, id_=None):
        # The order matters
        serialized = OrderedDict()
        if id_ is not None:
            # This is meant as a configuration section, i.e. a sub JSON schema
            serialized["id"] = "{0}/{1}.json#".format(BASE_SCHEMA_URL, id_)
        else:
            # Main configuration block, json schema
            serialized["$schema"] = "http://json-schema.org/draft-04/schema#"
        if cls.title is not None:
            serialized["title"] = cls.title
        if cls.description is not None:
            if cls.description == cls.__doc__:
                serialized["description"] = textwrap.dedent(cls.description).strip()
            else:
                serialized["description"] = cls.description

        required = []
        ordering = []
        serialized["type"] = "object"
        properties = OrderedDict()
        cls.after_items_update = []
        for name in cls._order:
            skip_order = False
            if name in cls._sections:
                section = cls._sections[name]
                serialized_section = section.serialize(None if section.__flatten__ is True else name)
                if section.__flatten__ is True:
                    # Flatten the configuration section into the parent
                    # configuration
                    properties.update(serialized_section["properties"])
                    if "x-ordering" in serialized_section:
                        ordering.extend(serialized_section["x-ordering"])
                    if "required" in serialized_section:
                        required.extend(serialized_section["required"])
                    if hasattr(section, "after_items_update"):
                        cls.after_items_update.extend(section.after_items_update)
                    skip_order = True
                else:
                    # Store it as a configuration section
                    properties[name] = serialized_section

            if name in cls._items:
                config = cls._items[name]
                # Handle the configuration items defined in the class instance
                if config.__flatten__ is True:
                    serialized_config = config.serialize()
                    cls.after_items_update.append(serialized_config)
                    skip_order = True
                else:
                    properties[name] = config.serialize()

                if config.required:
                    # If it's a required item, add it to the required list
                    required.append(name)

            if skip_order is False:
                # Store the order of the item
                if name not in ordering:
                    ordering.append(name)

        if properties:
            serialized["properties"] = properties

        # Update the serialized object with any items to include after properties
        if cls.after_items_update:
            after_items_update = {}
            for entry in cls.after_items_update:
                name, data = next(six.iteritems(entry))
                if name in after_items_update:
                    after_items_update[name].extend(data)
                else:
                    after_items_update[name] = data
            serialized.update(after_items_update)

        if required:
            # Only include required if not empty
            serialized["required"] = required
        if ordering:
            # Only include ordering if not empty
            serialized["x-ordering"] = ordering
        serialized["additionalProperties"] = cls.__allow_additional_items__
        return serialized
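
For orientation, a sketch of the shape serialize() produces for a top-level schema (id_ is None, so "$schema" is set instead of "id"; the title, items, and ordering are illustrative, not from the source):

from collections import OrderedDict

example = OrderedDict([
    ("$schema", "http://json-schema.org/draft-04/schema#"),
    ("title", "My Config"),                     # hypothetical cls.title
    ("description", "Example configuration."),  # hypothetical cls.description
    ("type", "object"),
    ("properties", OrderedDict([("some_item", {"type": "string"})])),
    ("required", ["some_item"]),                # present only if non-empty
    ("x-ordering", ["some_item"]),              # present only if non-empty
    ("additionalProperties", False),
])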