def list_tables(family="ipv4"): """ Return a data structure of the current, in-memory tables CLI Example: .. code-block:: bash salt '*' nftables.list_tables salt '*' nftables.list_tables family=ipv6 """ nft_family = _NFTABLES_FAMILIES[family] tables = [] cmd = "{} --json --numeric --numeric --numeric list tables {}".format( _nftables_cmd(), nft_family) out = __salt__["cmd.run"](cmd, python_shell=False) if not out: return tables try: data = json.loads(out) except ValueError: return tables if not data or not data.get("nftables"): return tables for item in data.get("nftables", []): if "metainfo" not in item: tables.append(item["table"]) log.debug(tables) return tables
def __query_node_data(vm_name):
    data = show_instance(vm_name, call='action')
    if not data:
        # Trigger an error in the wait_for_ip function
        return False
    if data.get('PublicIpAddress', None) is not None:
        return data

def list_tables(family='ipv4'):
    '''
    Return a data structure of the current, in-memory tables

    CLI Example:

    .. code-block:: bash

        salt '*' nftables.list_tables

        salt '*' nftables.list_tables family=ipv6
    '''
    nft_family = _NFTABLES_FAMILIES[family]
    tables = []
    cmd = '{0} --json --numeric --numeric --numeric ' \
          'list tables {1}'.format(_nftables_cmd(), nft_family)
    out = __salt__['cmd.run'](cmd, python_shell=False)
    if not out:
        return tables

    data = json.loads(out)
    for item in data.get('nftables', []):
        if 'metainfo' not in item:
            tables.append(item['table'])
    log.debug(tables)
    return tables

def shell_jobber(self):
    '''
    Shell jobber start!
    '''
    while self.fun.value:
        msg = self.fun.value.popleft()
        data = msg.get('pub')
        match = getattr(
            self.matcher.value,
            '{0}_match'.format(
                data.get('tgt_type', 'glob')
            )
        )(data['tgt'])
        if not match:
            continue
        fun = data['fun']
        if fun in self.modules.value:
            func = self.modules.value[fun]
        else:
            continue
        args, kwargs = salt.minion.load_args_and_kwargs(
            func,
            salt.utils.args.parse_input(
                data['arg'],
                no_parse=data.get('no_parse', [])),
            data)
        cmd = ['salt-call',
               '--out', 'json',
               '--metadata',
               '-c', salt.syspaths.CONFIG_DIR]
        if 'return' in data:
            cmd.append('--return')
            cmd.append(data['return'])
        cmd.append(fun)
        for arg in args:
            cmd.append(arg)
        for key in kwargs:
            cmd.append('{0}={1}'.format(key, kwargs[key]))
        que = {'pub': data,
               'msg': msg}
        que['proc'] = subprocess.Popen(
            cmd,
            shell=False,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE)
        self.shells.value[data['jid']] = que

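# A hypothetical job payload and the argv list shell_jobber() would build for
# it. The CONFIG_DIR value and the args/kwargs are assumptions (they would
# normally come from salt.syspaths and load_args_and_kwargs); this is only an
# illustration of the command construction above.
CONFIG_DIR = '/etc/salt'  # stand-in for salt.syspaths.CONFIG_DIR
data = {'fun': 'test.arg', 'return': 'local', 'jid': '20230101000000000000'}
args, kwargs = ['hello'], {'extra': 'value'}

cmd = ['salt-call', '--out', 'json', '--metadata', '-c', CONFIG_DIR]
if 'return' in data:
    cmd.extend(['--return', data['return']])
cmd.append(data['fun'])
cmd.extend(args)
cmd.extend('{0}={1}'.format(key, val) for key, val in kwargs.items())
assert cmd == ['salt-call', '--out', 'json', '--metadata', '-c', '/etc/salt',
               '--return', 'local', 'test.arg', 'hello', 'extra=value']
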
def _query_node_data(instance_id):
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    if data.get('private_ips', []):
        return data

def _query_node_data(instance_id): data = show_instance(instance_id, call="action") if not data: return False if data.get("private_ips", []): return data
def get_minion_data(minion, opts): """ Get the grains/pillar for a specific minion. If minion is None, it will return the grains/pillar for the first minion it finds. Return value is a tuple of the minion ID, grains, and pillar """ grains = None pillar = None if opts.get("minion_data_cache", False): cache = salt.cache.factory(opts) if minion is None: for id_ in cache.list("minions"): data = cache.fetch("minions/{}".format(id_), "data") if data is None: continue else: data = cache.fetch("minions/{}".format(minion), "data") if data is not None: grains = data.get("grains", None) pillar = data.get("pillar", None) return minion if minion else None, grains, pillar
def get_minion_data(minion, opts):
    '''
    Get the grains/pillar for a specific minion.  If minion is None, it
    will return the grains/pillar for the first minion it finds.

    Return value is a tuple of the minion ID, grains, and pillar
    '''
    grains = None
    pillar = None
    if opts.get('minion_data_cache', False):
        cache = salt.cache.factory(opts)
        if minion is None:
            for id_ in cache.list('minions'):
                data = cache.fetch('minions/{0}'.format(id_), 'data')
                if data is None:
                    continue
        else:
            data = cache.fetch('minions/{0}'.format(minion), 'data')
        if data is not None:
            grains = data.get('grains', None)
            pillar = data.get('pillar', None)
    return minion if minion else None, grains, pillar

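# A hedged usage sketch for get_minion_data(). The opts below are a
# hypothetical minimal master configuration; a real run needs a populated
# minion data cache under cachedir (additional opts may be required by
# salt.cache.factory).
opts = {
    'minion_data_cache': True,
    'cache': 'localfs',
    'cachedir': '/var/cache/salt/master',
}
minion_id, grains, pillar = get_minion_data('web01', opts)
if grains is not None:
    print(minion_id, grains.get('os'))
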
def action(self):
    '''
    Pull the queue for functions to execute
    '''
    while self.fun.value:
        msg = self.fun.value.popleft()
        data = msg.get('pub')
        match = getattr(
            self.matcher.value,
            '{0}_match'.format(
                data.get('tgt_type', 'glob')
            )
        )(data['tgt'])
        if not match:
            continue
        if 'user' in data:
            log.info(
                'User {0[user]} Executing command {0[fun]} with jid '
                '{0[jid]}'.format(data))
        else:
            log.info(
                'Executing command {0[fun]} with jid {0[jid]}'.format(data)
            )
        log.debug('Command details {0}'.format(data))

        if is_windows():
            # SaltRaetNixJobber is not picklable. Pickling is necessary
            # when spawning a process in Windows. Since the process will
            # be spawned and joined on non-Windows platforms, instead of
            # this, just run the function directly and absorb any thrown
            # exceptions.
            try:
                self.proc_run(msg)
            except Exception as exc:
                log.error(
                    'Exception caught by jobber: {0}'.format(exc),
                    exc_info=True)
        else:
            process = multiprocessing.Process(
                target=self.proc_run,
                kwargs={'msg': msg}
            )
            process.start()
            process.join()

def _blkid_output(out): """ Parse blkid output. """ flt = lambda data: [el for el in data if el.strip()] data = {} for dev_meta in flt(out.split("\n\n")): dev = {} for items in flt(dev_meta.strip().split("\n")): key, val = items.split("=", 1) dev[key.lower()] = val if dev.pop("type", None) == "xfs": dev["label"] = dev.get("label") data[dev.pop("devname")] = dev mounts = _get_mounts() for device in mounts: if data.get(device): data[device].update(mounts[device]) return data
def _blkid_output(out):
    '''
    Parse blkid output.
    '''
    flt = lambda data: [el for el in data if el.strip()]
    data = {}
    for dev_meta in flt(out.split("\n\n")):
        dev = {}
        for items in flt(dev_meta.strip().split("\n")):
            key, val = items.split("=", 1)
            dev[key.lower()] = val
        if dev.pop("type") == "xfs":
            dev['label'] = dev.get('label')
            data[dev.pop("devname")] = dev

    mounts = _get_mounts()
    for device in six.iterkeys(mounts):
        if data.get(device):
            data[device].update(mounts[device])

    return data

def _xfs_prune_output(out, uuid):
    '''
    Parse prune output.
    '''
    data = {}
    cnt = []
    cutpoint = False
    for line in [l.strip() for l in out.split("\n") if l]:
        if line.startswith("-"):
            if cutpoint:
                break
            else:
                cutpoint = True
                continue
        if cutpoint:
            cnt.append(line)

    for kset in [e for e in cnt[1:] if ':' in e]:
        key, val = [t.strip() for t in kset.split(":", 1)]
        data[key.lower().replace(" ", "_")] = val

    return data.get('uuid') == uuid and data or {}

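# _xfs_prune_output() keys off dashed separator lines: content between the
# first and second dashed line is collected, the first collected line is
# skipped, and the remaining "key: value" pairs become the dict. A sample
# (invented values) of the prune-style output it can digest:
sample = """\
--------------
prune /dev/sda1
uuid          : 01234567-89ab-cdef-0123-456789abcdef
blocks pruned : 42
--------------
trailing noise
"""
print(_xfs_prune_output(sample, "01234567-89ab-cdef-0123-456789abcdef"))
# -> {'uuid': '01234567-...', 'blocks_pruned': '42'} when the UUID matches,
#    otherwise {}
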
def update():
    '''
    Execute an svn update on all of the repos
    '''
    # data for the fileserver event
    data = {'changed': False,
            'backend': 'svnfs'}
    # _clear_old_remotes runs init(), so use the value from there to avoid a
    # second init()
    data['changed'], repos = _clear_old_remotes()
    for repo in repos:
        if os.path.exists(repo['lockfile']):
            log.warning(
                'Update lockfile is present for svnfs remote %s, skipping. '
                'If this warning persists, it is possible that the update '
                'process was interrupted. Removing %s or running '
                '\'salt-run fileserver.clear_lock svnfs\' will allow updates '
                'to continue for this remote.', repo['url'], repo['lockfile']
            )
            continue
        _, errors = lock(repo)
        if errors:
            log.error(
                'Unable to set update lock for svnfs remote %s, skipping.',
                repo['url']
            )
            continue
        log.debug('svnfs is fetching from %s', repo['url'])
        old_rev = _rev(repo)
        try:
            CLIENT.update(repo['repo'])
        except pysvn._pysvn.ClientError as exc:
            log.error(
                'Error updating svnfs remote %s (cachedir: %s): %s',
                repo['url'], repo['cachedir'], exc
            )
        new_rev = _rev(repo)
        if any((x is None for x in (old_rev, new_rev))):
            # There were problems getting the revision ID
            continue
        if new_rev != old_rev:
            data['changed'] = True
        clear_lock(repo)

    env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
    if data.get('changed', False) is True or not os.path.isfile(env_cache):
        env_cachedir = os.path.dirname(env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = envs(ignore_cache=True)
        serial = salt.payload.Serial(__opts__)
        with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
            fp_.write(serial.dumps(new_envs))
            log.trace('Wrote env cache data to %s', env_cache)

    # if there is a change, fire an event
    if __opts__.get('fileserver_events', False):
        event = salt.utils.event.get_event(
            'master',
            __opts__['sock_dir'],
            __opts__['transport'],
            opts=__opts__,
            listen=False)
        event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver'))
    try:
        salt.fileserver.reap_fileserver_cache_dir(
            os.path.join(__opts__['cachedir'], 'svnfs/hash'), find_file
        )
    except (IOError, OSError):
        # Hash file won't exist if no files have yet been served up
        pass

def _run_assertions(
    self,
    mod_and_func,
    args,
    data,
    module_output,
    output_details,
    assert_print_result,
):
    """
    Run assertion against input
    """
    value = {}

    assertion_section = data.get("assertion_section", None)
    assertion_section_delimiter = data.get(
        "assertion_section_delimiter", DEFAULT_TARGET_DELIM
    )

    if assertion_section:
        module_output = salt.utils.data.traverse_dict_and_list(
            module_output,
            assertion_section,
            default=False,
            delimiter=assertion_section_delimiter,
        )

    if mod_and_func in ["saltcheck.state_apply"]:
        assertion = "assertNotEmpty"
    else:
        assertion = data["assertion"]
    expected_return = data.get("expected_return", data.get("expected-return", None))

    if assertion not in [
        "assertIn",
        "assertNotIn",
        "assertEmpty",
        "assertNotEmpty",
        "assertTrue",
        "assertFalse",
    ]:
        expected_return = self._cast_expected_to_returned_type(
            expected_return, module_output
        )
    if assertion == "assertEqual":
        assertion_desc = "=="
        value["status"] = self.__assert_equal(
            expected_return, module_output, assert_print_result
        )
    elif assertion == "assertNotEqual":
        assertion_desc = "!="
        value["status"] = self.__assert_not_equal(
            expected_return, module_output, assert_print_result
        )
    elif assertion == "assertTrue":
        assertion_desc = "True is"
        value["status"] = self.__assert_true(module_output)
    elif assertion == "assertFalse":
        assertion_desc = "False is"
        value["status"] = self.__assert_false(module_output)
    elif assertion == "assertIn":
        assertion_desc = "IN"
        value["status"] = self.__assert_in(
            expected_return, module_output, assert_print_result
        )
    elif assertion == "assertNotIn":
        assertion_desc = "NOT IN"
        value["status"] = self.__assert_not_in(
            expected_return, module_output, assert_print_result
        )
    elif assertion == "assertGreater":
        assertion_desc = ">"
        value["status"] = self.__assert_greater(expected_return, module_output)
    elif assertion == "assertGreaterEqual":
        assertion_desc = ">="
        value["status"] = self.__assert_greater_equal(expected_return, module_output)
    elif assertion == "assertLess":
        assertion_desc = "<"
        value["status"] = self.__assert_less(expected_return, module_output)
    elif assertion == "assertLessEqual":
        assertion_desc = "<="
        value["status"] = self.__assert_less_equal(expected_return, module_output)
    elif assertion == "assertEmpty":
        assertion_desc = "IS EMPTY"
        value["status"] = self.__assert_empty(module_output)
    elif assertion == "assertNotEmpty":
        assertion_desc = "IS NOT EMPTY"
        value["status"] = self.__assert_not_empty(module_output)
    else:
        value["status"] = "Fail - bad assertion"

    if output_details:
        if assertion_section:
            assertion_section_repr_title = " {}".format("assertion_section")
            assertion_section_repr_value = " {}".format(assertion_section)
        else:
            assertion_section_repr_title = ""
            assertion_section_repr_value = ""
        value[
            "module.function [args]{}".format(assertion_section_repr_title)
        ] = "{} {}{}".format(
            mod_and_func,
            dumps(args),
            assertion_section_repr_value,
        )
        value["saltcheck assertion"] = "{}{} {}".format(
            ("" if expected_return is None else "{} ".format(expected_return)),
            assertion_desc,
            ("hidden" if not assert_print_result else module_output),
        )

    return value

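# A hypothetical saltcheck test, expressed as the `data` dict this method
# receives, to show which keys drive the dispatch above. The
# module_and_function and args values are invented for illustration;
# _run_assertions itself reads `assertion`, `assertion_section`,
# `assertion_section_delimiter`, and `expected_return`.
data = {
    "module_and_function": "grains.item",
    "args": ["os"],
    "assertion": "assertEqual",
    "assertion_section": "os",
    "expected_return": "Ubuntu",
}
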
def update():
    '''
    Execute an hg pull on all of the repos
    '''
    # data for the fileserver event
    data = {'changed': False,
            'backend': 'hgfs'}
    # _clear_old_remotes runs init(), so use the value from there to avoid a
    # second init()
    data['changed'], repos = _clear_old_remotes()
    for repo in repos:
        if os.path.exists(repo['lockfile']):
            log.warning(
                'Update lockfile is present for hgfs remote %s, skipping. '
                'If this warning persists, it is possible that the update '
                'process was interrupted. Removing %s or running '
                '\'salt-run fileserver.clear_lock hgfs\' will allow updates '
                'to continue for this remote.', repo['url'], repo['lockfile']
            )
            continue
        _, errors = lock(repo)
        if errors:
            log.error(
                'Unable to set update lock for hgfs remote %s, skipping.',
                repo['url']
            )
            continue
        log.debug('hgfs is fetching from %s', repo['url'])
        repo['repo'].open()
        curtip = repo['repo'].tip()
        try:
            repo['repo'].pull()
        except Exception as exc:  # pylint: disable=broad-except
            log.error(
                'Exception %s caught while updating hgfs remote %s',
                exc, repo['url'], exc_info_on_loglevel=logging.DEBUG
            )
        else:
            newtip = repo['repo'].tip()
            if curtip[1] != newtip[1]:
                data['changed'] = True
        repo['repo'].close()
        clear_lock(repo)

    env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')
    if data.get('changed', False) is True or not os.path.isfile(env_cache):
        env_cachedir = os.path.dirname(env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = envs(ignore_cache=True)
        serial = salt.payload.Serial(__opts__)
        with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
            fp_.write(serial.dumps(new_envs))
            log.trace('Wrote env cache data to %s', env_cache)

    # if there is a change, fire an event
    if __opts__.get('fileserver_events', False):
        with salt.utils.event.get_event(
                'master',
                __opts__['sock_dir'],
                __opts__['transport'],
                opts=__opts__,
                listen=False) as event:
            event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver'))
    try:
        salt.fileserver.reap_fileserver_cache_dir(
            os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file
        )
    except (IOError, OSError):
        # Hash file won't exist if no files have yet been served up
        pass

def thread_return(cls, minion_instance, opts, data): """ This method should be used as a threading target, start the actual minion side execution. """ fn_ = os.path.join(minion_instance.proc_dir, data["jid"]) salt.utils.process.appendproctitle("{}._thread_return {}".format( cls.__name__, data["jid"])) sdata = {"pid": os.getpid()} sdata.update(data) log.info("Starting a new job with PID %s", sdata["pid"]) with salt.utils.files.fopen(fn_, "w+b") as fp_: fp_.write(salt.payload.dumps(sdata)) ret = {"success": False} function_name = data["fun"] executors = (data.get("module_executors") or getattr(minion_instance, "module_executors", []) or opts.get("module_executors", ["direct_call"])) allow_missing_funcs = any([ minion_instance.executors["{}.allow_missing_func".format(executor)]( function_name) for executor in executors if "{}.allow_missing_func".format(executor) in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts[ "pillar"].get("minion_blackout", False): whitelist = minion_instance.opts["pillar"].get( "minion_blackout_whitelist", []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if (function_name != "saltutil.refresh_pillar" and function_name not in whitelist): minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts["grains"].get("minion_blackout", False): whitelist = minion_instance.opts["grains"].get( "minion_blackout_whitelist", []) if (function_name != "saltutil.refresh_pillar" and function_name not in whitelist): minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError( "Minion in blackout mode. Set 'minion_blackout' " "to False in pillar or grains to resume operations. Only " "saltutil.refresh_pillar allowed in blackout mode.") if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = salt.minion.load_args_and_kwargs( func, data["arg"], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data["arg"], data minion_instance.functions.pack["__context__"]["retcode"] = 0 if isinstance(executors, str): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError( "Wrong executors specification: {}. 
String or non-empty list" " expected".format(executors)) if opts.get("sudo_user", "") and executors[-1] != "sudo": executors[-1] = "sudo" # replace the last one with sudo log.trace("Executors list %s", executors) # pylint: disable=no-member for name in executors: fname = "{}.execute".format(name) if fname not in minion_instance.executors: raise SaltInvocationError( "Executor '{}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data["jid"], "prog", opts["id"], str(ind)], "job") event_data = {"return": single} minion_instance._fire_master(event_data, tag) ind += 1 ret["return"] = iret else: ret["return"] = return_data retcode = minion_instance.functions.pack["__context__"].get( "retcode", salt.defaults.exitcodes.EX_OK) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all( return_data.get(x, True) for x in ("result", "success")) except Exception: # pylint: disable=broad-except # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret["retcode"] = retcode ret["success"] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = "Command required for '{}' not found".format(function_name) log.debug(msg, exc_info=True) ret["return"] = "{}: {}".format(msg, exc) ret["out"] = "nested" ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( "A command in '%s' had a problem: %s", function_name, exc, exc_info_on_loglevel=logging.DEBUG, ) ret["return"] = "ERROR: {}".format(exc) ret["out"] = "nested" ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( "Problem executing '%s': %s", function_name, exc, exc_info_on_loglevel=logging.DEBUG, ) ret["return"] = "ERROR executing '{}': {}".format( function_name, exc) ret["out"] = "nested" ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = "Passed invalid arguments to {}: {}\n{}".format( function_name, exc, func.__doc__ or "") log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret["return"] = msg ret["out"] = "nested" ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC except Exception: # pylint: disable=broad-except msg = "The minion function caused an exception" log.warning(msg, exc_info=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret["return"] = "{}: {}".format(msg, traceback.format_exc()) ret["out"] = "nested" ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions["sys.doc"]( "{}*".format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string( function_name) ret["return"] = docs else: ret["return"] = minion_instance.functions.missing_fun_string( function_name) mod_name = function_name.split(".")[0] if mod_name in minion_instance.function_errors: ret["return"] += " Possible reasons: '{}'".format( minion_instance.function_errors[mod_name]) ret["success"] = False ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC ret["out"] = "nested" ret["jid"] = data["jid"] ret["fun"] = data["fun"] 
ret["fun_args"] = data["arg"] if "master_id" in data: ret["master_id"] = data["master_id"] if "metadata" in data: if isinstance(data["metadata"], dict): ret["metadata"] = data["metadata"] else: log.warning( "The metadata parameter must be a dictionary. Ignoring.") if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer()) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get("return"), str): if data["ret"]: data["ret"] = ",".join((data["ret"], opts["return"])) else: data["ret"] = opts["return"] log.debug("minion return: %s", ret) # TODO: make a list? Seems odd to split it this late :/ if data["ret"] and isinstance(data["ret"], str): if "ret_config" in data: ret["ret_config"] = data["ret_config"] if "ret_kwargs" in data: ret["ret_kwargs"] = data["ret_kwargs"] ret["id"] = opts["id"] for returner in set(data["ret"].split(",")): try: returner_str = "{}.returner".format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string( returner_str) log.error( "Returner %s could not be loaded: %s", returner_str, returner_err, ) except Exception as exc: # pylint: disable=broad-except log.exception("The return failed for job %s: %s", data["jid"], exc)
def proc_run(self, msg):
    '''
    Execute the run in a dedicated process
    '''
    data = msg['pub']
    fn_ = os.path.join(self.proc_dir, data['jid'])
    self.opts['__ex_id'] = data['jid']
    salt.utils.daemonize_if(self.opts)

    salt.transport.jobber_stack = stack = self._setup_jobber_stack()
    # set up return destination from source
    src_estate, src_yard, src_share = msg['route']['src']
    salt.transport.jobber_estate_name = src_estate
    salt.transport.jobber_yard_name = src_yard

    sdata = {'pid': os.getpid()}
    sdata.update(data)
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(self.serial.dumps(sdata))
    ret = {'success': False}
    function_name = data['fun']
    if function_name in self.modules.value:
        try:
            func = self.modules.value[data['fun']]
            args, kwargs = salt.minion.load_args_and_kwargs(
                func,
                salt.utils.args.parse_input(
                    data['arg'],
                    no_parse=data.get('no_parse', [])),
                data)
            sys.modules[func.__module__].__context__['retcode'] = 0

            executors = data.get('module_executors') or \
                self.opts.get('module_executors', ['direct_call'])
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError(
                    "Wrong executors specification: {0}. String or "
                    "non-empty list expected".format(executors))
            if self.opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo.get'  # replace
            log.trace("Executors list {0}".format(executors))

            for name in executors:
                if name not in self.module_executors.value:
                    raise SaltInvocationError(
                        "Executor '{0}' is not available".format(name))
                return_data = self.module_executors.value[name].execute(
                    self.opts, data, func, args, kwargs)
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                ind = 0
                iret = {}
                for single in return_data:
                    # iret must be a dict for update(); it is swapped to a
                    # list below once a non-dict chunk is seen
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify(
                        [data['jid'], 'prog', self.opts['id'], str(ind)],
                        'job')
                    event_data = {'return': single}
                    self._fire_master(event_data, tag)  # Need to look into this
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data
            ret['retcode'] = sys.modules[func.__module__].__context__.get(
                'retcode', 0)
            ret['success'] = True
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name)
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
        except CommandExecutionError as exc:
            log.error(
                'A command in \'{0}\' had a problem: {1}'.format(
                    function_name, exc),
                exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = 'ERROR: {0}'.format(exc)
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'{0}\': {1}'.format(function_name, exc),
                exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc)
        except TypeError as exc:
            msg = ('TypeError encountered executing {0}: {1}. See '
                   'debug log for more info.').format(function_name, exc)
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
        except Exception:  # pylint: disable=broad-except
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
    else:
        ret['return'] = '\'{0}\' is not available.'.format(function_name)

    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    self._return_pub(msg, ret, stack)
    if data['ret']:
        ret['id'] = self.opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                self.returners.value['{0}.returner'.format(returner)](ret)
            except Exception as exc:  # pylint: disable=broad-except
                log.error('The return failed for job {0} {1}'.format(
                    data['jid'], exc))
    console.concise("Closing Jobber Stack {0}\n".format(stack.name))
    stack.server.close()
    salt.transport.jobber_stack = None

def update(): """ Execute an hg pull on all of the repos """ # data for the fileserver event data = {"changed": False, "backend": "hgfs"} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data["changed"], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo["lockfile"]): log.warning( "Update lockfile is present for hgfs remote %s, skipping. " "If this warning persists, it is possible that the update " "process was interrupted. Removing %s or running " "'salt-run fileserver.clear_lock hgfs' will allow updates " "to continue for this remote.", repo["url"], repo["lockfile"], ) continue _, errors = lock(repo) if errors: log.error( "Unable to set update lock for hgfs remote %s, skipping.", repo["url"] ) continue log.debug("hgfs is fetching from %s", repo["url"]) repo["repo"].open() curtip = repo["repo"].tip() try: repo["repo"].pull() except Exception as exc: # pylint: disable=broad-except log.error( "Exception %s caught while updating hgfs remote %s", exc, repo["url"], exc_info_on_loglevel=logging.DEBUG, ) else: newtip = repo["repo"].tip() if curtip[1] != newtip[1]: data["changed"] = True repo["repo"].close() clear_lock(repo) env_cache = os.path.join(__opts__["cachedir"], "hgfs/envs.p") if data.get("changed", False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, "wb+") as fp_: fp_.write(serial.dumps(new_envs)) log.trace("Wrote env cache data to %s", env_cache) # if there is a change, fire an event if __opts__.get("fileserver_events", False): with salt.utils.event.get_event( "master", __opts__["sock_dir"], __opts__["transport"], opts=__opts__, listen=False, ) as event: event.fire_event(data, tagify(["hgfs", "update"], prefix="fileserver")) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__["cachedir"], "hgfs/hash"), find_file ) except OSError: # Hash file won't exist if no files have yet been served up pass
def update(): """ Execute an svn update on all of the repos """ # data for the fileserver event data = {"changed": False, "backend": "svnfs"} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data["changed"], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo["lockfile"]): log.warning( "Update lockfile is present for svnfs remote %s, skipping. " "If this warning persists, it is possible that the update " "process was interrupted. Removing %s or running " "'salt-run fileserver.clear_lock svnfs' will allow updates " "to continue for this remote.", repo["url"], repo["lockfile"], ) continue _, errors = lock(repo) if errors: log.error( "Unable to set update lock for svnfs remote %s, skipping.", repo["url"]) continue log.debug("svnfs is fetching from %s", repo["url"]) old_rev = _rev(repo) try: CLIENT.update(repo["repo"]) except pysvn._pysvn.ClientError as exc: log.error( "Error updating svnfs remote %s (cachedir: %s): %s", repo["url"], repo["cachedir"], exc, ) new_rev = _rev(repo) if any(x is None for x in (old_rev, new_rev)): # There were problems getting the revision ID continue if new_rev != old_rev: data["changed"] = True clear_lock(repo) env_cache = os.path.join(__opts__["cachedir"], "svnfs/envs.p") if data.get("changed", False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, "wb+") as fp_: fp_.write(serial.dumps(new_envs)) log.trace("Wrote env cache data to %s", env_cache) # if there is a change, fire an event if __opts__.get("fileserver_events", False): with salt.utils.event.get_event( "master", __opts__["sock_dir"], __opts__["transport"], opts=__opts__, listen=False, ) as event: event.fire_event(data, tagify(["svnfs", "update"], prefix="fileserver")) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__["cachedir"], "svnfs/hash"), find_file) except OSError: # Hash file won't exist if no files have yet been served up pass
def thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

    salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(
        cls.__name__, data['jid']))

    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job with PID %s', sdata['pid'])
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {'success': False}
    function_name = data['fun']
    executors = data.get('module_executors') or \
        getattr(minion_instance, 'module_executors', []) or \
        opts.get('module_executors', ['direct_call'])
    allow_missing_funcs = any([
        minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
        for executor in executors
        if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
    ])
    if function_name in minion_instance.functions or allow_missing_funcs is True:
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            if minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = salt.minion.load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
            else:
                # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                func = function_name
                args, kwargs = data['arg'], data
            minion_instance.functions.pack['__context__']['retcode'] = 0

            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError(
                    "Wrong executors specification: {0}. String or "
                    "non-empty list expected".format(executors))
            if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo'  # replace the last one with sudo
            log.trace('Executors list %s', executors)  # pylint: disable=no-member

            for name in executors:
                fname = '{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError(
                        "Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data

            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                salt.defaults.exitcodes.EX_OK
            )
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(return_data.get(x, True)
                                      for x in ('result', 'success'))
                except Exception:  # pylint: disable=broad-except
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret['retcode'] = retcode
            ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                function_name, exc, func.__doc__ or ''
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:  # pylint: disable=broad-except
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg),
                                            opts, job=data)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
    else:
        docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret['return'] = docs
        else:
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
        ret['success'] = False
        ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        ret['out'] = 'nested'

    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'master_id' in data:
        ret['master_id'] = data['master_id']
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
        else:
            log.warning('The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )

    # Add default returners from minion config
    # Should have been converted to comma-delimited string already
    if isinstance(opts.get('return'), six.string_types):
        if data['ret']:
            data['ret'] = ','.join((data['ret'], opts['return']))
        else:
            data['ret'] = opts['return']

    log.debug('minion return: %s', ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data['ret'] and isinstance(data['ret'], six.string_types):
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                returner_str = '{0}.returner'.format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(
                        'Returner %s could not be loaded: %s',
                        returner_str, returner_err
                    )
            except Exception as exc:  # pylint: disable=broad-except
                log.exception(
                    'The return failed for job %s: %s', data['jid'], exc
                )

def snapshot_id_to_name(name, snap_id, strict=False, runas=None):
    """
    Attempt to convert a snapshot ID to a snapshot name.  If the snapshot has
    no name or if the ID is not found or invalid, an empty string will be
    returned

    :param str name:
        Name/ID of VM whose snapshots are inspected

    :param str snap_id:
        ID of the snapshot

    :param bool strict:
        Raise an exception if a name cannot be found for the given ``snap_id``

    :param str runas:
        The user that the prlctl command will be run as

    Example data

    .. code-block:: yaml

        ID: {a5b8999f-5d95-4aff-82de-e515b0101b66}
        Name: original
        Date: 2016-03-04 10:50:34
        Current: yes
        State: poweroff
        Description: original state

    CLI Example:

    .. code-block:: bash

        salt '*' parallels.snapshot_id_to_name macvm a5b8999f-5d95-4aff-82de-e515b0101b66 runas=macdev
    """
    # Validate VM name and snapshot ID
    name = salt.utils.data.decode(name)
    if not re.match(GUID_REGEX, snap_id):
        raise SaltInvocationError(
            'Snapshot ID "{0}" is not a GUID'.format(salt.utils.data.decode(snap_id))
        )

    # Get the snapshot information of the snapshot having the requested ID
    info = prlctl("snapshot-list", [name, "--id", snap_id], runas=runas)

    # Parallels desktop returned no information for snap_id
    if not info:
        raise SaltInvocationError(
            'No snapshots for VM "{0}" have ID "{1}"'.format(name, snap_id)
        )

    # Try to interpret the information
    try:
        data = salt.utils.yaml.safe_load(info)
    except salt.utils.yaml.YAMLError as err:
        log.warning(
            "Could not interpret snapshot data returned from prlctl: %s", err
        )
        data = {}

    # Find the snapshot name
    if isinstance(data, dict):
        snap_name = data.get("Name", "")
        # If snapshot name is of type NoneType, then the snapshot is unnamed
        if snap_name is None:
            snap_name = ""
    else:
        log.warning(
            "Could not interpret snapshot data returned from prlctl: "
            "data is not formed as a dictionary: %s",
            data,
        )
        snap_name = ""

    # Raise or return the result
    if not snap_name and strict:
        raise SaltInvocationError(
            'Could not find a snapshot name for snapshot ID "{0}" of VM '
            '"{1}"'.format(snap_id, name)
        )
    return salt.utils.data.decode(snap_name)

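# Sketch: the "Example data" from the docstring parses into the dict the
# function reads "Name" from. Plain PyYAML stands in for salt.utils.yaml
# here, and the ID is quoted so the braces are not read as a YAML flow
# mapping; both are assumptions for a standalone run.
import yaml

info = """
ID: '{a5b8999f-5d95-4aff-82de-e515b0101b66}'
Name: original
Date: 2016-03-04 10:50:34
Current: yes
State: poweroff
Description: original state
"""
data = yaml.safe_load(info)
assert data.get("Name", "") == "original"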