def _tag_doc(tags):
    '''
    Convert a tag mapping into the list-of-``{'Key': ..., 'Value': ...}``
    structure expected by AWS APIs.

    Keys beginning with a double underscore are treated as internal and
    skipped; all keys and values are coerced to text. ``None`` yields an
    empty list.
    '''
    taglist = []
    if tags is None:
        return taglist
    for key, value in six.iteritems(tags):
        key = six.text_type(key)
        if key.startswith('__'):
            # internal bookkeeping entries are not real tags
            continue
        taglist.append({'Key': key, 'Value': six.text_type(value)})
    return taglist
def _ordered(obj):
    '''
    Recursively normalize *obj* for order-insensitive comparison.

    Sequences (lists/tuples) are recursively normalized and sorted, dict
    string keys are coerced to text, and bare strings are coerced to text.
    Anything else is returned unchanged.
    '''
    if isinstance(obj, (list, tuple)):
        return sorted(_ordered(item) for item in obj)
    if isinstance(obj, dict):
        normalized = {}
        for key, value in obj.items():
            if isinstance(key, six.string_types):
                key = six.text_type(key)
            normalized[key] = _ordered(value)
        return normalized
    if isinstance(obj, six.string_types):
        return six.text_type(obj)
    return obj
def __file_hash_and_stat(self, load):
    '''
    Common code for hashing and stating files

    Returns a ``(hash, stat_result)`` tuple; ``('', None)`` when the request
    is malformed or no backend can serve the path.
    '''
    if 'env' in load:
        # 'env' was renamed to 'saltenv' in 2016.11.0; warn and drop it.
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. '
            'This parameter is no longer used and has been replaced by '
            '\'saltenv\' as of Salt 2016.11.0. This warning will be removed '
            'in Salt Oxygen.'
        )
        load.pop('env')
    if 'path' not in load or 'saltenv' not in load:
        return '', None
    if not isinstance(load['saltenv'], six.string_types):
        # normalize saltenv to text so backend lookups behave consistently
        load['saltenv'] = six.text_type(load['saltenv'])
    fnd = self.find_file(salt.utils.locales.sdecode(load['path']),
                         load['saltenv'])
    if not fnd.get('back'):
        # no backend claimed the file
        return '', None
    stat_result = fnd.get('stat', None)
    # dispatch to the owning backend's file_hash implementation
    fstr = '{0}.file_hash'.format(fnd['back'])
    if fstr in self.servers:
        return self.servers[fstr](load, fnd), stat_result
    return '', None
def dir_list(self, load):
    '''
    List all directories in the given environment

    Aggregates results from every enabled backend (or only the one named in
    ``load['fsbackend']``), optionally filtered by ``load['prefix']``.
    Returns a sorted list of decoded directory paths.
    '''
    if 'env' in load:
        # 'env' was renamed to 'saltenv' in 2016.11.0; warn and drop it.
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        load.pop('env')
    ret = set()
    if 'saltenv' not in load:
        return []
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])
    # merge directory listings from every participating backend
    for fsb in self._gen_back(load.pop('fsbackend', None)):
        fstr = '{0}.dir_list'.format(fsb)
        if fstr in self.servers:
            ret.update(self.servers[fstr](load))
    # upgrade all set elements to a common encoding
    ret = [salt.utils.locales.sdecode(f) for f in ret]
    # some *fs do not handle prefix. Ensure it is filtered
    prefix = load.get('prefix', '').strip('/')
    if prefix != '':
        ret = [f for f in ret if f.startswith(prefix)]
    return sorted(ret)
def symlink_list(self, load):
    '''
    Return a list of symlinked files and dirs

    Queries every enabled backend (or only ``load['fsbackend']``) and merges
    the results into a single dict of ``{symlink_path: target}``, optionally
    filtered by ``load['prefix']``.
    '''
    if 'env' in load:
        # 'env' was renamed to 'saltenv' in 2016.11.0; warn and drop it.
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        load.pop('env')
    ret = {}
    if 'saltenv' not in load:
        return {}
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])
    for fsb in self._gen_back(load.pop('fsbackend', None)):
        symlstr = '{0}.symlink_list'.format(fsb)
        if symlstr in self.servers:
            # Bug fix: previously each backend's result REPLACED ``ret``
            # (``ret = self.servers[symlstr](load)``), so only the last
            # backend's symlinks were returned. Merge instead, consistent
            # with dir_list().
            ret.update(self.servers[symlstr](load))
    # upgrade all set elements to a common encoding
    ret = dict([
        (salt.utils.locales.sdecode(x), salt.utils.locales.sdecode(y))
        for x, y in ret.items()
    ])
    # some *fs do not handle prefix. Ensure it is filtered
    prefix = load.get('prefix', '').strip('/')
    if prefix != '':
        ret = dict([
            (x, y) for x, y in six.iteritems(ret)
            if x.startswith(prefix)
        ])
    return ret
def serve_file(self, load):
    '''
    Serve up a chunk of a file

    Expects ``path``, ``loc`` and ``saltenv`` in ``load``; returns the
    default ``{'data': '', 'dest': ''}`` structure when the request is
    malformed or no backend can serve the path.
    '''
    ret = {'data': '', 'dest': ''}
    if 'env' in load:
        # 'env' was renamed to 'saltenv' in 2016.11.0; warn and drop it.
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        load.pop('env')
    if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
        return ret
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])
    fnd = self.find_file(load['path'], load['saltenv'])
    if not fnd.get('back'):
        # no backend claimed the file
        return ret
    # dispatch to the owning backend's serve_file implementation
    fstr = '{0}.serve_file'.format(fnd['back'])
    if fstr in self.servers:
        return self.servers[fstr](load, fnd)
    return ret
def _rename(src, dst):  # pylint: disable=E0102
    '''
    Windows rename helper: decode byte-string paths using the filesystem
    encoding, attempt a transactional rename first, then fall back to
    MoveFileEx with a short retry loop. Returns a truthy value on success.
    '''
    if not isinstance(src, six.text_type):
        src = six.text_type(src, sys.getfilesystemencoding())
    if not isinstance(dst, six.text_type):
        dst = six.text_type(dst, sys.getfilesystemencoding())
    if _rename_atomic(src, dst):
        return True
    retry = 0
    rval = False
    # Retry briefly: another process (e.g. AV scanner) may hold the file.
    while not rval and retry < 100:
        # REPLACE_EXISTING overwrites dst; WRITE_THROUGH flushes before
        # the call returns.
        rval = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
                           _MOVEFILE_WRITE_THROUGH)
        if not rval:
            time.sleep(0.001)
            retry += 1
    return rval
def returner(load):
    '''
    Return data to a postgres server

    Inserts the job's function, jid, JSON-encoded return data, minion id and
    success flag into ``salt_returns``. Silently returns ``None`` when no
    database connection can be established.
    '''
    # salt guarantees that there will be 'fun', 'jid', 'return' and 'id' but not
    # 'success'
    success = 'Unknown'
    if 'success' in load:
        success = load['success']
    conn = _get_conn()
    if conn is None:
        return None
    cur = conn.cursor()
    sql = '''INSERT INTO salt_returns (fun, jid, return, id, success) VALUES (%s, %s, %s, %s, %s)'''
    # Bug fix: the original called six.text_type(str(load['return']),
    # 'utf-8', 'replace'), which raises TypeError on Python 3 because a str
    # cannot be decoded. Decode only genuine byte strings; stringify
    # everything else.
    ret_data = load['return']
    if isinstance(ret_data, bytes):
        ret_data = ret_data.decode('utf-8', 'replace')
    else:
        ret_data = six.text_type(ret_data)
    cur.execute(
        sql, (
            load['fun'],
            load['jid'],
            json.dumps(ret_data),
            load['id'],
            success
        )
    )
    _close_conn(conn)
def output(grains):
    '''
    Output the grains in a clean way
    '''
    colors = salt.utils.get_colors(__opts__.get('color'))
    # Decode byte strings with the minion's reported default encoding,
    # falling back to utf-8 when it is unknown.
    encoding = grains['locale_info']['defaultencoding']
    if encoding == 'unknown':
        encoding = 'utf-8'  # let's hope for the best
    ret = u''
    # NOTE(review): the .decode(encoding) calls below assume ids, keys and
    # values are byte strings (Python 2 semantics). On Python 3, str has no
    # .decode(), and non-string grain values would also break -- verify how
    # this outputter is fed under Python 3.
    for id_, minion in grains.items():
        ret += u'{0}{1}{2}:\n'.format(colors['GREEN'], id_.decode(encoding), colors['ENDC'])
        for key in sorted(minion):
            ret += u' {0}{1}{2}:'.format(colors['CYAN'], key.decode(encoding), colors['ENDC'])
            if key == 'cpu_flags':
                # flags rendered inline on one line
                ret += colors['LIGHT_GREEN']
                for val in minion[key]:
                    ret += u' {0}'.format(val.decode(encoding))
                ret += '{0}\n'.format(colors['ENDC'])
            elif key == 'pythonversion':
                # join version components with dots
                ret += ' {0}'.format(colors['LIGHT_GREEN'])
                for val in minion[key]:
                    ret += u'{0}.'.format(six.text_type(val))
                # strip the trailing dot added by the loop above
                ret = ret[:-1]
                ret += '{0}\n'.format(colors['ENDC'])
            elif isinstance(minion[key], list):
                # generic lists: one indented line per element
                for val in minion[key]:
                    ret += u'\n {0}{1}{2}'.format(colors['LIGHT_GREEN'], val.decode(encoding), colors['ENDC'])
                ret += '\n'
            else:
                ret += u' {0}{1}{2}\n'.format(colors['LIGHT_GREEN'], minion[key].decode(encoding), colors['ENDC'])
    return ret
def condition_input(args, kwargs):
    '''
    Return a single arg structure for the publisher to safely use
    '''
    def _jid_safe(item):
        # Large integers that look like job IDs must travel as strings so
        # they survive serialization unchanged (Python 2 longs likewise).
        if (six.PY3 and isinstance(item, six.integer_types)
                and salt.utils.jid.is_jid(six.text_type(item))) or \
                (six.PY2 and isinstance(item, long)):  # pylint: disable=incompatible-py3-code
            return six.text_type(item)
        return item

    ret = [_jid_safe(arg) for arg in args]
    if isinstance(kwargs, dict) and kwargs:
        # kwargs travel as a trailing marker dict
        kw_ = {'__kwarg__': True}
        kw_.update(kwargs)
        ret.append(kw_)
    return ret
def _localectl_set(locale=''):
    '''
    Use systemd's localectl command to set the LANG locale parameter, making
    sure not to trample on other params that have been set.
    '''
    # Start from the currently configured locale params (via dbus when
    # available, else by parsing localectl status output) so only LANG is
    # replaced.
    if dbus is not None:
        locale_params = _parse_dbus_locale()
    else:
        locale_params = _localectl_status().get('system_locale', {})
    locale_params['LANG'] = six.text_type(locale)
    args = ' '.join(
        '{0}="{1}"'.format(k, v)
        for k, v in six.iteritems(locale_params)
        if v is not None
    )
    cmd = 'localectl set-locale {0}'.format(args)
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def _mbcs_to_unicode(instr):
    '''
    Converts from current users character encoding to unicode.
    When instr has a value of None, the return value of the function
    will also be None.
    '''
    if instr is None:
        return instr
    if isinstance(instr, six.text_type):
        # already decoded text; pass straight through
        return instr
    return six.text_type(instr, 'mbcs')
def __assert_true(returned):
    '''
    Test if an boolean is True
    '''
    try:
        assert returned is True, "{0} not True".format(returned)
        return "Pass"
    except AssertionError as err:
        return "Fail: " + six.text_type(err)
def __assert_greater(expected, returned):
    '''
    Test if a value is greater than the returned value

    Returns ``"Pass"`` on success, ``"Fail: <reason>"`` otherwise.
    '''
    result = "Pass"
    try:
        # Bug fix: the failure message previously read "{0} not False",
        # copy-pasted from __assert_false; report the actual comparison.
        assert (expected > returned), "{0} not greater than {1}".format(expected, returned)
    except AssertionError as err:
        result = "Fail: " + six.text_type(err)
    return result
def __assert_less_equal(expected, returned):
    '''
    Test if a value is less than or equal to the returned value

    Returns ``"Pass"`` on success, ``"Fail: <reason>"`` otherwise.
    '''
    result = "Pass"
    try:
        # Bug fix: the failure message previously read "{0} not False",
        # copy-pasted from __assert_false; report the actual comparison.
        assert (expected <= returned), "{0} not less than or equal to {1}".format(expected, returned)
    except AssertionError as err:
        result = "Fail: " + six.text_type(err)
    return result
def __assert_not_empty(returned):
    '''
    Test if a returned value is not empty
    '''
    try:
        assert returned, "value is empty"
        return "Pass"
    except AssertionError as err:
        return "Fail: " + six.text_type(err)
def yaml_dquote(text):
    '''
    Make text into a double-quoted YAML string with correct escaping for
    special characters.  Includes the opening and closing double quote
    characters.
    '''
    sink = io.StringIO()
    try:
        # Let PyYAML's emitter handle all the escaping rules.
        yaml.emitter.Emitter(sink).write_double_quoted(six.text_type(text))
        return sink.getvalue()
    finally:
        sink.close()
def clear_lock(remote=None):
    '''
    Clear update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.

    Returns a ``(cleared, errors)`` tuple of message lists.
    '''
    def _do_clear_lock(repo):
        # Remove a single repo's lockfile, collecting per-repo messages.
        def _add_error(errlist, repo, exc):
            msg = ('Unable to remove update lock for {0} ({1}): {2} '
                   .format(repo['url'], repo['lockfile'], exc))
            log.debug(msg)
            errlist.append(msg)
        success = []
        failed = []
        if os.path.exists(repo['lockfile']):
            try:
                os.remove(repo['lockfile'])
            except OSError as exc:
                if exc.errno == errno.EISDIR:
                    # Somehow this path is a directory. Should never happen
                    # unless some wiseguy manually creates a directory at this
                    # path, but just in case, handle it.
                    try:
                        shutil.rmtree(repo['lockfile'])
                    except OSError as exc:
                        _add_error(failed, repo, exc)
                else:
                    _add_error(failed, repo, exc)
            else:
                msg = 'Removed lock for {0}'.format(repo['url'])
                log.debug(msg)
                success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        return _do_clear_lock(remote)

    cleared = []
    errors = []
    for repo in init():
        if remote:
            try:
                # Bug fix: match the URL against the pattern with fnmatch,
                # as the docstring promises and as lock() does, instead of
                # a plain substring test.
                if not fnmatch.fnmatch(repo['url'], remote):
                    continue
            except TypeError:
                # remote was non-string, try again
                if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):
                    continue
        success, failed = _do_clear_lock(repo)
        cleared.extend(success)
        errors.extend(failed)
    return cleared, errors
def split_input(val, mapper=None):
    '''
    Take an input value and split it into a list, returning the resulting list
    '''
    if mapper is None:
        mapper = lambda x: x
    # Lists pass through (mapped) untouched; anything else is treated as a
    # comma-separated string, coercing non-strings to text first.
    if isinstance(val, list):
        return list(map(mapper, val))
    try:
        parts = val.split(',')
    except AttributeError:
        parts = six.text_type(val).split(',')
    return [mapper(part.strip()) for part in parts]
def query(qstr, limit=10):
    '''
    Execute a query

    qstr
        The query string to parse and run against the cached whoosh index.
    limit : 10
        Maximum number of results to return.
    '''
    index_dir = os.path.join(__opts__['cachedir'], 'whoosh')
    if not whoosh.index.exists_in(index_dir):
        return {}
    ix_ = whoosh.index.open_dir(index_dir)
    qp_ = whoosh.qparser.QueryParser(u'content', schema=ix_.schema)
    # Bug fix: ``limit`` was previously passed as the second positional
    # argument of QueryParser.parse(), which is ``normalize``, and never
    # reached the search; pass it to search() where it belongs.
    qobj = qp_.parse(six.text_type(qstr))
    with ix_.searcher() as searcher:
        return searcher.search(qobj, limit=limit)
def __assert_not_in(expected, returned, assert_print_result=True):
    '''
    Test if a value is not in the list of returned values
    '''
    try:
        if assert_print_result:
            assert (expected not in returned), "{0} was found in {1}".format(expected, returned)
        else:
            # caller asked for the values to be kept out of the message
            assert (expected not in returned), "Result was found"
        return "Pass"
    except AssertionError as err:
        return "Fail: " + six.text_type(err)
def __assert_not_equal(expected, returned, assert_print_result=True):
    '''
    Test if two objects are not equal
    '''
    try:
        if assert_print_result:
            assert (expected != returned), "{0} is equal to {1}".format(expected, returned)
        else:
            # caller asked for the values to be kept out of the message
            assert (expected != returned), "Result is equal"
        return "Pass"
    except AssertionError as err:
        return "Fail: " + six.text_type(err)
def find_file(self, path, saltenv, back=None):
    '''
    Find the path and return the fnd structure, this structure is passed
    to other backend interfaces.

    Backends are queried in order; the first backend that resolves the
    path wins and its name is recorded under ``fnd['back']``.
    '''
    back = self._gen_back(back)
    kwargs = {}
    fnd = {'path': '', 'rel': ''}
    # Reject absolute paths and directory traversal outright.
    if os.path.isabs(path):
        return fnd
    if '../' in path:
        return fnd
    if salt.utils.url.is_escaped(path):
        # don't attempt to find URL query arguments in the path
        path = salt.utils.url.unescape(path)
    else:
        # Split ``?key=val&key2=val2`` query args off the path into kwargs.
        if '?' in path:
            hcomps = path.split('?')
            path = hcomps[0]
            comps = hcomps[1].split('&')
            for comp in comps:
                if '=' not in comp:
                    # Invalid option, skip it
                    continue
                args = comp.split('=', 1)
                kwargs[args[0]] = args[1]
    if 'env' in kwargs:
        # 'env' was renamed to 'saltenv' in 2016.11.0; warn and drop it.
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        kwargs.pop('env')
    if 'saltenv' in kwargs:
        # query-arg saltenv overrides the parameter
        saltenv = kwargs.pop('saltenv')
    if not isinstance(saltenv, six.string_types):
        saltenv = six.text_type(saltenv)
    # Ask each backend in order; first hit wins.
    for fsb in back:
        fstr = '{0}.find_file'.format(fsb)
        if fstr in self.servers:
            fnd = self.servers[fstr](path, saltenv, **kwargs)
            if fnd.get('path'):
                fnd['back'] = fsb
                return fnd
    return fnd
def delete_queue(name, region, opts=None, user=None):
    '''
    Deletes a queue in the region.

    name
        Name of the SQS queue to deletes
    region
        Name of the region to delete the queue from
    opts : None
        Any additional options to add to the command line
    user : None
        Run hg as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.delete_queue <sqs queue> <region>
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)

    # NOTE(review): a module-level logger is the usual pattern; creating one
    # on every call works but is wasteful.
    logger = logging.getLogger(__name__)
    logger.debug('map ' + six.text_type(url_map))
    if name in url_map:
        delete = {'queue-url': url_map[name]}
        # NOTE(review): ``rtn`` is never inspected, so success is reported
        # even if the AWS call failed -- confirm whether _run_aws raises on
        # error or its result should be checked here.
        rtn = _run_aws(
            'delete-queue',
            region=region,
            opts=opts,
            user=user,
            **delete)
        success = True
        err = ''
        out = '{0} deleted'.format(name)
    else:
        out = ''
        err = "Delete failed"
        success = False

    ret = {
        'retcode': 0 if success else 1,
        'stdout': out,
        'stderr': err,
    }
    return ret
def __assert_false(returned):
    '''
    Test if an boolean is False
    '''
    result = "Pass"
    # NOTE(review): bool() of a non-empty string is always True and never
    # raises ValueError, so this conversion makes any non-empty string
    # (including "False") fail the assertion below, and the except branch
    # is unreachable. Presumably the intent was to parse boolean-looking
    # strings -- verify against callers before changing.
    if isinstance(returned, str):
        try:
            returned = bool(returned)
        except ValueError:
            raise
    try:
        assert (returned is False), "{0} not False".format(returned)
    except AssertionError as err:
        result = "Fail: " + six.text_type(err)
    return result
def _iter_dir(dir_, saltenv):
    '''
    Walk a dir path looking for files and marking their content type

    Generator: yields one list of file documents for this directory level;
    subdirectories are yielded as nested generators (see note below).
    '''
    ret = []
    for fn_ in os.listdir(dir_):
        path = os.path.join(dir_, fn_)
        if os.path.isdir(path):
            # NOTE(review): this yields the sub-generator object itself
            # rather than iterating it, so the consumer receives nested
            # generators it must flatten -- confirm callers expect that.
            yield _iter_dir(path, saltenv)
        elif os.path.isfile(path):
            with salt.utils.fopen(path) as fp_:
                if salt.utils.istextfile(fp_):
                    # text files carry their full content
                    ret.append(
                        {'path': six.text_type(path),
                         'saltenv': six.text_type(saltenv),
                         'content': six.text_type(fp_.read())}
                    )
                else:
                    # binary files get a placeholder instead of raw bytes
                    ret.append(
                        {'path': six.text_type(path),
                         'saltenv': six.text_type(saltenv),
                         'content': u'bin'}
                    )
    yield ret
def lock(remote=None):
    '''
    Place an update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.

    Returns a ``(locked, errors)`` tuple of message lists.
    '''
    def _do_lock(repo):
        # Create an empty lockfile if absent, collecting per-repo messages.
        success = []
        failed = []
        if not os.path.exists(repo['lockfile']):
            try:
                with salt.utils.fopen(repo['lockfile'], 'w+') as fp_:
                    fp_.write('')
            except (IOError, OSError) as exc:
                msg = ('Unable to set update lock for {0} ({1}): {2} '
                       .format(repo['url'], repo['lockfile'], exc))
                log.debug(msg)
                failed.append(msg)
            else:
                msg = 'Set lock for {0}'.format(repo['url'])
                log.debug(msg)
                success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        return _do_lock(remote)

    locked = []
    errors = []
    for repo in init():
        if remote:
            try:
                if not fnmatch.fnmatch(repo['url'], remote):
                    continue
            except TypeError:
                # remote was non-string, try again
                if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):
                    continue
        success, failed = _do_lock(repo)
        locked.extend(success)
        errors.extend(failed)
    return locked, errors
def _gen_back(self, back):
    '''
    Return the backend list

    ``back`` may be falsey (use the configured ``fileserver_backend``), a
    comma-separated string, or a list. A list consisting solely of
    ``-name`` entries subtracts those backends from the configured set.
    Only backends with a registered ``<name>.envs`` function are returned.
    '''
    if not back:
        back = self.opts['fileserver_backend']
    else:
        if not isinstance(back, list):
            try:
                back = back.split(',')
            except AttributeError:
                back = six.text_type(back).split(',')

    if isinstance(back, collections.Sequence):
        # The test suite uses an ImmutableList type (based on
        # collections.Sequence) for lists, which breaks this function in
        # the test suite. This normalizes the value from the opts into a
        # list if it is based on collections.Sequence.
        # NOTE(review): collections.Sequence is collections.abc.Sequence on
        # Python 3.3+ and the alias is removed in 3.10 -- revisit when py2
        # support is dropped.
        back = list(back)

    ret = []
    if not isinstance(back, list):
        return ret

    try:
        subtract_only = all((x.startswith('-') for x in back))
    except AttributeError:
        pass
    else:
        if subtract_only:
            # Only subtracting backends from enabled ones
            # Bug fix: copy the configured list before mutating it;
            # previously ret aliased self.opts['fileserver_backend'] and
            # the ret.remove() calls below corrupted the opts in place.
            ret = list(self.opts['fileserver_backend'])
            for sub in back:
                if '{0}.envs'.format(sub[1:]) in self.servers:
                    ret.remove(sub[1:])
                elif '{0}.envs'.format(sub[1:-2]) in self.servers:
                    ret.remove(sub[1:-2])
            return ret

    for sub in back:
        if '{0}.envs'.format(sub) in self.servers:
            ret.append(sub)
        elif '{0}.envs'.format(sub[:-2]) in self.servers:
            ret.append(sub[:-2])
    return ret
def returner(load):
    '''
    Return data to a postgres server

    Inserts the job's function, jid, JSON-encoded return data, minion id and
    success flag into ``salt_returns``. Silently returns ``None`` when no
    database connection can be established.
    '''
    conn = _get_conn()
    if conn is None:
        return None
    cur = conn.cursor()
    sql = '''INSERT INTO salt_returns (fun, jid, return, id, success) VALUES (%s, %s, %s, %s, %s)'''
    # Bug fix: the original called six.text_type(str(load['return']),
    # 'utf-8', 'replace'), which raises TypeError on Python 3 because a str
    # cannot be decoded. Decode only genuine byte strings; stringify
    # everything else.
    ret_data = load['return']
    if isinstance(ret_data, bytes):
        ret_data = ret_data.decode('utf-8', 'replace')
    else:
        ret_data = six.text_type(ret_data)
    cur.execute(
        sql, (
            load['fun'],
            load['jid'],
            json.dumps(ret_data),
            load['id'],
            load.get('success'),
        )
    )
    _close_conn(conn)
def _parse_dbus_locale():
    '''
    Get the 'System Locale' parameters from dbus
    '''
    bus = dbus.SystemBus()
    localed = bus.get_object('org.freedesktop.locale1',
                             '/org/freedesktop/locale1')
    properties = dbus.Interface(localed, 'org.freedesktop.DBus.Properties')
    system_locale = properties.Get('org.freedesktop.locale1', 'Locale')

    ret = {}
    # Entries come back as KEY=value pairs, e.g. LANG="en_US.UTF-8".
    param_re = re.compile(r'^([A-Z_]+)=(.*)$')
    for env_var in system_locale:
        env_var = six.text_type(env_var)
        match = param_re.match(env_var)
        if not match:
            log.error('Odd locale parameter "%s" detected in dbus locale '
                      'output. This should not happen. You should '
                      'probably investigate what caused this.', env_var)
            continue
        ret[match.group(1)] = match.group(2).replace('"', '')
    return ret
def _test_hwclock_sync(self):
    """
    Check that hw and sw clocks are sync'd.
    """
    # Skip on platforms where the hardware clock cannot be set or compared.
    if not self.run_function("system.has_settable_hwclock"):
        return None
    if not self._hwclock_has_compare():
        return None

    # Derive from BaseException so a generic ``except Exception`` inside
    # the try body cannot swallow the timeout.
    class CompareTimeout(BaseException):
        pass

    def _alrm_handler(sig, frame):
        log.warning(
            "hwclock --compare failed to produce output after 3 seconds")
        raise CompareTimeout

    # Try twice: the first comparison can be disturbed by the alarm.
    for _ in range(2):
        try:
            orig_handler = signal.signal(signal.SIGALRM, _alrm_handler)
            signal.alarm(3)
            rpipeFd, wpipeFd = os.pipe()
            log.debug("Comparing hwclock to sys clock")
            with os.fdopen(rpipeFd, "r") as rpipe:
                with os.fdopen(wpipeFd, "w") as wpipe:
                    with salt.utils.files.fopen(os.devnull, "r") as nulFd:
                        p = subprocess.Popen(
                            args=["hwclock", "--compare"],
                            stdin=nulFd,
                            stdout=wpipeFd,
                            stderr=subprocess.PIPE,
                        )
                        p.communicate()

                        # read header
                        rpipe.readline()

                        # read first time comparison
                        timeCompStr = rpipe.readline()

                        # stop
                        p.terminate()

                        # hwclock --compare lines are "<hw time> <sys time> ..."
                        timeComp = timeCompStr.split()
                        hwTime = float(timeComp[0])
                        swTime = float(timeComp[1])
                        diff = abs(hwTime - swTime)

                        self.assertTrue(
                            diff <= 2.0,
                            msg=("hwclock difference too big: " + six.text_type(timeCompStr)),
                        )
                        break
        except CompareTimeout:
            p.terminate()
        finally:
            # always cancel the alarm and restore the previous handler
            signal.alarm(0)
            signal.signal(signal.SIGALRM, orig_handler)
    else:
        # for/else: neither attempt managed to break out successfully
        log.error("Failed to check hwclock sync")
def script(name,
           source=None,
           template=None,
           onlyif=None,
           unless=None,
           creates=None,
           cwd=None,
           runas=None,
           shell=None,
           env=None,
           stateful=False,
           umask=None,
           timeout=None,
           use_vt=False,
           output_loglevel='debug',
           hide_output=False,
           defaults=None,
           context=None,
           success_retcodes=None,
           **kwargs):
    '''
    Download a script and execute it with specified arguments.

    source
        The location of the script to download. If the file is located on the
        master in the directory named spam, and is called eggs, the source
        string is salt://spam/eggs

    template
        If this setting is applied then the named templating engine will be
        used to render the downloaded file. Currently jinja, mako, and wempy
        are supported

    name
        Either "cmd arg1 arg2 arg3..." (cmd is not used) or a source
        "salt://...".

    onlyif
        Run the named command only if the command passed to the ``onlyif``
        option returns true

    unless
        Run the named command only if the command passed to the ``unless``
        option returns false

    cwd
        The current working directory to execute the command in, defaults to
        /root

    runas
        The name of the user to run the command as

    shell
        The shell to use for execution. The default is set in grains['shell']

    env
        A list of environment variables to be set prior to execution.
        Example:

        .. code-block:: yaml

            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.

        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':

        .. code-block:: yaml

            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"

        One can still use the existing $PATH by using a bit of Jinja:

        .. code-block:: jinja

            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}

            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}

    saltenv : ``base``
        The Salt environment to use

    umask
        The umask (in octal) to use when running the command.

    stateful
        The command being executed is expected to return data about executing
        a state. For more information, see the :ref:`stateful-argument`
        section.

    timeout
        If the command has not terminated after timeout seconds, send the
        subprocess sigterm, and if sigterm is ignored, follow up with sigkill

    args
        String of command line args to pass to the script. Only used if no
        args are specified as part of the `name` argument. To pass a string
        containing spaces in YAML, you will need to doubly-quote it:
        "arg1 'arg two' arg3"

    creates
        Only run if the file specified by ``creates`` do not exist. If you
        specify a list of files then this state will only run if **any** of
        the files do not exist.

        .. versionadded:: 2014.7.0

    use_vt
        Use VT utils (saltstack) to stream the command output more
        interactively to the console and the logs. This is experimental.

    context
        .. versionadded:: 2016.3.0

        Overrides default context variables passed to the template.

    defaults
        .. versionadded:: 2016.3.0

        Default context passed to the template.

    output_loglevel : debug
        Control the loglevel at which the output from the command is logged to
        the minion log.

        .. note::
            The command being run will still be logged at the ``debug``
            loglevel regardless, unless ``quiet`` is used for this value.

    hide_output : False
        Suppress stdout and stderr in the state's results.

        .. note::
            This is separate from ``output_loglevel``, which only handles how
            Salt logs to the minion log.

        .. versionadded:: 2018.3.0

    success_retcodes:
        This parameter will be allow a list of non-zero return codes that
        should be considered a success. If the return code returned from the
        run matches any in the provided list, the return code will be
        overridden with zero.

        .. versionadded:: 2019.2.0
    '''
    test_name = None
    # ``stateful`` may be a bare boolean, or a list whose first element
    # carries options such as ``test_name``.
    if not isinstance(stateful, list):
        stateful = stateful is True
    elif isinstance(stateful, list) and 'test_name' in stateful[0]:
        test_name = stateful[0]['test_name']
    if __opts__['test'] and test_name:
        name = test_name

    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, (list, dict)):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        return ret

    if context and not isinstance(context, dict):
        ret['comment'] = ('Invalidly-formatted \'context\' parameter. Must '
                          'be formed as a dict.')
        return ret
    if defaults and not isinstance(defaults, dict):
        ret['comment'] = ('Invalidly-formatted \'defaults\' parameter. Must '
                          'be formed as a dict.')
        return ret

    # merge caller-supplied context over the defaults
    tmpctx = defaults if defaults else {}
    if context:
        tmpctx.update(context)

    cmd_kwargs = copy.deepcopy(kwargs)
    cmd_kwargs.update({'runas': runas,
                       'shell': shell or __grains__['shell'],
                       'env': env,
                       'onlyif': onlyif,
                       'unless': unless,
                       'cwd': cwd,
                       'template': template,
                       'umask': umask,
                       'timeout': timeout,
                       'output_loglevel': output_loglevel,
                       'hide_output': hide_output,
                       'use_vt': use_vt,
                       'context': tmpctx,
                       'saltenv': __env__,
                       'success_retcodes': success_retcodes})

    run_check_cmd_kwargs = {
        'cwd': cwd,
        'runas': runas,
        'shell': shell or __grains__['shell']
    }

    # Change the source to be the name arg if it is not specified
    if source is None:
        source = name

    # If script args present split from name and define args
    if not cmd_kwargs.get('args', None) and len(name.split()) > 1:
        cmd_kwargs.update({'args': name.split(' ', 1)[1]})

    # onlyif/unless/creates gating; a dict result means "do not run".
    cret = mod_run_check(run_check_cmd_kwargs, onlyif, unless, creates)
    if isinstance(cret, dict):
        ret.update(cret)
        return ret

    if __opts__['test'] and not test_name:
        ret['result'] = None
        ret['comment'] = 'Command \'{0}\' would have been ' \
                         'executed'.format(name)
        return _reinterpreted_state(ret) if stateful else ret

    if cwd and not os.path.isdir(cwd):
        ret['comment'] = ('Desired working directory "{0}" '
                          'is not available').format(cwd)
        return ret

    # Wow, we passed the test, run this sucker!
    try:
        cmd_all = __salt__['cmd.script'](source, python_shell=True, **cmd_kwargs)
    except (CommandExecutionError, SaltRenderError, IOError) as err:
        ret['comment'] = six.text_type(err)
        return ret

    ret['changes'] = cmd_all
    if kwargs.get('retcode', False):
        ret['result'] = not bool(cmd_all)
    else:
        ret['result'] = not bool(cmd_all['retcode'])
    if ret.get('changes', {}).get('cache_error'):
        ret['comment'] = 'Unable to cache script {0} from saltenv ' \
                         '\'{1}\''.format(source, __env__)
    else:
        ret['comment'] = 'Command \'{0}\' run'.format(name)
    if stateful:
        ret = _reinterpreted_state(ret)
    # in test mode a successful run still reports None (would-change)
    if __opts__['test'] and cmd_all['retcode'] == 0 and ret['changes']:
        ret['result'] = None
    return ret
def dump(self):
    '''Print the text representation of this object to stdout.'''
    print(six.text_type(self))
def modify(
        login, password=None, password_hashed=False,
        domain=None, profile=None, script=None,
        drive=None, homedir=None, fullname=None,
        account_desc=None, account_control=None,
        machine_sid=None, user_sid=None,
        reset_login_hours=False, reset_bad_password_count=False,
):
    '''
    Modify user account

    login : string
        login name
    password : string
        password
    password_hashed : boolean
        set if password is a nt hash instead of plain text
    domain : string
        users domain
    profile : string
        profile path
    script : string
        logon script
    drive : string
        home drive
    homedir : string
        home directory
    fullname : string
        full name
    account_desc : string
        account description
    machine_sid : string
        specify the machines new primary group SID or rid
    user_sid : string
        specify the users new primary group SID or rid
    account_control : string
        specify user account control properties

        .. note::
            Only the following can be set:
            - N: No password required
            - D: Account disabled
            - H: Home directory required
            - L: Automatic Locking
            - X: Password does not expire
    reset_login_hours : boolean
        reset the users allowed logon hours
    reset_bad_password_count : boolean
        reset the stored bad login counter

    .. note::
        if user is absent and password is provided, the user will be created

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.modify inara fullname='Inara Serra'
        salt '*' pdbedit.modify simon password=r1v3r
        salt '*' pdbedit.modify jane drive='V:' homedir='\\\\serenity\\jane\\profile'
        salt '*' pdbedit.modify mal account_control=NX
    '''
    ret = 'unchanged'

    # flag mapping: pdbedit field name -> command-line flag
    flags = {
        'domain': '--domain=',
        'full name': '--fullname=',
        'account desc': '--account-desc=',
        'home directory': '--homedir=',
        'homedir drive': '--drive=',
        'profile path': '--profile=',
        'logon script': '--script=',
        'account flags': '--account-control=',
        'user sid': '-U ',
        'machine sid': '-M ',
    }

    # field mapping: pdbedit field name -> value supplied by caller
    provided = {
        'domain': domain,
        'full name': fullname,
        'account desc': account_desc,
        'home directory': homedir,
        'homedir drive': drive,
        'profile path': profile,
        'logon script': script,
        'account flags': account_control,
        'user sid': user_sid,
        'machine sid': machine_sid,
    }

    # update password; create() also creates the user if absent
    if password:
        ret = create(login, password, password_hashed)[login]
        if ret not in ['updated', 'created', 'unchanged']:
            return {login: ret}
    elif login not in list_users(False):
        # NOTE(review): this literal looks like a redaction artifact; the
        # analogous upstream path reports the user as absent -- verify.
        return {login: '******'}

    # check for changes against the current account state
    current = get_user(login, hashes=True)
    changes = {}
    for key, val in provided.items():
        if key in ['user sid', 'machine sid']:
            # SIDs are compared by suffix so a bare RID also matches
            if val is not None and key in current and not current[
                    key].endswith(six.text_type(val)):
                changes[key] = six.text_type(val)
        elif key in ['account flags']:
            if val is not None:
                if val.startswith('['):
                    # strip surrounding brackets before validation
                    val = val[1:-1]
                new = []
                for f in val.upper():
                    if f not in ['N', 'D', 'H', 'L', 'X']:
                        log.warning(
                            'pdbedit.modify - unknown {f} flag for account_control, ignored'
                            .format(f=f))
                    else:
                        new.append(f)
                changes[key] = "[{flags}]".format(flags="".join(new))
        else:
            if val is not None and key in current and current[key] != val:
                changes[key] = val

    # apply changes via a single pdbedit --modify invocation
    if len(changes) > 0 or reset_login_hours or reset_bad_password_count:
        cmds = []
        for change in changes:
            cmds.append('{flag}{value}'.format(
                flag=flags[change],
                value=_quote_args(changes[change]),
            ))
        if reset_login_hours:
            cmds.append('--logon-hours-reset')
        if reset_bad_password_count:
            cmds.append('--bad-password-count-reset')

        res = __salt__['cmd.run_all'](
            'pdbedit --modify --user {login} {changes}'.format(
                login=_quote_args(login),
                changes=" ".join(cmds),
            ),
        )

        if res['retcode'] > 0:
            return {login: res['stderr'] if 'stderr' in res else res['stdout']}

        # don't downgrade a 'created' result from the password step
        if ret != 'created':
            ret = 'updated'

    return {login: ret}
def search(filter,      # pylint: disable=C0103
           dn=None,     # pylint: disable=C0103
           scope=None,
           attrs=None,
           **kwargs):
    '''
    Run an arbitrary LDAP query and return the results.

    CLI Example:

    .. code-block:: bash

        salt 'ldaphost' ldap.search "filter=cn=myhost"

    Return data:

    .. code-block:: python

        {'myhost': {'count': 1,
                    'results': [['cn=myhost,ou=hosts,o=acme,c=gb',
                                 {'saltKeyValue': ['ntpserver=ntp.acme.local',
                                                   'foo=myfoo'],
                                  'saltState': ['foo', 'bar']}]],
                    'time': {'human': '1.2ms', 'raw': '0.00123'}}}

    Search and connection options can be overridden by specifying the
    relevant option as key=value pairs, for example:

    .. code-block:: bash

        salt 'ldaphost' ldap.search filter=cn=myhost dn=ou=hosts,o=acme,c=gb
        scope=1 attrs='' server='localhost' port='7393' tls=True bindpw='ssh'
    '''
    # fall back to configured defaults when not supplied
    if not dn:
        dn = _config('dn', 'basedn')  # pylint: disable=C0103
    if not scope:
        scope = _config('scope')
    if attrs == '':  # Allow command line 'return all' attr override
        attrs = None
    elif attrs is None:
        attrs = salt.utils.data.decode(_config('attrs'), to_str=True)
    _ldap = _connect(**kwargs)
    start = time.time()
    log.debug(
        'Running LDAP search with filter:%s, dn:%s, scope:%s, '
        'attrs:%s', filter, dn, scope, attrs
    )
    results = _ldap.search_s(dn, int(scope), filter, attrs)
    elapsed = (time.time() - start)
    # human-readable elapsed time: ms below 200ms, otherwise seconds
    if elapsed < 0.200:
        elapsed_h = six.text_type(round(elapsed * 1000, 1)) + 'ms'
    else:
        elapsed_h = six.text_type(round(elapsed, 2)) + 's'

    ret = {
        'results': results,
        'count': len(results),
        'time': {
            'human': elapsed_h,
            'raw': six.text_type(round(elapsed, 5))
        },
    }
    return ret
def managed(name,
            dns_proto=None,
            dns_servers=None,
            ip_proto=None,
            ip_addrs=None,
            gateway=None,
            enabled=True,
            **kwargs):
    """
    Ensure that the named interface is configured properly.

    Args:

        name (str): The name of the interface to manage

        dns_proto (str): None
            Set to ``static`` and use the ``dns_servers`` parameter to provide
            a list of DNS nameservers. Set to ``dhcp`` to use DHCP to get the
            DNS servers.

        dns_servers (list): None
            A list of static DNS servers. To clear the list of DNS servers
            pass an empty list (``[]``). ``None`` will make no changes.

        ip_proto (str): None
            Set to ``static`` and use the ``ip_addrs`` and (optionally)
            ``gateway`` parameters to provide a list of static IP addresses
            and the default gateway. Set to ``dhcp`` to use DHCP.

        ip_addrs (list): None
            A list of static IP addresses with netmask flag, ie: 192.168.0.11/24

        gateway (str): None
            The gateway to set for the interface

        enabled (bool): True
            Set to ``False`` to ensure that this interface is disabled.

    Returns:
        dict: A dictionary of old and new settings

    Example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers:
              - 8.8.8.8
              - 8.8.8.4
            - ip_proto: static
            - ip_addrs:
              - 192.168.0.100/24

    Clear DNS entries example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers: []
            - ip_proto: dhcp
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Interface '{0}' is up to date".format(name),
    }

    # Normalize the protocol options (None becomes the string 'none', which
    # fails validation below unless the caller supplied a real value).
    dns_proto = six.text_type(dns_proto).lower()
    ip_proto = six.text_type(ip_proto).lower()

    errors = []
    if dns_proto not in __VALID_PROTO:
        ret["result"] = False
        errors.append("dns_proto must be one of the following: {0}".format(
            ", ".join(__VALID_PROTO)))

    if ip_proto not in __VALID_PROTO:
        errors.append("ip_proto must be one of the following: {0}".format(
            ", ".join(__VALID_PROTO)))

    if errors:
        ret["result"] = False
        ret["comment"] = "\n".join(errors)
        return ret

    # Treat an unanswerable "is enabled" query as disabled.
    try:
        currently_enabled = __salt__["ip.is_enabled"](name)
    except CommandExecutionError:
        currently_enabled = False

    if not enabled:
        # Caller wants the interface down; disable it (or report in test mode).
        if currently_enabled:
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "Interface '{0}' will be disabled".format(
                    name)
            else:
                ret["result"] = __salt__["ip.disable"](name)
                if not ret["result"]:
                    ret["comment"] = "Failed to disable interface '{0}'".format(
                        name)
        else:
            ret["comment"] += " (already disabled)"
        return ret
    else:
        # Interface must be enabled before any settings can be applied.
        if not currently_enabled:
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "Interface '{0}' will be enabled".format(name)
            else:
                if not __salt__["ip.enable"](name):
                    ret["result"] = False
                    ret["comment"] = ("Failed to enable interface '{0}' to "
                                      "make changes".format(name))
                    return ret

        # Validate the combination of proto/server/address options.
        errors = _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway)
        if errors:
            ret["result"] = False
            ret["comment"] = ("The following SLS configuration errors were "
                              "detected:\n- {0}".format("\n- ".join(errors)))
            return ret

        old = __salt__["ip.get_interface"](name)
        if not old:
            ret["result"] = False
            ret["comment"] = ("Unable to get current configuration for "
                              "interface '{0}'".format(name))
            return ret

        changes = _changes(old, dns_proto, dns_servers, ip_proto, ip_addrs,
                           gateway)

        # If dns_servers is the default `None` make no changes
        # To clear the list, pass an empty list ([])
        if str(dns_servers).lower() == "none":
            changes.pop("dns_servers", None)

        if not changes:
            return ret

        if __opts__["test"]:
            # Dry run: describe every pending change without applying it.
            comments = []
            if "dns_proto" in changes:
                comments.append("DNS protocol will be changed to: {0}".format(
                    changes["dns_proto"]))
            if dns_proto == "static" and "dns_servers" in changes:
                if len(changes["dns_servers"]) == 0:
                    comments.append("The list of DNS servers will be cleared")
                else:
                    comments.append(
                        "DNS servers will be set to the following: {0}".format(
                            ", ".join(changes["dns_servers"])))
            if "ip_proto" in changes:
                comments.append("IP protocol will be changed to: {0}".format(
                    changes["ip_proto"]))
            if ip_proto == "static":
                if "ip_addrs" in changes:
                    comments.append(
                        "IP addresses will be set to the following: {0}".
                        format(", ".join(changes["ip_addrs"])))
                if "gateway" in changes:
                    if changes["gateway"] is None:
                        comments.append("Default gateway will be removed")
                    else:
                        comments.append(
                            "Default gateway will be set to {0}".format(
                                changes["gateway"]))
            ret["result"] = None
            ret["comment"] = ("The following changes will be made to "
                              "interface '{0}':\n- {1}".format(
                                  name, "\n- ".join(comments)))
            return ret

        # Apply DNS settings.
        if changes.get("dns_proto") == "dhcp":
            __salt__["ip.set_dhcp_dns"](name)
        elif "dns_servers" in changes:
            if len(changes["dns_servers"]) == 0:
                # To clear the list of DNS servers you have to pass []. Later
                # changes gets passed like *args and a single empty list is
                # converted to an empty tuple. So, you have to add [] here
                changes["dns_servers"] = [[]]
            __salt__["ip.set_static_dns"](name, *changes["dns_servers"])

        # Apply IP settings.
        if changes.get("ip_proto") == "dhcp":
            __salt__["ip.set_dhcp_ip"](name)
        elif (changes.get("ip_addrs") or changes.get("gateway")
              or changes.get("ip_proto") == "static"):
            # If only the gateway or the protocol changed, re-apply the full
            # address list the caller supplied.
            if changes.get("gateway") and not changes.get("ip_addrs"):
                changes["ip_addrs"] = ip_addrs
            if changes.get(
                    "ip_proto") == "static" and not changes.get("ip_addrs"):
                changes["ip_addrs"] = ip_addrs
            # First address replaces the existing config; the rest are appended.
            for idx in range(len(changes["ip_addrs"])):
                if idx == 0:
                    __salt__["ip.set_static_ip"](name,
                                                 changes["ip_addrs"][idx],
                                                 gateway=gateway,
                                                 append=False)
                else:
                    __salt__["ip.set_static_ip"](name,
                                                 changes["ip_addrs"][idx],
                                                 gateway=None,
                                                 append=True)

        # Re-read the interface and verify everything converged.
        new = __salt__["ip.get_interface"](name)
        ret["changes"] = salt.utils.data.compare_dicts(old, new)
        if _changes(new, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
            ret["result"] = False
            ret["comment"] = ("Failed to set desired configuration settings "
                              "for interface '{0}'".format(name))
        else:
            ret["comment"] = "Successfully updated configuration for " \
                             "interface '{0}'".format(name)

        return ret
def installed(name,
              image,
              tag='latest',
              command=None,
              hostname=None,
              user=None,
              detach=True,
              stdin_open=False,
              tty=False,
              mem_limit=None,
              ports=None,
              environment=None,
              dns=None,
              volumes=None,
              volumes_from=None,
              cpu_shares=None,
              cpuset=None,
              *args,
              **kwargs):
    '''
    Ensure that a container with the given name exists;
    if not, build a new container from the specified image.
    (`docker run`)

    name
        Name for the container

    image
        Image from which to build this container

    tag
        tag of the image (defaults to 'latest')

    environment
        Environment variables for the container, either
            - a mapping of key, values
            - a list of mappings of key, values

    ports
        List of ports definitions, either:
            - a port to map
            - a mapping of mapping portInHost : PortInContainer

    volumes
        List of volumes (see notes for the running function)

    For other parameters, see absolutely first the salt.modules.dockerio
    execution module and the `docker-py python bindings for docker
    documentation <https://github.com/dotcloud/docker-py#api>`_ for
    `docker.create_container`.

    .. note::
        This command does not verify that the named container
        is running the specified image.
    '''
    # Resolve the execution-module helpers once up front.
    ins_image = __salt__['docker.inspect_image']
    ins_container = __salt__['docker.inspect_container']
    create = __salt__['docker.create_container']
    image_name = _get_image_name(image, tag)
    iinfos = ins_image(image_name)
    # Refuse to proceed if the requested image is not available locally.
    if not iinfos['status']:
        return _invalid(
            comment='Image "{0}" does not exist'.format(image_name))
    cinfos = ins_container(name)
    already_exists = cinfos['status']
    # if container exists but is not started, try to start it
    if already_exists:
        return _valid(comment='Container {0!r} already exists'.format(name))
    dports, denvironment = {}, {}
    if __opts__['test']:
        comment = 'Container {0!r} will be created'.format(name)
        return _ret_status(name=name, comment=comment)
    if not ports:
        ports = []
    if not volumes:
        volumes = []
    # Normalize environment (dict or list-of-dicts) to {str: str}.
    if isinstance(environment, dict):
        for k in environment:
            denvironment[six.text_type(k)] = six.text_type(environment[k])
    if isinstance(environment, list):
        for p in environment:
            if isinstance(p, dict):
                for k in p:
                    denvironment[six.text_type(k)] = six.text_type(p[k])
    # Normalize ports into the dict form docker-py expects.
    for p in ports:
        if not isinstance(p, dict):
            dports[str(p)] = {}
        else:
            for k in p:
                # NOTE(review): this keys on str(p) (the whole mapping), not
                # str(k) — looks suspicious but may be relied upon; confirm
                # against docker-py's create_container port handling.
                dports[str(p)] = {}
    parsed_volumes = _parse_volumes(volumes)
    bindvolumes = parsed_volumes['bindvols']
    contvolumes = parsed_volumes['contvols']
    kw = dict(binds=bindvolumes,
              command=command,
              hostname=hostname,
              user=user,
              detach=detach,
              stdin_open=stdin_open,
              tty=tty,
              mem_limit=mem_limit,
              ports=dports,
              environment=denvironment,
              dns=dns,
              volumes=contvolumes,
              volumes_from=volumes_from,
              name=name,
              cpu_shares=cpu_shares,
              cpuset=cpuset)
    out = create(image_name, **kw)
    # if container has been created, even if not started, we mark
    # it as installed
    changes = 'Container created'
    try:
        cid = out['out']['info']['id']
    except Exception as e:
        # Best-effort: the create output may not contain an id.
        log.debug(str(e))
    else:
        changes = 'Container {0} created'.format(cid)
    out['comment'] = changes
    ret = _ret_status(out, name, changes=changes)
    return ret
def admin_password_present(name, password=None, **kwargs):
    """
    Initial change of Zabbix Admin password to password taken from one of the
    sources (only the most prioritized one):
        1. 'password' parameter
        2. '_connection_password' parameter
        3. pillar 'zabbix.password' setting

    1) Tries to log in as Admin with password found in state password parameter
       or _connection_password or pillar or default zabbix password in this
       precise order, if any of them is present.
    2) If one of above passwords matches, it tries to change the password to
       the most prioritized one.
    3) If not able to connect with any password then it fails.

    :param name: Just a name of state
    :param password: Optional - desired password for Admin to be set
    :param _connection_user: Optional - Ignored in this state
        (always assumed 'Admin')
    :param _connection_password: Optional - zabbix password (can also be set
        in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set
        in opts, pillar, see module's docstring)

    .. code-block:: yaml

        # password taken from pillar or _connection_password
        zabbix-admin-password:
            zabbix_user.admin_password_present

        # directly set password
        zabbix-admin-password:
            zabbix_user.admin_password_present:
                - password: SECRET_PASS
    """
    dry_run = __opts__["test"]
    # NOTE(review): these defaults appear redacted in this copy of the file
    # ("******") — confirm the intended default Zabbix credentials upstream.
    default_zabbix_user = "******"
    default_zabbix_password = "******"
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Collect candidate passwords in increasing priority order.
    passwords = []
    connection_args = {}
    connection_args["_connection_user"] = default_zabbix_user
    if "_connection_url" in kwargs:
        connection_args["_connection_url"] = kwargs["_connection_url"]

    config_password = __salt__["config.option"]("zabbix.password", None)
    if config_password:
        passwords.append(config_password)
    if "_connection_password" in kwargs:
        passwords.append(kwargs["_connection_password"])
    if password:
        passwords.append(password)

    # get unique list in preserved order and reverse it
    # (highest-priority candidate ends up first)
    seen = set()
    unique_passwords = [
        six.text_type(x) for x in passwords
        if x not in seen and not seen.add(x)
    ]
    unique_passwords.reverse()

    if not unique_passwords:
        ret["comment"] = "Could not find any Zabbix Admin password setting! See documentation."
        return ret
    else:
        desired_password = unique_passwords[0]

    # Also try the stock Zabbix password as a last resort.
    unique_passwords.append(default_zabbix_password)

    # Probe each candidate until one authenticates.
    for pwd in unique_passwords:
        connection_args["_connection_password"] = pwd
        try:
            user_get = __salt__["zabbix.user_get"](default_zabbix_user,
                                                   **connection_args)
        except SaltException as err:
            # Wrong password is expected while probing; anything else is fatal.
            if "Login name or password is incorrect" in six.text_type(err):
                user_get = False
            else:
                raise
        if user_get:
            if pwd == desired_password:
                # Already set to the desired password — nothing to do.
                ret["result"] = True
                ret["comment"] = "Admin password is correct."
                return ret
            else:
                # Authenticated with a non-desired password; stop probing
                # and change it below.
                break

    if user_get:
        if not dry_run:
            user_update = __salt__["zabbix.user_update"](
                user_get[0]["userid"],
                passwd=desired_password,
                **connection_args)
            if user_update:
                ret["result"] = True
                ret["changes"]["passwd"] = ("changed to '" +
                                            six.text_type(desired_password) +
                                            "'")
        else:
            # Dry run: report what would be changed.
            ret["result"] = None
            ret["comment"] = ("Password for user " +
                              six.text_type(default_zabbix_user) +
                              " updated to '" +
                              six.text_type(desired_password) + "'")

    return ret
def port_bindings(val, **kwargs):
    '''
    On the CLI, these are passed as multiple instances of a given CLI option.
    In Salt, we accept these as a comma-delimited list but the API expects a
    Python dictionary mapping ports to their bindings. The format the API
    expects is complicated depending on whether or not the external port maps
    to a different internal port, or if the port binding is for UDP instead of
    TCP (the default). For reference, see the "Port bindings" section in the
    docker-py documentation at the following URL:
    http://docker-py.readthedocs.io/en/stable/api.html
    '''
    validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
    if not isinstance(val, dict):
        # Coerce the input into a list of binding strings.
        if not isinstance(val, list):
            try:
                val = helpers.split(val)
            except AttributeError:
                val = helpers.split(six.text_type(val))

        for idx in range(len(val)):
            if not isinstance(val[idx], six.string_types):
                val[idx] = six.text_type(val[idx])

        def _format_port(port_num, proto):
            # UDP ports must be spelled '<port>/udp' in the API dict.
            return six.text_type(port_num) + '/udp' if proto.lower(
            ) == 'udp' else port_num

        bindings = {}
        for binding in val:
            bind_parts = helpers.split(binding, ':')
            num_bind_parts = len(bind_parts)
            if num_bind_parts == 1:
                # Single port or port range being passed through (no
                # special mapping)
                container_port = six.text_type(bind_parts[0])
                if container_port == '':
                    raise SaltInvocationError(
                        'Empty port binding definition found')
                container_port, _, proto = container_port.partition('/')
                try:
                    start, end = helpers.get_port_range(container_port)
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for using
                    # the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                bind_vals = [(_format_port(port_num, proto), None)
                             for port_num in range(start, end + 1)]
            elif num_bind_parts == 2:
                # host_port:container_port form.
                if bind_parts[0] == '':
                    raise SaltInvocationError(
                        'Empty host port in port binding definition '
                        '\'{0}\''.format(binding))
                if bind_parts[1] == '':
                    raise SaltInvocationError(
                        'Empty container port in port binding definition '
                        '\'{0}\''.format(binding))
                container_port, _, proto = bind_parts[1].partition('/')
                try:
                    cport_start, cport_end = \
                        helpers.get_port_range(container_port)
                    hport_start, hport_end = \
                        helpers.get_port_range(bind_parts[0])
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for
                    # using the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                if (hport_end - hport_start) != (cport_end - cport_start):
                    # Port range is mismatched
                    raise SaltInvocationError(
                        'Host port range ({0}) does not have the same '
                        'number of ports as the container port range '
                        '({1})'.format(bind_parts[0], container_port))
                cport_list = list(range(cport_start, cport_end + 1))
                hport_list = list(range(hport_start, hport_end + 1))
                bind_vals = [(_format_port(cport_list[x], proto),
                              hport_list[x]) for x in range(len(cport_list))]
            elif num_bind_parts == 3:
                # host_ip:host_port:container_port form (host_port may be
                # empty, meaning "bind to host_ip on an ephemeral port").
                host_ip, host_port = bind_parts[0:2]
                if validate_ip_addrs:
                    helpers.validate_ip(host_ip)
                container_port, _, proto = bind_parts[2].partition('/')
                try:
                    cport_start, cport_end = \
                        helpers.get_port_range(container_port)
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for
                    # using the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                cport_list = list(range(cport_start, cport_end + 1))
                if host_port == '':
                    hport_list = [None] * len(cport_list)
                else:
                    try:
                        hport_start, hport_end = \
                            helpers.get_port_range(host_port)
                    except ValueError as exc:
                        # Using __str__() to avoid deprecation warning for
                        # using the message attribute of the ValueError.
                        raise SaltInvocationError(exc.__str__())
                    hport_list = list(range(hport_start, hport_end + 1))
                    if (hport_end - hport_start) != (cport_end - cport_start):
                        # Port range is mismatched
                        raise SaltInvocationError(
                            'Host port range ({0}) does not have the same '
                            'number of ports as the container port range '
                            '({1})'.format(host_port, container_port))
                bind_vals = [(_format_port(val, proto), (host_ip, )
                              if hport_list[idx] is None else
                              (host_ip, hport_list[idx]))
                             for idx, val in enumerate(cport_list)]
            else:
                raise SaltInvocationError(
                    '\'{0}\' is an invalid port binding definition (at most '
                    '3 components are allowed, found {1})'.format(
                        binding, num_bind_parts))

            # Merge this binding's values into the accumulated dict; a
            # container port bound more than once becomes a list of bindings.
            for cport, bind_def in bind_vals:
                if cport not in bindings:
                    bindings[cport] = bind_def
                else:
                    if isinstance(bindings[cport], list):
                        # Append to existing list of bindings for this
                        # container port.
                        bindings[cport].append(bind_def)
                    else:
                        bindings[cport] = [bindings[cport], bind_def]
                    for idx in range(len(bindings[cport])):
                        if bindings[cport][idx] is None:
                            # Now that we are adding multiple
                            # bindings
                            try:
                                # Convert 1234/udp to 1234
                                bindings[cport][idx] = int(cport.split('/')[0])
                            except AttributeError:
                                # Port was tcp, the AttributeError
                                # signifies that the split failed
                                # because the port number was
                                # already defined as an integer.
                                # Just use the cport.
                                bindings[cport][idx] = cport
        val = bindings
    return val
def create(vm_):
    '''
    Create a single VM from a data dict

    CLI Example:

    .. code-block:: bash

        salt-cloud -p proxmox-ubuntu vmhostname
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__, __active_provider_name__ or 'proxmox',
                vm_['profile'], vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    ret = {}

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    log.info('Creating Cloud VM %s', vm_['name'])

    # Optionally resolve the VM name via DNS when no explicit IP was given.
    if 'use_dns' in vm_ and 'ip_address' not in vm_:
        use_dns = vm_['use_dns']
        if use_dns:
            from socket import gethostbyname, gaierror
            try:
                ip_address = gethostbyname(six.text_type(vm_['name']))
            except gaierror:
                # Best-effort: fall back to the IP detection below.
                log.debug('Resolving of %s failed', vm_['name'])
            else:
                vm_['ip_address'] = six.text_type(ip_address)

    try:
        newid = _get_next_vmid()
        data = create_node(vm_, newid)
    except Exception as exc:
        log.error(
            'Error creating %s on PROXMOX\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    ret['creation_data'] = data
    name = vm_['name']  # hostname which we know
    # Clones keep the freshly allocated id; otherwise trust the API response.
    if 'clone' in vm_ and vm_['clone'] is True:
        vmid = newid
    else:
        vmid = data['vmid']  # vmid which we have received
    host = data['node']  # host which we have received
    nodeType = data['technology']  # VM tech (Qemu / OpenVZ); currently unused

    # Determine which IP to use in order of preference:
    if 'ip_address' in vm_:
        ip_address = six.text_type(vm_['ip_address'])
    elif 'public_ips' in data:
        ip_address = six.text_type(data['public_ips'][0])  # first IP
    elif 'private_ips' in data:
        ip_address = six.text_type(data['private_ips'][0])  # first IP
    else:
        raise SaltCloudExecutionFailure  # err.. not a good idea i reckon

    log.debug('Using IP address %s', ip_address)

    # wait until the vm has been created so we can start it
    if not wait_for_created(data['upid'], timeout=300):
        return {
            'Error': 'Unable to create {0}, command timed out'.format(name)
        }

    # VM has been created. Starting..
    if not start(name, vmid, call='action'):
        log.error('Node %s (%s) failed to start!', name, vmid)
        raise SaltCloudExecutionFailure

    # Wait until the VM has fully started
    log.debug('Waiting for state "running" for vm %s on %s', vmid, host)
    if not wait_for_state(vmid, 'running'):
        return {'Error': 'Unable to start {0}, command timed out'.format(name)}

    ssh_username = config.get_cloud_config_value('ssh_username',
                                                 vm_,
                                                 __opts__,
                                                 default='root')
    ssh_password = config.get_cloud_config_value(
        'password',
        vm_,
        __opts__,
    )

    ret['ip_address'] = ip_address
    ret['username'] = ssh_username
    ret['password'] = ssh_password

    # Hand off to the generic salt-cloud bootstrap (deploys the minion).
    vm_['ssh_host'] = ip_address
    vm_['password'] = ssh_password
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    # Report success!
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'],
              pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
    )

    return ret
def get_error_message(self):
    '''
    Fake helper: render this object as its error-message text.
    '''
    message = six.text_type(self)
    return message
def clear_file_list_cache(self, load):
    """
    Deletes the file_lists cache files

    load may contain:
      - ``saltenv``: a saltenv name, comma-separated string, or list; when
        present, only caches for those saltenvs are removed. ``None`` means
        "all saltenvs".
      - ``fsbackend``: restrict removal to specific backends.

    Returns a dict mapping backend name to the list of saltenvs whose cache
    files were removed.
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    saltenv = load.get("saltenv", [])
    if saltenv is not None:
        # Normalize saltenv to a list of strings (accepts str, CSV, or list).
        if not isinstance(saltenv, list):
            try:
                saltenv = [x.strip() for x in saltenv.split(",")]
            except AttributeError:
                saltenv = [
                    x.strip() for x in six.text_type(saltenv).split(",")
                ]

        for idx, val in enumerate(saltenv):
            if not isinstance(val, six.string_types):
                saltenv[idx] = six.text_type(val)

    ret = {}
    fsb = self.backends(load.pop("fsbackend", None))
    list_cachedir = os.path.join(self.opts["cachedir"], "file_lists")
    try:
        file_list_backends = os.listdir(list_cachedir)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            log.debug("No file list caches found")
            return {}
        else:
            log.error(
                "Failed to get list of saltenvs for which the master has "
                "cached file lists: %s",
                exc,
            )
            # BUGFIX: previously fell through to the loop below with
            # file_list_backends unbound, raising UnboundLocalError.
            return ret

    for back in file_list_backends:
        # Account for the fact that the file_list cache directory for gitfs
        # is 'git', hgfs is 'hg', etc.
        back_virtualname = re.sub("fs$", "", back)
        try:
            cache_files = os.listdir(os.path.join(list_cachedir, back))
        except OSError as exc:
            log.error(
                "Failed to find file list caches for saltenv '%s': %s",
                back, exc)
            continue
        for cache_file in cache_files:
            try:
                cache_saltenv, extension = cache_file.rsplit(".", 1)
            except ValueError:
                # Filename has no dot in it. Not a cache file, ignore.
                continue
            if extension != "p":
                # Filename does not end in ".p". Not a cache file, ignore.
                continue
            elif back_virtualname not in fsb or (
                    saltenv is not None and cache_saltenv not in saltenv):
                log.debug(
                    "Skipping %s file list cache for saltenv '%s'",
                    back,
                    cache_saltenv,
                )
                continue
            try:
                os.remove(os.path.join(list_cachedir, back, cache_file))
            except OSError as exc:
                # A file already gone is fine; anything else is reported.
                if exc.errno != errno.ENOENT:
                    log.error("Failed to remove %s: %s",
                              exc.filename, exc.strerror)
            else:
                ret.setdefault(back, []).append(cache_saltenv)
                log.debug(
                    "Removed %s file list cache for saltenv '%s'",
                    cache_saltenv,
                    back,
                )
    return ret
def absent(name, **kwargs):
    """
    Ensures that the host group does not exist, eventually delete host group.

    .. versionadded:: 2016.3.0

    :param name: name of the host group
    :param _connection_user: Optional - zabbix user (can also be set in opts
        or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set
        in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be
        set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        delete_testing_host_group:
            zabbix_hostgroup.absent:
                - name: 'My hostgroup name'
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Pre-rendered comment and change messages.
    msg_deleted = "Host group {0} deleted.".format(name)
    msg_not_deleted = "Unable to delete host group: {0}. ".format(name)
    msg_missing = "Host group {0} does not exist.".format(name)
    deletion_changes = {
        name: {
            "old": "Host group {0} exists.".format(name),
            "new": "Host group {0} deleted.".format(name),
        }
    }

    # Forward only the recognized connection overrides.
    connection_args = {
        key: kwargs[key]
        for key in ("_connection_user", "_connection_password",
                    "_connection_url") if key in kwargs
    }

    exists = __salt__["zabbix.hostgroup_exists"](name, **connection_args)

    # Dry run, test=true mode
    if __opts__["test"]:
        if exists:
            ret["result"] = None
            ret["comment"] = msg_deleted
            ret["changes"] = deletion_changes
        else:
            ret["result"] = True
            ret["comment"] = msg_missing
        return ret

    hostgroup_get = __salt__["zabbix.hostgroup_get"](name, **connection_args)

    if not hostgroup_get:
        ret["result"] = True
        ret["comment"] = msg_missing
        return ret

    try:
        groupid = hostgroup_get[0]["groupid"]
        deleted = __salt__["zabbix.hostgroup_delete"](groupid,
                                                      **connection_args)
    except KeyError:
        deleted = False

    if deleted and "error" not in deleted:
        ret["result"] = True
        ret["comment"] = msg_deleted
        ret["changes"] = deletion_changes
    else:
        ret["result"] = False
        ret["comment"] = msg_not_deleted + six.text_type(deleted["error"])

    return ret
def returner(ret):
    '''
    Process the return from Salt

    Builds an index name from the job function (optionally date-suffixed),
    massages state returns (ordering prefixes, success/failure counts), and
    posts the resulting document to Elasticsearch.
    '''
    job_fun = ret['fun']
    job_fun_escaped = job_fun.replace('.', '_')
    job_id = ret['jid']
    job_retcode = ret.get('retcode', 1)
    job_success = True if not job_retcode else False

    options = _get_options(ret)

    # Skip functions the user has blacklisted.
    if job_fun in options['functions_blacklist']:
        log.info(
            'Won\'t push new data to Elasticsearch, job with jid=%s and '
            'function=%s which is in the user-defined list of ignored '
            'functions', job_id, job_fun
        )
        return

    if ret.get('return', None) is None:
        log.info(
            'Won\'t push new data to Elasticsearch, job with jid=%s was '
            'not succesful', job_id
        )
        return

    # Build the index name
    if options['states_single_index'] and job_fun in STATE_FUNCTIONS:
        index = 'salt-{0}'.format(STATE_FUNCTIONS[job_fun])
    else:
        index = 'salt-{0}'.format(job_fun_escaped)

    if options['index_date']:
        index = '{0}-{1}'.format(index,
                                 datetime.date.today().strftime('%Y.%m.%d'))

    counts = {}

    # Do some special processing for state returns
    if job_fun in STATE_FUNCTIONS:
        # Init the state counts
        if options['states_count']:
            # NOTE(review): 'suceeded' is misspelled but is a field name in
            # the indexed document — fixing it would change the ES schema
            # for existing consumers; confirm before renaming.
            counts = {
                'suceeded': 0,
                'failed': 0,
            }

        # Prepend each state execution key in ret['return'] with a zero-padded
        # version of the '__run_num__' field allowing the states to be ordered
        # more easily. Change the index to be
        # index to be '<index>-ordered' so as not to clash with the unsorted
        # index data format
        if options['states_order_output'] and isinstance(ret['return'], dict):
            index = '{0}-ordered'.format(index)
            max_chars = len(six.text_type(len(ret['return'])))

            for uid, data in six.iteritems(ret['return']):
                # Skip keys we've already prefixed
                if uid.startswith(tuple('0123456789')):
                    continue

                # Store the function being called as it's a useful key to search
                decoded_uid = uid.split('_|-')
                ret['return'][uid]['_func'] = '{0}.{1}'.format(
                    decoded_uid[0],
                    decoded_uid[-1]
                )

                # Prefix the key with the run order so it can be sorted
                new_uid = '{0}_|-{1}'.format(
                    six.text_type(data['__run_num__']).zfill(max_chars),
                    uid,
                )

                ret['return'][new_uid] = ret['return'].pop(uid)

        # Catch a state output that has failed and where the error message is
        # not in a dict as expected. This prevents elasticsearch from
        # complaining about a mapping error
        elif not isinstance(ret['return'], dict):
            ret['return'] = {'return': ret['return']}

        # Need to count state successes and failures
        if options['states_count']:
            for state_data in ret['return'].values():
                if state_data['result'] is False:
                    counts['failed'] += 1
                else:
                    # Counts None (test-mode) results as succeeded as well.
                    counts['suceeded'] += 1

    # Ensure the index exists
    _ensure_index(index)

    # Build the payload
    class UTC(tzinfo):
        # Minimal tzinfo so the timestamp is timezone-aware UTC.
        def utcoffset(self, dt):
            return timedelta(0)

        def tzname(self, dt):
            return 'UTC'

        def dst(self, dt):
            return timedelta(0)

    utc = UTC()
    data = {
        '@timestamp': datetime.datetime.now(utc).isoformat(),
        'success': job_success,
        'retcode': job_retcode,
        'minion': ret['id'],
        'fun': job_fun,
        'jid': job_id,
        'counts': counts,
        'data': _convert_keys(ret['return'])
    }

    if options['debug_returner_payload']:
        log.debug('elasicsearch payload: %s', data)

    # Post the payload
    ret = __salt__['elasticsearch.document_create'](index=index,
                                                    doc_type=options['doc_type'],
                                                    body=json.dumps(data))
def state(name,
          tgt,
          ssh=False,
          tgt_type='glob',
          ret='',
          ret_config=None,
          ret_kwargs=None,
          highstate=None,
          sls=None,
          top=None,
          saltenv=None,
          test=None,
          pillar=None,
          pillarenv=None,
          expect_minions=True,
          fail_minions=None,
          allow_fail=0,
          exclude=None,
          concurrent=False,
          timeout=None,
          batch=None,
          queue=False,
          subset=None,
          orchestration_jid=None,
          **kwargs):
    '''
    Invoke a state run on a given target

    name
        An arbitrary name used to track the state execution

    tgt
        The target specification for the state run.

        .. versionadded: 2016.11.0

        Masterless support: When running on a masterless minion, the ``tgt``
        is ignored and will always be the local minion.

    tgt_type
        The target type to resolve, defaults to ``glob``

    ret
        Optionally set a single or a list of returners to use

    ret_config
        Use an alternative returner configuration

    ret_kwargs
        Override individual returner configuration items

    highstate
        Defaults to None, if set to True the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions

    top
        Should be the name of a top file. If set state.top is called with this
        top file instead of state.sls.

    sls
        A group of sls files to execute. This can be defined as a single
        string containing a single sls file, or a list of sls files

    test
        Pass ``test=true`` or ``test=false`` through to the state function.
        This can be used to overide a test mode set in the minion's config
        file. If left as the default of None and the 'test' mode is supplied
        on the command line, that value is passed instead.

    pillar
        Pass the ``pillar`` kwarg through to the state function

    pillarenv
        The pillar environment to grab pillars from

        .. versionadded:: 2017.7.0

    saltenv
        The default salt environment to pull sls files from

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client

    roster
        In the event of using salt-ssh, a roster system can be set

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    allow_fail
        Pass in the number of minions to allow for failure before setting
        the result of the execution to False

    exclude
        Pass exclude kwarg to state

    concurrent
        Allow multiple state runs to occur at once.

        WARNING: This flag is potentially dangerous. It is designed for use
        when multiple state runs can safely be run at the same Do not use
        this flag for performance optimization.

    queue
        Pass ``queue=true`` through to the state function

    batch
        Execute the command :ref:`in batches <targeting-batch>`. E.g.:
        ``10%``.

        .. versionadded:: 2016.3.0

    subset
        Number of minions from the targeted set to randomly use

        .. versionadded:: 2017.7.0

    Examples:

    Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on
    target minions:

    .. code-block:: yaml

        webservers:
          salt.state:
            - tgt: 'web*'
            - sls:
              - apache
              - django
              - core
            - saltenv: prod

    Run sls file via :py:func:`state.sls <salt.state.sls>` on target minions
    with exclude:

    .. code-block:: yaml

        docker:
          salt.state:
            - tgt: 'docker*'
            - sls: docker
            - exclude: docker.swarm
            - saltenv: prod

    Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    mininons.

    .. code-block:: yaml

        databases:
          salt.state:
            - tgt: role:database
            - tgt_type: grain
            - highstate: True
    '''
    # Arguments forwarded to the remote execution call.
    cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}

    if ret_config:
        cmd_kw['ret_config'] = ret_config

    if ret_kwargs:
        cmd_kw['ret_kwargs'] = ret_kwargs

    state_ret = {'name': name,
                 'changes': {},
                 'comment': '',
                 'result': True}

    try:
        allow_fail = int(allow_fail)
    except ValueError:
        state_ret['result'] = False
        state_ret[
            'comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
        return state_ret

    cmd_kw['tgt_type'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    # Pick the state function to run (highstate > top > sls).
    if highstate:
        fun = 'state.highstate'
    elif top:
        fun = 'state.top'
        cmd_kw['arg'].append(top)
    elif sls:
        fun = 'state.sls'
        if isinstance(sls, list):
            sls = ','.join(sls)
        cmd_kw['arg'].append(sls)
    else:
        state_ret[
            'comment'] = 'No highstate or sls specified, no execution made'
        state_ret['result'] = False
        return state_ret

    # Explicit test= wins; otherwise inherit the orchestrator's test mode.
    if test is not None or __opts__.get('test'):
        cmd_kw['kwarg']['test'] = test if test is not None else __opts__.get(
            'test')

    if pillar:
        cmd_kw['kwarg']['pillar'] = pillar

    if pillarenv is not None:
        cmd_kw['kwarg']['pillarenv'] = pillarenv

    if saltenv is not None:
        cmd_kw['kwarg']['saltenv'] = saltenv

    if exclude is not None:
        cmd_kw['kwarg']['exclude'] = exclude

    cmd_kw['kwarg']['queue'] = queue

    if isinstance(concurrent, bool):
        cmd_kw['kwarg']['concurrent'] = concurrent
    else:
        state_ret['comment'] = (
            'Must pass in boolean for value of \'concurrent\'')
        state_ret['result'] = False
        return state_ret

    if batch is not None:
        cmd_kw['batch'] = six.text_type(batch)

    if subset is not None:
        cmd_kw['subset'] = subset

    masterless = __opts__['__role'] == 'minion' and \
                 __opts__['file_client'] == 'local'
    if not masterless:
        _fire_args({'type': 'state', 'tgt': tgt, 'name': name, 'args': cmd_kw})
        cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
    else:
        # Masterless: run the state function locally and synthesize a
        # cmd-style return keyed by this minion's id.
        if top:
            cmd_kw['topfn'] = ''.join(cmd_kw.pop('arg'))
        elif sls:
            cmd_kw['mods'] = ''.join(cmd_kw.pop('arg'))
        cmd_kw.update(cmd_kw.pop('kwarg'))
        tmp_ret = __salt__[fun](**cmd_kw)
        cmd_ret = {
            __opts__['id']: {
                'ret': tmp_ret,
                'out': tmp_ret.get('out', 'highstate')
                       if isinstance(tmp_ret, dict) else 'highstate'
            }
        }

    try:
        state_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
    except (StopIteration, KeyError):
        pass

    changes = {}
    fail = set()
    no_change = set()

    # Normalize fail_minions to an iterable of minion ids.
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, six.string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        state_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.')
        fail_minions = ()

    if not cmd_ret and expect_minions:
        state_ret['result'] = False
        state_ret['comment'] = 'No minions returned'
        return state_ret

    # Classify each minion's return as failed / changed / unchanged.
    for minion, mdata in six.iteritems(cmd_ret):
        if mdata.get('out', '') != 'highstate':
            log.warning('Output from salt state not highstate')

        m_ret = False

        if 'return' in mdata and 'ret' not in mdata:
            mdata['ret'] = mdata.pop('return')

        m_state = True
        if mdata.get('failed', False):
            m_state = False
        else:
            try:
                m_ret = mdata['ret']
            except KeyError:
                m_state = False
            if m_state:
                m_state = __utils__['state.check_result'](m_ret, recurse=True)

        if not m_state:
            if minion not in fail_minions:
                fail.add(minion)
            changes[minion] = m_ret
            continue
        try:
            # for/else: only mark "no change" if no state reported changes.
            for state_item in six.itervalues(m_ret):
                if isinstance(state_item, dict):
                    if 'changes' in state_item and state_item['changes']:
                        changes[minion] = m_ret
                        break
            else:
                no_change.add(minion)
        except AttributeError:
            log.error("m_ret did not have changes %s %s", type(m_ret), m_ret)
            no_change.add(minion)

    if changes:
        state_ret['changes'] = {'out': 'highstate', 'ret': changes}

    if len(fail) > allow_fail:
        state_ret['result'] = False
        state_ret['comment'] = 'Run failed on minions: {0}'.format(
            ', '.join(fail))
    else:
        state_ret['comment'] = 'States ran successfully.'
        if changes:
            state_ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
        if no_change:
            state_ret['comment'] += ' No changes made to {0}.'.format(
                ', '.join(no_change))
    if test or __opts__.get('test'):
        if state_ret['changes'] and state_ret['result'] is True:
            # Test mode with changes is the only case where result should ever be none
            state_ret['result'] = None
    return state_ret
def _is_true(val):
    '''
    Interpret ``val`` as a boolean.

    Returns ``True`` when ``val`` is truthy and reads as 'true'/'yes'/'1'
    (case-insensitive), ``False`` when it reads as 'false'/'no'/'0', and
    raises ``ValueError`` for anything else.
    '''
    text = six.text_type(val).lower()
    if val and text in ('true', 'yes', '1'):
        return True
    if text in ('false', 'no', '0'):
        return False
    raise ValueError('Failed parsing boolean value: {0}'.format(val))
def absent(name, **kwargs):
    """
    Ensures that the user does not exist, eventually delete user.

    .. versionadded:: 2016.3.0

    :param name: user alias
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        George:
            zabbix_user.absent
    """
    # Forward only the explicit connection overrides to the execution module.
    connection_args = {}
    if "_connection_user" in kwargs:
        connection_args["_connection_user"] = kwargs["_connection_user"]
    if "_connection_password" in kwargs:
        connection_args["_connection_password"] = kwargs["_connection_password"]
    if "_connection_url" in kwargs:
        connection_args["_connection_url"] = kwargs["_connection_url"]

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages
    comment_user_deleted = "User {0} deleted.".format(name)
    comment_user_notdeleted = "Unable to delete user: {0}. ".format(name)
    comment_user_notexists = "User {0} does not exist.".format(name)
    changes_user_deleted = {
        name: {
            "old": "User {0} exists.".format(name),
            "new": "User {0} deleted.".format(name),
        }
    }

    user_get = __salt__["zabbix.user_get"](name, **connection_args)

    # Dry run, test=true mode
    if __opts__["test"]:
        if not user_get:
            ret["result"] = True
            ret["comment"] = comment_user_notexists
        else:
            ret["result"] = None
            ret["comment"] = comment_user_deleted
            ret["changes"] = changes_user_deleted
        # Bugfix: return here so a dry run never reaches the real deletion
        # below. Previously test mode fell through and deleted the user.
        return ret

    if not user_get:
        ret["result"] = True
        ret["comment"] = comment_user_notexists
    else:
        try:
            userid = user_get[0]["userid"]
            user_delete = __salt__["zabbix.user_delete"](userid,
                                                         **connection_args)
        except KeyError:
            user_delete = False

        if user_delete and "error" not in user_delete:
            ret["result"] = True
            ret["comment"] = comment_user_deleted
            ret["changes"] = changes_user_deleted
        else:
            ret["result"] = False
            # Bugfix: user_delete may be False (KeyError path above), in
            # which case subscripting it with "error" raised TypeError.
            error_msg = (user_delete["error"]
                         if isinstance(user_delete, dict)
                         else "unknown error")
            ret["comment"] = comment_user_notdeleted + six.text_type(error_msg)

    return ret
def _new_init(self, username, project_id, auth_url, region_name, password, os_auth_plugin, auth=None, **kwargs):
    '''
    Build an authenticated novaclient connection through keystoneauth1.

    Resolves the keystone version from ``auth_url`` (discovering it if not
    embedded), loads the requested auth plugin (default ``password``),
    runs any novaclient shell extension hooks, creates a session and
    client, patches identity-endpoint URLs in the service catalog, and
    finally dispatches to the version-specific setup helper.

    NOTE(review): appears to be a replacement ``__init__`` for a nova
    client wrapper class -- confirm against the enclosing class, which is
    outside this view.

    :param username: keystone user name
    :param project_id: used as the keystone ``project_name``
    :param auth_url: keystone endpoint; may or may not include a version
    :param region_name: region passed to the compute client and setup
    :param password: keystone password
    :param os_auth_plugin: keystoneauth1 plugin name; falls back to 'password'
    :param auth: optional dict of extra auth options (copied, not mutated)
    :param kwargs: extra client options; v3 domain/project overrides,
                   'version', 'identity_service_type' are read from here
    '''
    if auth is None:
        auth = {}

    # Determine the keystone API version; if the URL does not encode one,
    # discover it and append it to the URL.
    ks_version = self._get_version_from_url(auth_url)
    if not ks_version:
        ks_version = self._discover_ks_version(auth_url)
        auth_url = '{0}/{1}'.format(auth_url, ks_version)

    loader = keystoneauth1.loading.get_plugin_loader(os_auth_plugin or 'password')

    self.client_kwargs = kwargs.copy()
    self.kwargs = auth.copy()

    # Discover novaclient extensions once and give them a chance to hook
    # into argument parsing (mirrors the nova CLI shell behaviour).
    if not self.extensions:
        if hasattr(OpenStackComputeShell, '_discover_extensions'):
            self.extensions = OpenStackComputeShell()._discover_extensions('2.0')
        else:
            self.extensions = client.discover_extensions('2.0')
        for extension in self.extensions:
            extension.run_hooks('__pre_parse_args__')
        self.client_kwargs['extensions'] = self.extensions

    self.kwargs['username'] = username
    self.kwargs['project_name'] = project_id
    self.kwargs['auth_url'] = auth_url
    self.kwargs['password'] = password
    if ks_version == 'v3':
        # v3 may override project identifiers and supply domain scoping.
        self.kwargs['project_id'] = kwargs.get('project_id')
        self.kwargs['project_name'] = kwargs.get('project_name')
        self.kwargs['user_domain_name'] = kwargs.get('user_domain_name', 'default')
        self.kwargs['project_domain_name'] = kwargs.get('project_domain_name', 'default')
    self.client_kwargs['region_name'] = region_name
    self.client_kwargs['service_type'] = 'compute'

    if hasattr(self, 'extensions'):
        # needs an object, not a dictionary
        self.kwargstruct = KwargsStruct(**self.client_kwargs)
        for extension in self.extensions:
            extension.run_hooks('__post_parse_args__', self.kwargstruct)
        self.client_kwargs = self.kwargstruct.__dict__

    # Requires novaclient version >= 2.6.1
    self.version = six.text_type(kwargs.get('version', 2))

    self.client_kwargs = sanatize_novaclient(self.client_kwargs)
    options = loader.load_from_options(**self.kwargs)
    self.session = keystoneauth1.session.Session(auth=options)
    conn = client.Client(version=self.version, session=self.session, **self.client_kwargs)
    self.kwargs['auth_token'] = conn.client.session.get_token()
    identity_service_type = kwargs.get('identity_service_type', 'identity')
    self.catalog = conn.client.session.get('/' + ks_version + '/auth/catalog', endpoint_filter={'service_type': identity_service_type}).json().get('catalog', [])
    # Ensure every identity endpoint URL carries an explicit keystone
    # version suffix, for consistency with auth_url above.
    for ep_type in self.catalog:
        if ep_type['type'] == identity_service_type:
            for ep_id in ep_type['endpoints']:
                ep_ks_version = self._get_version_from_url(ep_id['url'])
                if not ep_ks_version:
                    ep_id['url'] = '{0}/{1}'.format(ep_id['url'], ks_version)

    # Version-specific endpoint/tenant setup (helpers defined elsewhere).
    if ks_version == 'v3':
        self._v3_setup(region_name)
    else:
        self._v2_setup(region_name)
def present(alias, passwd, usrgrps, medias=None, password_reset=False, **kwargs):
    """
    Ensures that the user exists, eventually creates new user.
    NOTE: use argument firstname instead of name to not mess values with name from salt sls.

    .. versionadded:: 2016.3.0

    :param alias: user alias
    :param passwd: user's password
    :param usrgrps: user groups to add the user to
    :param medias: Optional - user's medias to create
    :param password_reset: whether or not to reset password at update
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
    :param firstname: string with firstname of the user, use 'firstname' instead of 'name' parameter to not mess \
    with value supplied from Salt sls file.

    .. code-block:: yaml

        make_user:
            zabbix_user.present:
                - alias: George
                - passwd: donottellanyonE@456x
                - password_reset: True
                - usrgrps:
                    - 13
                    - 7
                - medias:
                    - [email protected]:
                        - mediatype: mail
                        - period: '1-7,00:00-24:00'
                        - severity: NIWAHD
                    - make_jabber:
                        - active: true
                        - mediatype: jabber
                        - period: '1-5,08:00-19:00'
                        - sendto: [email protected]
                    - text_me_morning_disabled:
                        - active: false
                        - mediatype: sms
                        - period: '1-5,09:30-10:00'
                        - severity: D
                        - sendto: '+42032132588568'
    """
    if medias is None:
        medias = []
    # Forward only the explicit connection overrides to the execution module.
    connection_args = {}
    if "_connection_user" in kwargs:
        connection_args["_connection_user"] = kwargs["_connection_user"]
    if "_connection_password" in kwargs:
        connection_args["_connection_password"] = kwargs["_connection_password"]
    if "_connection_url" in kwargs:
        connection_args["_connection_url"] = kwargs["_connection_url"]

    ret = {"name": alias, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages
    comment_user_created = "User {0} created.".format(alias)
    comment_user_updated = "User {0} updated.".format(alias)
    comment_user_notcreated = "Unable to create user: {0}. ".format(alias)
    comment_user_exists = "User {0} already exists.".format(alias)
    changes_user_created = {
        alias: {
            "old": "User {0} does not exist.".format(alias),
            "new": "User {0} created.".format(alias),
        }
    }

    def _media_format(medias_data):
        """
        Formats medias from SLS file into valid JSON usable for zabbix API.
        Completes JSON with default values.

        :param medias_data: list of media data from SLS file
        """
        if not medias_data:
            return list()
        medias_json = salt.utils.json.loads(salt.utils.json.dumps(medias_data))
        medias_attr = ("active", "mediatype", "period", "severity", "sendto")
        media_type = {"mail": 1, "jabber": 2, "sms": 3}
        media_severities = ("D", "H", "A", "W", "I", "N")

        # Collect only the recognised attributes for each media entry.
        medias_dict = dict()
        for media in medias_json:
            for med in media:
                medias_dict[med] = dict()
                for medattr in media[med]:
                    for key, value in medattr.items():
                        if key in medias_attr:
                            medias_dict[med][key] = value

        medias_list = list()
        for key, value in medias_dict.items():
            # Load media values or default values
            active = ("0" if six.text_type(value.get(
                "active", "true")).lower() == "true" else "1")
            mediatype_sls = six.text_type(value.get("mediatype", "mail")).lower()
            mediatypeid = six.text_type(media_type.get(mediatype_sls, 1))
            period = value.get("period", "1-7,00:00-24:00")
            sendto = value.get("sendto", key)

            # Encode the requested severities as a binary mask, MSB first,
            # in the fixed D/H/A/W/I/N order expected by the Zabbix API.
            severity_sls = value.get("severity", "HD")
            severity_bin = six.text_type()
            for sev in media_severities:
                if sev in severity_sls:
                    severity_bin += "1"
                else:
                    severity_bin += "0"
            severity = six.text_type(int(severity_bin, 2))

            medias_list.append({
                "active": active,
                "mediatypeid": mediatypeid,
                "period": period,
                "sendto": sendto,
                "severity": severity,
            })
        return medias_list

    user_exists = __salt__["zabbix.user_exists"](alias, **connection_args)

    if user_exists:
        user = __salt__["zabbix.user_get"](alias, **connection_args)[0]
        userid = user["userid"]

        update_usrgrps = False
        update_medias = False

        usergroups = __salt__["zabbix.usergroup_get"](userids=userid,
                                                      **connection_args)
        cur_usrgrps = list()
        for usergroup in usergroups:
            cur_usrgrps.append(int(usergroup["usrgrpid"]))

        if set(cur_usrgrps) != set(usrgrps):
            update_usrgrps = True

        user_medias = __salt__["zabbix.user_getmedia"](userid,
                                                       **connection_args)
        medias_formated = _media_format(medias)

        if user_medias:
            user_medias_copy = deepcopy(user_medias)
            # Strip server-assigned ids before comparing with SLS data.
            for user_med in user_medias_copy:
                user_med.pop("userid")
                user_med.pop("mediaid")
            media_diff = [
                x for x in medias_formated if x not in user_medias_copy
            ] + [y for y in user_medias_copy if y not in medias_formated]
            if media_diff:
                update_medias = True
        elif not user_medias and medias:
            update_medias = True

    # Dry run, test=true mode
    if __opts__["test"]:
        if user_exists:
            if update_usrgrps or password_reset or update_medias:
                ret["result"] = None
                ret["comment"] = comment_user_updated
            else:
                ret["result"] = True
                ret["comment"] = comment_user_exists
        else:
            ret["result"] = None
            ret["comment"] = comment_user_created
        # Bugfix: return here so a dry run never reaches the real update /
        # create calls below. Previously test mode fell through and mutated
        # the Zabbix server.
        return ret

    error = []

    if user_exists:
        ret["result"] = True
        if update_usrgrps or password_reset or update_medias:
            ret["comment"] = comment_user_updated

            if update_usrgrps:
                __salt__["zabbix.user_update"](userid, usrgrps=usrgrps,
                                               **connection_args)
                updated_groups = __salt__["zabbix.usergroup_get"](
                    userids=userid, **connection_args)

                cur_usrgrps = list()
                for usergroup in updated_groups:
                    cur_usrgrps.append(int(usergroup["usrgrpid"]))

                usrgrp_diff = list(set(usrgrps) - set(cur_usrgrps))

                if usrgrp_diff:
                    # Bugfix: "grpup(s)" typo corrected to "group(s)".
                    error.append(
                        "Unable to update group(s): {0}".format(usrgrp_diff))

                ret["changes"]["usrgrps"] = six.text_type(updated_groups)

            if password_reset:
                updated_password = __salt__["zabbix.user_update"](
                    userid, passwd=passwd, **connection_args)
                if "error" in updated_password:
                    # Bugfix: report the password-update error; this
                    # previously appended updated_groups["error"], which is
                    # unrelated (and unbound when update_usrgrps is False).
                    error.append(updated_password["error"])
                else:
                    ret["changes"]["passwd"] = "updated"

            if update_medias:
                # Replace all existing medias with the ones from the SLS.
                for user_med in user_medias:
                    deletedmed = __salt__["zabbix.user_deletemedia"](
                        user_med["mediaid"], **connection_args)
                    if "error" in deletedmed:
                        error.append(deletedmed["error"])

                for media in medias_formated:
                    updatemed = __salt__["zabbix.user_addmedia"](
                        userids=userid,
                        active=media["active"],
                        mediatypeid=media["mediatypeid"],
                        period=media["period"],
                        sendto=media["sendto"],
                        severity=media["severity"],
                        **connection_args)
                    if "error" in updatemed:
                        error.append(updatemed["error"])

                ret["changes"]["medias"] = six.text_type(medias_formated)
        else:
            ret["comment"] = comment_user_exists
    else:
        user_create = __salt__["zabbix.user_create"](alias, passwd, usrgrps,
                                                     **kwargs)
        if "error" not in user_create:
            ret["result"] = True
            ret["comment"] = comment_user_created
            ret["changes"] = changes_user_created
        else:
            ret["result"] = False
            ret["comment"] = comment_user_notcreated + six.text_type(
                user_create["error"])

    # error detected
    if error:
        ret["changes"] = {}
        ret["result"] = False
        ret["comment"] = six.text_type(error)

    return ret
def mounted(name,
            device,
            fstype,
            mkmnt=False,
            opts='defaults',
            dump=0,
            pass_num=0,
            config='/etc/fstab',
            persist=True,
            mount=True,
            user=None,
            match_on='auto',
            device_name_regex=None,
            extra_mount_invisible_options=None,
            extra_mount_invisible_keys=None,
            extra_mount_ignore_fs_keys=None,
            extra_mount_translate_options=None,
            hidden_opts=None,
            **kwargs):
    '''
    Verify that a device is mounted

    name
        The path to the location where the device is to be mounted

    device
        The device name, typically the device node, such as ``/dev/sdb1``
        or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314`` or ``LABEL=DATA``

    fstype
        The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
        filesystems, ``fuse`` in the case of fuse mounts, and ``nfs`` in the case of nfs mounts

    mkmnt
        If the mount point is not present then the state will fail, set ``mkmnt: True``
        to create the mount point if it is otherwise not present

    opts
        A list object of options or a comma delimited list

    dump
        The dump value to be passed into the fstab, Default is ``0``

    pass_num
        The pass value to be passed into the fstab, Default is ``0``

    config
        Set an alternative location for the fstab, Default is ``/etc/fstab``

    persist
        Set if the mount should be saved in the fstab, Default is ``True``

    mount
        Set if the mount should be mounted immediately, Default is ``True``

    user
        The account used to execute the mount; this defaults to the user salt is
        running as on the minion

    match_on
        A name or list of fstab properties on which this state should be applied.
        Default is ``auto``, a special value indicating to guess based on fstype.
        In general, ``auto`` matches on name for recognized special devices and
        device otherwise.

    device_name_regex
        A list of device exact names or regular expressions which should
        not force a remount. For example, glusterfs may be mounted with a
        comma-separated list of servers in fstab, but the /proc/self/mountinfo
        will show only the first available server.

        .. code-block:: jinja

            {% set glusterfs_ip_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] %}

            mount glusterfs volume:
              mount.mounted:
                - name: /mnt/glusterfs_mount_point
                - device: {{ glusterfs_ip_list|join(',') }}:/volume_name
                - fstype: glusterfs
                - opts: _netdev,rw,defaults,direct-io-mode=disable
                - mkmnt: True
                - persist: True
                - dump: 0
                - pass_num: 0
                - device_name_regex:
                  - ({{ glusterfs_ip_list|join('|') }}):/volume_name

        .. versionadded:: 2016.11.0

    extra_mount_invisible_options
        A list of extra options that are not visible through the
        ``/proc/self/mountinfo`` interface.

        If a option is not visible through this interface it will always remount
        the device. This option extends the builtin ``mount_invisible_options``
        list.

    extra_mount_invisible_keys
        A list of extra key options that are not visible through the
        ``/proc/self/mountinfo`` interface.

        If a key option is not visible through this interface it will always
        remount the device. This option extends the builtin
        ``mount_invisible_keys`` list.

        A good example for a key option is the password option::

            password=badsecret

    extra_mount_ignore_fs_keys
        A dict of filesystem options which should not force a remount. This will update
        the internal dictionary. The dict should look like this::

            {
                'ramfs': ['size']
            }

    extra_mount_translate_options
        A dict of mount options that gets translated when mounted. To prevent a remount
        add additional options to the default dictionary. This will update the internal
        dictionary. The dictionary should look like this::

            {
                'tcp': 'proto=tcp',
                'udp': 'proto=udp'
            }

    hidden_opts
        A list of mount options that will be ignored when considering a remount
        as part of the state application

        .. versionadded:: 2015.8.2
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    update_mount_cache = False

    # Required-argument guards.
    if not name:
        ret['result'] = False
        ret['comment'] = 'Must provide name to mount.mounted'
        return ret

    if not device:
        ret['result'] = False
        ret['comment'] = 'Must provide device to mount.mounted'
        return ret

    if not fstype:
        ret['result'] = False
        ret['comment'] = 'Must provide fstype to mount.mounted'
        return ret

    if device_name_regex is None:
        device_name_regex = []

    # Defaults is not a valid option on Mac OS
    if __grains__['os'] in ['MacOS', 'Darwin'] and opts == 'defaults':
        opts = 'noowners'

    # Defaults is not a valid option on AIX
    if __grains__['os'] in ['AIX']:
        if opts == 'defaults':
            opts = ''

    # Make sure that opts is correct, it can be a list or a comma delimited
    # string
    if isinstance(opts, string_types):
        opts = opts.split(',')

    if isinstance(hidden_opts, string_types):
        hidden_opts = hidden_opts.split(',')

    # remove possible trailing slash
    if not name == '/':
        name = name.rstrip('/')

    device_list = []
    # Get the active data
    active = __salt__['mount.active'](extended=True)
    real_name = os.path.realpath(name)
    # Resolve ``device`` to the canonical "real" device that the mount list
    # will actually report, so later comparisons against ``active`` work.
    if device.startswith('/'):
        if 'bind' in opts and real_name in active:
            _device = device
            if active[real_name]['device'].startswith('/'):
                # Find the device that the bind really points at.
                # NOTE(review): if no ancestor of _device is ever found in
                # ``active`` this loop walks os.path.dirname up to '/' and,
                # since dirname('/') == '/', would spin forever; it also
                # leaves _real_device unbound. Presumably '/' is always in
                # ``active`` on a real minion -- confirm.
                while True:
                    if _device in active:
                        _real_device = active[_device]['device']
                        # Inherit the underlying mount's options and record
                        # that this path is a bind mount.
                        opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
                        active[real_name]['opts'].append('bind')
                        break
                    _device = os.path.dirname(_device)
                real_device = _real_device
            else:
                # Remote file systems act differently.
                if _device in active:
                    opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
                    active[real_name]['opts'].append('bind')
                real_device = active[real_name]['device']
        else:
            real_device = os.path.realpath(device)
    elif device.upper().startswith('UUID='):
        real_device = device.split('=')[1].strip('"').lower()
    elif device.upper().startswith('LABEL='):
        # Resolve a LABEL= device to its node via blkid.
        _label = device.split('=')[1]
        cmd = 'blkid -t LABEL={0}'.format(_label)
        res = __salt__['cmd.run_all']('{0}'.format(cmd))
        if res['retcode'] > 0:
            ret['comment'] = 'Unable to find device with label {0}.'.format(_label)
            ret['result'] = False
            return ret
        else:
            # output is a list of entries like this:
            # /dev/sda: LABEL="<label>" UUID="<uuid>" UUID_SUB="<uuid>" TYPE="btrfs"
            # exact list of properties varies between filesystems, but we're
            # only interested in the device in the first column
            # NOTE(review): res['stdout'] from cmd.run_all is a string, so
            # this iterates characters, not lines -- looks like it should be
            # res['stdout'].splitlines(); confirm against cmd.run_all.
            for line in res['stdout']:
                dev_with_label = line.split(':')[0]
                device_list.append(dev_with_label)
            real_device = device_list[0]
    else:
        real_device = device

    # LVS devices have 2 names under /dev:
    # /dev/mapper/vg--name-lv--name and /dev/vg-name/lv-name
    # No matter what name is used for mounting,
    # mount always displays the device as /dev/mapper/vg--name-lv--name
    # Note the double-dash escaping.
    # So, let's call that the canonical device name
    # We should normalize names of the /dev/vg-name/lv-name type to the canonical name
    lvs_match = re.match(r'^/dev/(?P<vg_name>[^/]+)/(?P<lv_name>[^/]+$)', device)
    if lvs_match:
        double_dash_escaped = dict((k, re.sub(r'-', '--', v)) for k, v in six.iteritems(lvs_match.groupdict()))
        mapper_device = '/dev/mapper/{vg_name}-{lv_name}'.format(**double_dash_escaped)
        if os.path.exists(mapper_device):
            real_device = mapper_device

    # When included in a Salt state file, FUSE devices are prefaced by the
    # filesystem type and a hash, e.g. sshfs. In the mount list only the
    # hostname is included. So if we detect that the device is a FUSE device
    # then we remove the prefaced string so that the device in state matches
    # the device in the mount list.
    fuse_match = re.match(r'^\w+\#(?P<device_name>.+)', device)
    if fuse_match:
        if 'device_name' in fuse_match.groupdict():
            real_device = fuse_match.group('device_name')

    if real_name in active:
        if 'superopts' not in active[real_name]:
            active[real_name]['superopts'] = []
        if mount:
            # Collect every name the active mount may be known by so the
            # device-mismatch check below does not trigger spuriously.
            device_list.append(active[real_name]['device'])
            device_list.append(os.path.realpath(device_list[0]))
            alt_device = active[real_name]['alt_device'] if 'alt_device' in active[real_name] else None
            uuid_device = active[real_name]['device_uuid'] if 'device_uuid' in active[real_name] else None
            label_device = active[real_name]['device_label'] if 'device_label' in active[real_name] else None
            if alt_device and alt_device not in device_list:
                device_list.append(alt_device)
            if uuid_device and uuid_device not in device_list:
                device_list.append(uuid_device)
            if label_device and label_device not in device_list:
                device_list.append(label_device)
            if opts:
                # Options that /proc/self/mountinfo never reports; their
                # absence from the active data must not force a remount.
                mount_invisible_options = [
                    '_netdev',
                    'actimeo',
                    'bg',
                    'comment',
                    'defaults',
                    'delay_connect',
                    'direct-io-mode',
                    'intr',
                    'loop',
                    'nointr',
                    'nobootwait',
                    'nofail',
                    'password',
                    'reconnect',
                    'retry',
                    'soft',
                    'auto',
                    'users',
                    'bind',
                    'nonempty',
                    'transform_symlinks',
                    'port',
                    'backup-volfile-servers',
                ]

                if extra_mount_invisible_options:
                    mount_invisible_options.extend(extra_mount_invisible_options)

                if hidden_opts:
                    mount_invisible_options = list(set(mount_invisible_options) | set(hidden_opts))

                # options which are provided as key=value (e.g. password=Zohp5ohb)
                mount_invisible_keys = [
                    'actimeo',
                    'comment',
                    'credentials',
                    'direct-io-mode',
                    'password',
                    'port',
                    'retry',
                    'secretfile',
                ]

                if extra_mount_invisible_keys:
                    mount_invisible_keys.extend(extra_mount_invisible_keys)

                # Some filesystems have options which should not force a remount.
                mount_ignore_fs_keys = {'ramfs': ['size']}

                if extra_mount_ignore_fs_keys:
                    mount_ignore_fs_keys.update(extra_mount_ignore_fs_keys)

                # Some options are translated once mounted
                mount_translate_options = {
                    'tcp': 'proto=tcp',
                    'udp': 'proto=udp',
                }

                if extra_mount_translate_options:
                    mount_translate_options.update(extra_mount_translate_options)

                # Normalize each requested option, then force a remount if
                # any normalized option is missing from the active mount.
                for opt in opts:
                    if opt in mount_translate_options:
                        opt = mount_translate_options[opt]

                    keyval_option = opt.split('=')[0]
                    if keyval_option in mount_invisible_keys:
                        opt = keyval_option

                    size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', opt)
                    if size_match:
                        converted_size = _size_convert(size_match)
                        opt = "size={0}k".format(converted_size)
                    # make cifs option user synonym for option username which is reported by /proc/mounts
                    if fstype in ['cifs'] and opt.split('=')[0] == 'user':
                        opt = "username={0}".format(opt.split('=')[1])

                    if opt.split('=')[0] in mount_ignore_fs_keys.get(fstype, []):
                        opt = opt.split('=')[0]

                    # convert uid/gid to numeric value from user/group name
                    name_id_opts = {'uid': 'user.info',
                                    'gid': 'group.info'}
                    if opt.split('=')[0] in name_id_opts and len(opt.split('=')) > 1:
                        _givenid = opt.split('=')[1]
                        _param = opt.split('=')[0]
                        _id = _givenid
                        if not re.match('[0-9]+$', _givenid):
                            _info = __salt__[name_id_opts[_param]](_givenid)
                            if _info and _param in _info:
                                _id = _info[_param]
                        opt = _param + '=' + six.text_type(_id)

                    # Normalize any size= entries in the active superopts the
                    # same way, so the comparison below is like-for-like.
                    # NOTE(review): this mutates _active_superopts while
                    # iterating over it, and overwrites ``opt`` with the
                    # active mount's size -- confirm intended.
                    _active_superopts = active[real_name].get('superopts', [])
                    for _active_opt in _active_superopts:
                        size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', _active_opt)
                        if size_match:
                            converted_size = _size_convert(size_match)
                            opt = "size={0}k".format(converted_size)
                            _active_superopts.remove(_active_opt)
                            _active_opt = "size={0}k".format(converted_size)
                            _active_superopts.append(_active_opt)

                    if opt not in active[real_name]['opts'] \
                    and opt not in _active_superopts \
                    and opt not in mount_invisible_options \
                    and opt not in mount_ignore_fs_keys.get(fstype, []) \
                    and opt not in mount_invisible_keys:
                        if __opts__['test']:
                            ret['result'] = None
                            ret['comment'] = "Remount would be forced because options ({0}) changed".format(opt)
                            return ret
                        else:
                            # Some file systems require umounting and mounting if options change
                            # add others to list that require similiar functionality
                            if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
                                ret['changes']['umount'] = "Forced unmount and mount because " \
                                                           + "options ({0}) changed".format(opt)
                                unmount_result = __salt__['mount.umount'](real_name)
                                if unmount_result is True:
                                    mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
                                    ret['result'] = mount_result
                                else:
                                    ret['result'] = False
                                    ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
                                    return ret
                            else:
                                ret['changes']['umount'] = "Forced remount because " \
                                                           + "options ({0}) changed".format(opt)
                                remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
                                ret['result'] = remount_result
                                # Cleanup after the remount, so we
                                # don't write remount into fstab
                                if 'remount' in opts:
                                    opts.remove('remount')

                            # Update the cache
                            update_mount_cache = True

                # Also force a remount if options previously recorded in the
                # mount cache have since been dropped from ``opts``.
                mount_cache = __salt__['mount.read_mount_cache'](real_name)
                if 'opts' in mount_cache:
                    _missing = [opt for opt in mount_cache['opts']
                                if opt not in opts]

                    if _missing:
                        if __opts__['test']:
                            ret['result'] = None
                            ret['comment'] = ('Remount would be forced because'
                                              ' options ({0})'
                                              'changed'.format(','.join(_missing)))
                            return ret
                        else:
                            # Some file systems require umounting and mounting if options change
                            # add others to list that require similiar functionality
                            # NOTE(review): the messages below interpolate
                            # ``opt`` (leftover loop variable), not the
                            # ``_missing`` options that triggered this branch
                            # -- presumably a copy/paste slip; confirm.
                            if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
                                ret['changes']['umount'] = "Forced unmount and mount because " \
                                                           + "options ({0}) changed".format(opt)
                                unmount_result = __salt__['mount.umount'](real_name)
                                if unmount_result is True:
                                    mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
                                    ret['result'] = mount_result
                                else:
                                    ret['result'] = False
                                    ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
                                    return ret
                            else:
                                ret['changes']['umount'] = "Forced remount because " \
                                                           + "options ({0}) changed".format(opt)
                                remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
                                ret['result'] = remount_result
                                # Cleanup after the remount, so we
                                # don't write remount into fstab
                                if 'remount' in opts:
                                    opts.remove('remount')
                            update_mount_cache = True
                else:
                    update_mount_cache = True

            if real_device not in device_list:
                # name matches but device doesn't - need to umount
                _device_mismatch_is_ignored = None
                for regex in list(device_name_regex):
                    for _device in device_list:
                        if re.match(regex, _device):
                            _device_mismatch_is_ignored = _device
                            break
                if __opts__['test']:
                    ret['result'] = None
                    ret['comment'] = "An umount would have been forced " \
                                     + "because devices do not match. Watched: " \
                                     + device
                elif _device_mismatch_is_ignored:
                    ret['result'] = True
                    ret['comment'] = "An umount will not be forced " \
                                     + "because device matched device_name_regex: " \
                                     + _device_mismatch_is_ignored
                else:
                    ret['changes']['umount'] = "Forced unmount because devices " \
                                               + "don't match. Wanted: " + device
                    if real_device != device:
                        ret['changes']['umount'] += " (" + real_device + ")"
                    ret['changes']['umount'] += ", current: " + ', '.join(device_list)
                    out = __salt__['mount.umount'](real_name, user=user)
                    active = __salt__['mount.active'](extended=True)
                    if real_name in active:
                        ret['comment'] = "Unable to unmount"
                        ret['result'] = None
                        return ret
                    update_mount_cache = True
        else:
            ret['comment'] = 'Target was already mounted'
    # using a duplicate check so I can catch the results of a umount
    if real_name not in active:
        if mount:
            # The mount is not present! Mount it
            if __opts__['test']:
                ret['result'] = None
                if os.path.exists(name):
                    ret['comment'] = '{0} would be mounted'.format(name)
                elif mkmnt:
                    ret['comment'] = '{0} would be created and mounted'.format(name)
                else:
                    ret['comment'] = '{0} does not exist and would not be created'.format(name)
                return ret

            if not os.path.exists(name) and not mkmnt:
                ret['result'] = False
                ret['comment'] = 'Mount directory is not present'
                return ret

            out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts, user=user)
            active = __salt__['mount.active'](extended=True)
            update_mount_cache = True
            if isinstance(out, string_types):
                # Failed to (re)mount, the state has failed!
                ret['comment'] = out
                ret['result'] = False
                return ret
            elif real_name in active:
                # (Re)mount worked!
                ret['comment'] = 'Target was successfully mounted'
                ret['changes']['mount'] = True
        elif not os.path.exists(name):
            # mount=False: only manage the directory, never mount.
            if __opts__['test']:
                ret['result'] = None
                if mkmnt:
                    ret['comment'] = '{0} would be created, but not mounted'.format(name)
                else:
                    ret['comment'] = '{0} does not exist and would neither be created nor mounted'.format(name)
            elif mkmnt:
                __salt__['file.mkdir'](name, user=user)
                ret['comment'] = '{0} was created, not mounted'.format(name)
            else:
                ret['comment'] = '{0} not present and not mounted'.format(name)
        else:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = '{0} would not be mounted'.format(name)
            else:
                ret['comment'] = '{0} not mounted'.format(name)

    if persist:
        if '/etc/fstab' == config:
            # Override default for Mac OS
            if __grains__['os'] in ['MacOS', 'Darwin']:
                config = "/etc/auto_salt"

            # Override default for AIX
            elif 'AIX' in __grains__['os']:
                config = "/etc/filesystems"

        if __opts__['test']:
            # Probe (test=True) what the fstab write would do.
            if __grains__['os'] in ['MacOS', 'Darwin']:
                out = __salt__['mount.set_automaster'](name, device, fstype, opts, config, test=True)
            elif __grains__['os'] in ['AIX']:
                out = __salt__['mount.set_filesystems'](name, device, fstype, opts, mount, config, test=True, match_on=match_on)
            else:
                out = __salt__['mount.set_fstab'](name, device, fstype, opts, dump, pass_num, config, test=True, match_on=match_on)
            if out != 'present':
                ret['result'] = None
                if out == 'new':
                    if mount:
                        comment = ('{0} is mounted, but needs to be '
                                   'written to the fstab in order to be '
                                   'made persistent.').format(name)
                    else:
                        comment = ('{0} needs to be '
                                   'written to the fstab in order to be '
                                   'made persistent.').format(name)
                elif out == 'change':
                    if mount:
                        comment = ('{0} is mounted, but its fstab entry '
                                   'must be updated.').format(name)
                    else:
                        comment = ('The {0} fstab entry '
                                   'must be updated.').format(name)
                else:
                    ret['result'] = False
                    comment = ('Unable to detect fstab status for '
                               'mount point {0} due to unexpected '
                               'output \'{1}\' from call to '
                               'mount.set_fstab. This is most likely '
                               'a bug.').format(name, out)
                if 'comment' in ret:
                    ret['comment'] = '{0}. {1}'.format(ret['comment'], comment)
                else:
                    ret['comment'] = comment

                return ret

        else:
            # Actually write the persistence entry.
            if __grains__['os'] in ['MacOS', 'Darwin']:
                out = __salt__['mount.set_automaster'](name, device, fstype, opts, config)
            elif __grains__['os'] in ['AIX']:
                out = __salt__['mount.set_filesystems'](name, device, fstype, opts, mount, config, match_on=match_on)
            else:
                out = __salt__['mount.set_fstab'](name, device, fstype, opts, dump, pass_num, config, match_on=match_on)

        if update_mount_cache:
            cache_result = __salt__['mount.write_mount_cache'](real_name,
                                                               device,
                                                               mkmnt=mkmnt,
                                                               fstype=fstype,
                                                               mount_opts=opts)

        # Translate the set_fstab/set_filesystems result into the state
        # comment/changes.
        if out == 'present':
            ret['comment'] += '. Entry already exists in the fstab.'
            return ret
        if out == 'new':
            ret['changes']['persist'] = 'new'
            ret['comment'] += '. Added new entry to the fstab.'
            return ret
        if out == 'change':
            ret['changes']['persist'] = 'update'
            ret['comment'] += '. Updated the entry in the fstab.'
            return ret
        if out == 'bad config':
            ret['result'] = False
            ret['comment'] += '. However, the fstab was not found.'
            return ret

    return ret
def delete_key(
    keyid=None, fingerprint=None, delete_secret=False, user=None, gnupghome=None
):
    """
    Delete a key from the GPG keychain.

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the
        public key. Secret keys must be deleted before deleting any
        corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    """
    ret = {"res": True, "message": ""}

    # Callers must supply exactly one of keyid / fingerprint.
    if fingerprint and keyid:
        ret["res"] = False
        ret["message"] = "Only specify one argument, fingerprint or keyid"
        return ret
    if not fingerprint and not keyid:
        ret["res"] = False
        ret["message"] = "Required argument, fingerprint or keyid"
        return ret

    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)

    # Nothing to delete when the key is absent from the keychain.
    if not key:
        ret["res"] = False
        ret["message"] = "Key not available in keychain."
        return ret

    fingerprint = key["fingerprint"]
    skey = get_secret_key(keyid, fingerprint, user)

    # Refuse to delete a public key while its secret half still exists,
    # unless the caller explicitly opted in.
    if skey and not delete_secret:
        ret["res"] = False
        ret["message"] = "Secret key exists, delete first or pass delete_secret=True."
        return ret

    if (
        skey
        and delete_secret
        and six.text_type(gpg.delete_keys(fingerprint, True)) == "ok"
    ):
        # Delete the secret key
        ret["message"] = "Secret key for {0} deleted\n".format(fingerprint)

    # Delete the public key
    if six.text_type(gpg.delete_keys(fingerprint)) == "ok":
        ret["message"] += "Public key for {0} deleted".format(fingerprint)
    ret["res"] = True
    return ret
def verify(
    text=None, user=None, filename=None, gnupghome=None, signature=None, trustmodel=None
):
    """
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct

    """
    # NOTE(review): gnupghome is accepted but not passed on to _create_gpg —
    # confirm whether a custom keyring location is honored here.
    gpg = _create_gpg(user)
    trustmodels = ("pgp", "classic", "tofu", "tofu+pgp", "direct", "always", "auto")

    if trustmodel and trustmodel not in trustmodels:
        msg = "Invalid trustmodel defined: {}. Use one of: {}".format(
            trustmodel, ", ".join(trustmodels)
        )
        # BUGFIX: logging.warn() is a deprecated alias; use warning().
        log.warning(msg)
        return {"res": False, "message": msg}

    extra_args = []

    if trustmodel:
        extra_args.extend(["--trust-model", trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, "rb") as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, "rb") as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError("filename or text must be passed.")

    ret = {}
    if verified.trust_level is not None:
        # A non-None trust_level means the signature checked out.
        ret["res"] = True
        ret["username"] = verified.username
        ret["key_id"] = verified.key_id
        ret["trust_level"] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret["message"] = "The signature is verified."
    else:
        ret["res"] = False
        ret["message"] = "The signature could not be verified."

    return ret
def running(name,
            image,
            tag='latest',
            container=None,
            command=None,
            hostname=None,
            user=None,
            detach=True,
            stdin_open=False,
            tty=False,
            mem_limit=None,
            ports=None,
            environment=None,
            dns=None,
            volumes=None,
            volumes_from=None,
            start=True,
            cap_add=None,
            cap_drop=None,
            privileged=None,
            lxc_conf=None,
            network_mode=None,
            check_is_running=True,
            publish_all_ports=False,
            links=None,
            restart_policy=None,
            cpu_shares=None,
            cpuset=None,
            kill_signal=None,
            *args, **kwargs):
    '''
    Ensure that a container is running. If the container does not exist, it
    will be created from the specified image. (`docker run`)

    name / container
        Name for the container

    image
        Image from which to build this container

    tag
        tag of the image (defaults to 'latest')

    environment
        Environment variables for the container, either
            - a mapping of key, values
            - a list of mappings of key, values

    ports
        List of ports definitions, either:
            - a port to map
            - a mapping of mapping portInHost : PortInContainer

        .. code-block:: yaml

            - ports:
              - "5000/tcp":
                    HostIp: ""
                    HostPort: "5000"

    publish_all_ports
        Publish all ports from the port list (default is false, only
        meaningful if port does not contain portinhost:portincontainer mapping)

    volumes
        List of volumes to mount or create in the container (like ``-v`` of
        ``docker run`` command), mapping host directory to container directory.

        To specify a volume in the container in terse list format:

        .. code-block:: yaml

            - volumes:
              - "/var/log/service" # container-only volume
              - "/srv/timezone:/etc/timezone" # bound volume
              - "/usr/local/etc/passwd:/etc/passwd:ro" # read-only bound volume

        You can also use the short dictionary form (note that the notion of
        source:target from docker is preserved):

        .. code-block:: yaml

            - volumes:
              - /var/log/service: /var/log/service # mandatory read-write implied

        Or, alternatively, to specify read-only mounting, use the extended form:

        .. code-block:: yaml

            - volumes:
              - /home/user1:
                  bind: /mnt/vol2
                  ro: True
              - /var/www:
                  bind: /mnt/vol1
                  ro: False

        Or (for backwards compatibility) another dict style:

        .. code-block:: yaml

            - volumes:
              /home/user1:
                bind: /mnt/vol2
                ro: True
              /var/www:
                bind: /mnt/vol1
                ro: False

    volumes_from
        List of containers to share volumes with

    dns
        List of DNS servers.

        .. code-block:: yaml

            - dns:
                - 127.0.0.1

    network_mode
        - 'bridge': creates a new network stack for the container on the docker bridge
        - 'none': no networking for this container
        - 'container:[name|id]': reuses another container network stack)
        - 'host': use the host network stack inside the container

        .. code-block:: yaml

            - network_mode: host

    restart_policy
        Restart policy to apply when a container exits (no, on-failure[:max-retry], always)

        .. code-block:: yaml

            - restart_policy:
                MaximumRetryCount: 5
                Name: on-failure

    cap_add
        List of capabilities to add in a container.

    cap_drop
        List of capabilities to drop in a container.

    check_is_running
        Enable checking if a container should run or not.
        Useful for data-only containers that must be linked to another one.
        e.g. nginx <- static-files

    cpu_shares
        CPU shares (relative weight)

        .. code-block:: yaml

            - cpu_shares: 2

    cpuset
        CPUs in which to allow execution ('0-3' or '0,1')

        .. code-block:: yaml

            - cpuset: '0-3'

    kill_signal
        If defined, its value will be sent as a kill signal to the running
        container. i.e. It will use client.kill(signal=kill_signal) instead of
        client.restart(), when the state is triggered by a watcher requisite.

        possible use case: Soft reload of nginx

        .. code-block:: yaml

            nginx:
              docker.running:
                - image: some-fictional-registry.com/nginx
                - tag: latest
                - kill_signal: SIGHUP
                - watch:
                  - file: /etc/nginx/nginx.conf

        This state will ask nginx to reload (instead of restart) each time the
        /etc/nginx/nginx.conf is modified.

        .. versionadded:: 2015.8.0

    For other parameters, see salt.modules.dockerio execution module and the
    docker-py python bindings for docker documentation
    <https://github.com/dotcloud/docker-py#api>`_ for `docker.create_container`.

    .. note::
        This command does not verify that the named container
        is running the specified image.
    '''
    if container is None:
        container = name
    ins_image = __salt__['docker.inspect_image']
    ins_container = __salt__['docker.inspect_container']
    create = __salt__['docker.create_container']

    image_name = _get_image_name(image, tag)
    iinfos = ins_image(image_name)
    image_exists = iinfos['status']

    if not image_exists:
        return _invalid(
            comment='image "{0}" does not exists'.format(image_name))

    cinfos = ins_container(name)
    already_exists = cinfos['status']
    already_exists_with_same_image = (
        # if container is known by name,
        already_exists
        # and the container is based on expected image,
        and cinfos['out']['Image'] == iinfos['out']['Id']
        # then assume it already exists.
    )

    is_running = __salt__['docker.is_running'](container)

    # if container exists but is not started, try to start it
    if already_exists_with_same_image and (is_running or not start):
        return _valid(comment='container {0!r} already exists'.format(name))
    if not already_exists_with_same_image and already_exists:
        # Outdated container: It means it runs against an old image.
        # We're gonna have to stop and remove the old container, to let
        # the name available for the new one.
        if __opts__['test']:
            comment = 'Will replace outdated container {0!r}'.format(name)
            return _ret_status(name=name, comment=comment)
        if is_running:
            stop_status = __salt__['docker.stop'](name)
            if not stop_status['status']:
                return _invalid(
                    comment='Failed to stop outdated container {0!r}'.format(
                        name))
        remove_status = __salt__['docker.remove_container'](name)
        if not remove_status['status']:
            return _invalid(
                comment='Failed to remove outdated container {0!r}'.format(
                    name))
        already_exists = False
        # now it's clear, the name is available for the new container

    if __opts__['test']:
        comment = 'Will create container {0!r}'.format(name)
        return _ret_status(name=name, comment=comment)

    # parse input data
    exposeports, bindports, contvolumes, bindvolumes, denvironment, changes = [], {}, [], {}, {}, []
    if not ports:
        ports = {}
    if not volumes:
        volumes = {}
    if not volumes_from:
        volumes_from = []
    if isinstance(environment, dict):
        for key in environment:
            denvironment[six.text_type(key)] = six.text_type(environment[key])
    if isinstance(environment, list):
        for var in environment:
            if isinstance(var, dict):
                for key in var:
                    denvironment[six.text_type(key)] = six.text_type(var[key])
    # BUGFIX(dead code removed): a manual parsing loop over ``volumes`` used to
    # populate bindvolumes/contvolumes here, but its results were always
    # overwritten by the _parse_volumes() call below, so it has been dropped.
    if isinstance(ports, dict):
        bindports = ports
        # in dict form all ports bind, so no need for exposeports
    if isinstance(ports, list):
        for port in ports:
            if isinstance(port, dict):
                # get the container port as the dict key
                container_port = list(port.keys())[0]
                # find target
                if isinstance(port[container_port], dict):
                    host_port = port[container_port]['HostPort']
                    host_ip = port[container_port].get('HostIp', '0.0.0.0')
                else:
                    host_port = str(port[container_port])
                    host_ip = '0.0.0.0'
                bindports[container_port] = {
                    'HostPort': host_port,
                    'HostIp': host_ip
                }
            else:
                # assume just a port to expose
                exposeports.append(str(port))

    parsed_volumes = _parse_volumes(volumes)
    bindvolumes = parsed_volumes['bindvols']
    contvolumes = parsed_volumes['contvols']

    if not already_exists:
        kwargs = dict(command=command, hostname=hostname, user=user,
                      detach=detach, stdin_open=stdin_open, tty=tty,
                      mem_limit=mem_limit, ports=exposeports,
                      environment=denvironment, dns=dns, binds=bindvolumes,
                      volumes=contvolumes, name=name, cpu_shares=cpu_shares,
                      cpuset=cpuset)
        out = create(image_name, **kwargs)
        # if container has been created, even if not started, we mark
        # it as installed
        try:
            cid = out['out']['info']['id']
            log.debug(str(cid))
        except Exception as e:
            changes.append('Container created')
            log.debug(str(e))
        else:
            changes.append('Container {0} created'.format(cid))
    if start:
        started = __salt__['docker.start'](
            name, binds=bindvolumes, port_bindings=bindports,
            lxc_conf=lxc_conf, publish_all_ports=publish_all_ports,
            links=links, privileged=privileged, dns=dns,
            volumes_from=volumes_from, network_mode=network_mode,
            restart_policy=restart_policy, cap_add=cap_add, cap_drop=cap_drop)
        if check_is_running:
            is_running = __salt__['docker.is_running'](name)
            log.debug("Docker-io running:" + str(started))
            log.debug("Docker-io running:" + str(is_running))
            if is_running:
                changes.append('Container {0!r} started.\n'.format(name))
            else:
                return _invalid(
                    comment=('Container {0!r} cannot be started\n{1!s}'.format(
                        name, started['out'],
                    )))
        else:
            changes.append('Container {0!r} started.\n'.format(name))
    return _valid(comment='\n'.join(changes), changes={name: True})
AUTOREMOVE = ''' Reading package lists... Done Building dependency tree Reading state information... Done 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. ''' UPGRADE = ''' Reading package lists... Building dependency tree... Reading state information... 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. ''' UNINSTALL = {'tmux': {'new': six.text_type(), 'old': '1.8-5'}} class AptPkgTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.modules.aptpkg ''' def setup_loader_modules(self): return {aptpkg: {}} @patch( 'salt.modules.aptpkg.__salt__', { 'pkg_resource.version': MagicMock(return_value=LOWPKG_INFO['wget']['version']) }) def test_version(self):
def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
    '''
    Set up pattern for logging.

    name : string
        alias for entryname
    pattern : string
        alias for log_file
    conf_file : string
        optional path to alternative configuration file
    **kwargs : boolean|string|int
        optional additional flags and parameters

    .. note::
        ``name`` and ``pattern`` were kept for backwards compatibility reasons.

        ``name`` is an alias for the ``entryname`` argument, ``pattern`` is an
        alias for ``log_file``. These aliases will only be used if the
        ``entryname`` and ``log_file`` arguments are not passed.

        For a full list of arguments see ``logadm.show_args``.

    CLI Example:

    .. code-block:: bash

        salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7
        salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700'

    '''
    ## cleanup kwargs
    kwargs = salt.utils.args.clean_kwargs(**kwargs)

    ## inject name into kwargs
    if 'entryname' not in kwargs and name and not name.startswith('/'):
        kwargs['entryname'] = name

    ## inject pattern into kwargs
    if 'log_file' not in kwargs:
        if pattern and pattern.startswith('/'):
            kwargs['log_file'] = pattern
        # NOTE: for backwards compatibility check if name is a path
        elif name and name.startswith('/'):
            kwargs['log_file'] = name

    ## build command
    log.debug("logadm.rotate - kwargs: %s", kwargs)
    command = "logadm -f {}".format(conf_file)
    for arg, val in kwargs.items():
        if arg in option_toggles.values():
            # BUGFIX: a toggle explicitly set to a falsy value used to fall
            # through to the "Unknown argument" warning below; it is simply
            # omitted from the command line instead.
            if val:
                command = "{} {}".format(
                    command,
                    _arg2opt(arg),
                )
        elif arg in option_flags.values():
            command = "{} {} {}".format(command, _arg2opt(arg), _quote_args(six.text_type(val)))
        elif arg != 'log_file':
            log.warning("Unknown argument %s, don't know how to map this!", arg)
    if 'log_file' in kwargs:
        # NOTE: excerpt from ``man logadm``
        #   If no log file name is provided on a logadm command line, the entry
        #   name is assumed to be the same as the log file name. For example,
        #   the following two lines achieve the same thing, keeping two copies
        #   of rotated log files:
        #
        #     % logadm -C2 -w mylog /my/really/long/log/file/name
        #     % logadm -C2 -w /my/really/long/log/file/name
        if 'entryname' not in kwargs:
            command = "{} -w {}".format(command, _quote_args(kwargs['log_file']))
        else:
            command = "{} {}".format(command, _quote_args(kwargs['log_file']))
    log.debug("logadm.rotate - command: %s", command)

    result = __salt__['cmd.run_all'](command, python_shell=False)
    if result['retcode'] != 0:
        return dict(Error='Failed in adding log', Output=result['stderr'])

    return dict(Result='Success')
def set_(key, value, setting=None, conf_file=_DEFAULT_CONF):
    """
    Set a new value for a specific configuration line.

    :param str key: The command or block to configure.
    :param str value: The command value or command of the block specified by
        the key parameter.
    :param str setting: The command value for the command specified by the
        value parameter.
    :param str conf_file: The logrotate configuration file.

    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' logrotate.set rotate 2

    Can also be used to set a single value inside a multiline configuration
    block. For instance, to change rotate in the following block:

    .. code-block:: text

        /var/log/wtmp {
            monthly
            create 0664 root root
            rotate 1
        }

    Use the following command:

    .. code-block:: bash

        salt '*' logrotate.set /var/log/wtmp rotate 2

    This module also has the ability to scan files inside an include directory,
    and make changes in the appropriate file.
    """
    conf = _parse_conf(conf_file)

    # If the key lives in an included file, retarget the edit at that file.
    # If it appears in several include files, the last match wins.
    for include in conf["include files"]:
        if key in conf["include files"][include]:
            conf_file = os.path.join(conf["include"], include)

    new_line = six.text_type()
    # Base kwargs for file.replace; "flags": 8 presumably maps to
    # re.MULTILINE — confirm against the file.replace implementation.
    kwargs = {
        "flags": 8,
        "backup": False,
        "path": conf_file,
        "pattern": "^{0}.*".format(key),
        "show_changes": False,
    }

    if setting is None:
        # Simple top-level directive (e.g. "rotate 2").
        current_value = conf.get(key, False)

        if isinstance(current_value, dict):
            # Key names a block; caller must supply the inner setting too.
            error_msg = (
                "Error: {0} includes a dict, and a specific setting inside the "
                "dict was not declared").format(key)
            raise SaltInvocationError(error_msg)

        if value == current_value:
            # Nothing to do — the requested value is already configured.
            _LOG.debug("Command '%s' already has: %s", key, value)
            return True

        # This is the new config line that will be set
        if value is True:
            # Bare flag directive, no argument (e.g. "compress").
            new_line = key
        elif value:
            new_line = "{0} {1}".format(key, value)

        kwargs.update({"prepend_if_not_found": True})
    else:
        # Directive inside a multiline stanza (e.g. "/var/log/wtmp { ... }").
        stanza = conf.get(key, dict())

        if stanza and not isinstance(stanza, dict):
            error_msg = ("Error: A setting for a dict was declared, but the "
                         "configuration line given is not a dict")
            raise SaltInvocationError(error_msg)

        if setting == stanza.get(value, False):
            _LOG.debug("Command '%s' already has: %s", value, setting)
            return True

        # We're going to be rewriting an entire stanza
        if setting:
            stanza[value] = setting
        else:
            # Falsy setting removes the directive from the stanza.
            # NOTE(review): raises KeyError if the directive is absent —
            # confirm this is the intended behavior for callers.
            del stanza[value]

        new_line = _dict_to_stanza(key, stanza)

        # "flags": 24 presumably re.MULTILINE | re.DOTALL so the pattern can
        # span the whole { ... } block — confirm against file.replace.
        kwargs.update({
            "pattern": "^{0}.*?{{.*?}}".format(key),
            "flags": 24,
            "append_if_not_found": True,
        })

    kwargs.update({"repl": new_line})
    _LOG.debug("Setting file '%s' line: %s", conf_file, new_line)

    return __salt__["file.replace"](**kwargs)
def run(self):
    '''
    Execute the salt command line
    '''
    import salt.client
    self.parse_args()

    if self.config['log_level'] not in ('quiet', ):
        # Setup file logging!
        self.setup_logfile_logger()
        verify_log(self.config)

    try:
        # We don't need to bail on config file permission errors
        # if the CLI process is run with the -a flag
        skip_perm_errors = self.options.eauth != ''
        self.local_client = salt.client.get_local_client(
            self.get_config_file_path(),
            skip_perm_errors=skip_perm_errors,
            auto_reconnect=True)
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return

    if self.options.batch or self.options.static:
        # _run_batch() will handle all output and
        # exit with the appropriate error condition
        # Execution will not continue past this point
        # in batch mode.
        self._run_batch()
        return

    if self.options.preview_target:
        # Only show which minions would be targeted; do not run anything.
        minion_list = self._preview_target()
        self._output_ret(minion_list, self.config.get('output', 'nested'))
        return

    if self.options.timeout <= 0:
        # Non-positive timeout falls back to the client's configured default.
        self.options.timeout = self.local_client.opts['timeout']

    kwargs = {
        'tgt': self.config['tgt'],
        'fun': self.config['fun'],
        'arg': self.config['arg'],
        'timeout': self.options.timeout,
        'show_timeout': self.options.show_timeout,
        'show_jid': self.options.show_jid}

    if 'token' in self.config:
        import salt.utils.files
        try:
            # Prefer the master's root key when readable; fall back to the
            # configured token if the key file is not accessible.
            with salt.utils.files.fopen(
                    os.path.join(self.config['cachedir'], '.root_key'),
                    'r') as fp_:
                kwargs['key'] = fp_.readline()
        except IOError:
            kwargs['token'] = self.config['token']

    kwargs['delimiter'] = self.options.delimiter

    if self.selected_target_option:
        kwargs['tgt_type'] = self.selected_target_option
    else:
        kwargs['tgt_type'] = 'glob'

    # If batch_safe_limit is set, check minions matching target and
    # potentially switch to batch execution
    if self.options.batch_safe_limit > 1:
        if len(self._preview_target()) >= self.options.batch_safe_limit:
            salt.utils.stringutils.print_cli(
                '\nNOTICE: Too many minions targeted, switching to batch execution.'
            )
            self.options.batch = self.options.batch_safe_size
            self._run_batch()
            return

    # 'return' is a Python keyword, so these options are read via getattr.
    if getattr(self.options, 'return'):
        kwargs['ret'] = getattr(self.options, 'return')

    if getattr(self.options, 'return_config'):
        kwargs['ret_config'] = getattr(self.options, 'return_config')

    if getattr(self.options, 'return_kwargs'):
        kwargs['ret_kwargs'] = yamlify_arg(
            getattr(self.options, 'return_kwargs'))

    if getattr(self.options, 'module_executors'):
        kwargs['module_executors'] = yamlify_arg(
            getattr(self.options, 'module_executors'))

    if getattr(self.options, 'executor_opts'):
        kwargs['executor_opts'] = yamlify_arg(
            getattr(self.options, 'executor_opts'))

    if getattr(self.options, 'metadata'):
        kwargs['metadata'] = yamlify_arg(
            getattr(self.options, 'metadata'))

    # If using eauth and a token hasn't already been loaded into
    # kwargs, prompt the user to enter auth credentials
    if 'token' not in kwargs and 'key' not in kwargs and self.options.eauth:
        # This is expensive. Don't do it unless we need to.
        import salt.auth
        resolver = salt.auth.Resolver(self.config)
        res = resolver.cli(self.options.eauth)
        if self.options.mktoken and res:
            tok = resolver.token_cli(
                self.options.eauth,
                res
            )
            if tok:
                kwargs['token'] = tok.get('token', '')
        if not res:
            sys.stderr.write('ERROR: Authentication failed\n')
            sys.exit(2)
        kwargs.update(res)
        kwargs['eauth'] = self.options.eauth

    if self.config['async']:
        # Fire-and-forget: publish the job and print its JID only.
        jid = self.local_client.cmd_async(**kwargs)
        salt.utils.stringutils.print_cli(
            'Executed command with job ID: {0}'.format(jid))
        return

    # local will be None when there was an error
    if not self.local_client:
        return

    retcodes = []
    errors = []
    try:
        if self.options.subset:
            cmd_func = self.local_client.cmd_subset
            kwargs['sub'] = self.options.subset
            kwargs['cli'] = True
        else:
            cmd_func = self.local_client.cmd_cli

        if self.options.progress:
            kwargs['progress'] = True
            self.config['progress'] = True
            ret = {}
            for progress in cmd_func(**kwargs):
                out = 'progress'
                try:
                    self._progress_ret(progress, out)
                except LoaderError as exc:
                    raise SaltSystemExit(exc)
                if 'return_count' not in progress:
                    ret.update(progress)
            self._progress_end(out)
            self._print_returns_summary(ret)
        elif self.config['fun'] == 'sys.doc':
            ret = {}
            out = ''
            for full_ret in self.local_client.cmd_cli(**kwargs):
                ret_, out, retcode = self._format_ret(full_ret)
                ret.update(ret_)
            self._output_ret(ret, out, retcode=retcode)
        else:
            if self.options.verbose:
                kwargs['verbose'] = True
            ret = {}
            for full_ret in cmd_func(**kwargs):
                try:
                    ret_, out, retcode = self._format_ret(full_ret)
                    retcodes.append(retcode)
                    self._output_ret(ret_, out, retcode=retcode)
                    ret.update(full_ret)
                except KeyError:
                    errors.append(full_ret)

        # Returns summary
        if self.config['cli_summary'] is True:
            if self.config['fun'] != 'sys.doc':
                if self.options.output is None:
                    self._print_returns_summary(ret)
                    self._print_errors_summary(errors)

        # NOTE: Return code is set here based on if all minions
        # returned 'ok' with a retcode of 0.
        # This is the final point before the 'salt' cmd returns,
        # which is why we set the retcode here.
        if not all(exit_code == salt.defaults.exitcodes.EX_OK
                   for exit_code in retcodes):
            sys.stderr.write(
                'ERROR: Minions returned with non-zero exit code\n')
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    except (AuthenticationError,
            AuthorizationError,
            SaltInvocationError,
            EauthAuthenticationError,
            SaltClientError) as exc:
        ret = six.text_type(exc)
        self._output_ret(ret, '', retcode=1)
def latest(name,
           target=None,
           rev=None,
           user=None,
           username=None,
           password=None,
           force=False,
           externals=True,
           trust=False):
    '''
    Checkout or update the working directory to the latest revision from the
    remote repository.

    name
        Address of the name repository as passed to "svn checkout"

    target
        Name of the target directory where the checkout will put the working
        directory

    rev : None
        The name revision number to checkout. Enable "force" if the directory
        already exists.

    user : None
        Name of the user performing repository management operations

    username : None
        The user to access the name repository with. The svn default is the
        current user

    password
        Connect to the Subversion server with this password

        .. versionadded:: 0.17.0

    force : False
        Continue if conflicts are encountered

    externals : True
        Change to False to not checkout or update externals

    trust : False
        Automatically trust the remote server. SVN's --trust-server-cert
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if not target:
        return _fail(ret, 'Target option is required')

    # Default to a fresh checkout; switched to svn.update/svn.diff below when
    # the target is already a working copy.
    svn_cmd = 'svn.checkout'
    cwd, basename = os.path.split(target)
    opts = tuple()

    if os.path.exists(target) and not os.path.isdir(target):
        return _fail(ret,
                     'The path "{0}" exists and is not '
                     'a directory.'.format(target)
                     )

    if __opts__['test']:
        # Test mode: report what would happen without touching the repo.
        if rev:
            new_rev = six.text_type(rev)
        else:
            new_rev = 'HEAD'

        if not os.path.exists(target):
            return _neutral_test(
                ret,
                ('{0} doesn\'t exist and is set to be checked out at revision ' + new_rev + '.').format(target))

        try:
            # svn.info raises if target is not a working copy.
            current_info = __salt__['svn.info'](cwd, target, user=user, username=username, password=password, fmt='dict')
            svn_cmd = 'svn.diff'
        except exceptions.CommandExecutionError:
            return _fail(
                ret,
                ('{0} exists but is not a svn working copy.').format(target))

        current_rev = current_info[0]['Revision']

        # Show the pending changes as a diff between current and target rev.
        opts += ('-r', current_rev + ':' + new_rev)

        if trust:
            opts += ('--trust-server-cert', )

        out = __salt__[svn_cmd](cwd, target, user, username, password, *opts)
        return _neutral_test(ret, ('{0}').format(out))
    try:
        current_info = __salt__['svn.info'](cwd, target, user=user, username=username, password=password, fmt='dict')
        svn_cmd = 'svn.update'
    except exceptions.CommandExecutionError:
        # Not a working copy yet — fall through with svn.checkout.
        pass

    if rev:
        opts += ('-r', six.text_type(rev))

    if force:
        opts += ('--force', )

    if externals is False:
        opts += ('--ignore-externals', )

    if trust:
        opts += ('--trust-server-cert', )

    if svn_cmd == 'svn.update':
        out = __salt__[svn_cmd](cwd, basename, user, username, password, *opts)

        # Report a revision change only if the update actually moved us.
        current_rev = current_info[0]['Revision']
        new_rev = __salt__['svn.info'](cwd=target,
                                       targets=None,
                                       user=user,
                                       username=username,
                                       password=password,
                                       fmt='dict')[0]['Revision']
        if current_rev != new_rev:
            ret['changes']['revision'] = "{0} => {1}".format(
                current_rev, new_rev)

    else:
        # Fresh checkout: record the new working copy and its revision.
        out = __salt__[svn_cmd](cwd, name, basename, user, username, password, *opts)

        ret['changes']['new'] = name
        ret['changes']['revision'] = __salt__['svn.info'](cwd=target,
                                                          targets=None,
                                                          user=user,
                                                          username=username,
                                                          password=password,
                                                          fmt='dict')[0]['Revision']

    ret['comment'] = out
    return ret
def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower( ) == 'udp' else port_num
def run(name,
        onlyif=None,
        unless=None,
        creates=None,
        cwd=None,
        root=None,
        runas=None,
        shell=None,
        env=None,
        prepend_path=None,
        stateful=False,
        umask=None,
        output_loglevel='debug',
        hide_output=False,
        timeout=None,
        ignore_timeout=False,
        use_vt=False,
        success_retcodes=None,
        **kwargs):
    '''
    Run a command if certain circumstances are met. Use ``cmd.wait`` if you
    want to use the ``watch`` requisite.

    name
        The command to execute, remember that the command will execute with
        the path and permissions of the salt-minion.

    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns a zero exit status

    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns a non-zero exit status

    cwd
        The current working directory to execute the command in, defaults to
        /root

    root
        Path to the root of the jail to use. If this parameter is set, the
        command will run inside a chroot

    runas
        The user name to run the command as

    shell
        The shell to use for execution, defaults to the shell grain

    env
        A list of environment variables to be set prior to execution.
        Example:

        .. code-block:: yaml

            script-foo:
              cmd.run:
                - env:
                  - BATCH: 'yes'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.

        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':

        .. code-block:: yaml

            script-bar:
              cmd.run:
                - env: "PATH=/some/path:$PATH"

        One can still use the existing $PATH by using a bit of Jinja:

        .. code-block:: jinja

            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}

            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}

    prepend_path
        $PATH segment to prepend (trailing ':' not necessary) to $PATH. This
        is an easier alternative to the Jinja workaround.

        .. versionadded:: 2018.3.0

    stateful
        The command being executed is expected to return data about executing
        a state. For more information, see the :ref:`stateful-argument`
        section.

    umask
        The umask (in octal) to use when running the command.

    output_loglevel : debug
        Control the loglevel at which the output from the command is logged to
        the minion log.

        .. note::
            The command being run will still be logged at the ``debug``
            loglevel regardless, unless ``quiet`` is used for this value.

    hide_output : False
        Suppress stdout and stderr in the state's results.

        .. note::
            This is separate from ``output_loglevel``, which only handles how
            Salt logs to the minion log.

        .. versionadded:: 2018.3.0

    timeout
        If the command has not terminated after timeout seconds, send the
        subprocess sigterm, and if sigterm is ignored, follow up with sigkill

    ignore_timeout
        Ignore the timeout of commands, which is useful for running nohup
        processes.

        .. versionadded:: 2015.8.0

    creates
        Only run if the file specified by ``creates`` do not exist. If you
        specify a list of files then this state will only run if **any** of
        the files do not exist.

        .. versionadded:: 2014.7.0

    use_vt : False
        Use VT utils (saltstack) to stream the command output more
        interactively to the console and the logs. This is experimental.

    bg : False
        If ``True``, run command in background and do not await or deliver
        its results.

        .. versionadded:: 2016.3.6

    success_retcodes: This parameter will be allow a list of
        non-zero return codes that should be considered a success.  If the
        return code returned from the run matches any in the provided list,
        the return code will be overridden with zero.

      .. versionadded:: 2019.2.0

    .. note::

        cmd.run supports the usage of ``reload_modules``. This functionality
        allows you to force Salt to reload all modules. You should only use
        ``reload_modules`` if your cmd.run does some sort of installation
        (such as ``pip``), if you do not reload the modules future items in
        your state which rely on the software being installed will fail.

        .. code-block:: yaml

            getpip:
              cmd.run:
                - name: /usr/bin/python /usr/local/sbin/get-pip.py
                - unless: which pip
                - require:
                  - pkg: python
                  - file: /usr/local/sbin/get-pip.py
                - reload_modules: True

    '''
    ### NOTE: The keyword arguments in **kwargs are passed directly to the
    ### ``cmd.run_all`` function and cannot be removed from the function
    ### definition, otherwise the use of unsupported arguments in a
    ### ``cmd.run`` state will result in a traceback.

    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    test_name = None
    if not isinstance(stateful, list):
        stateful = stateful is True
    # BUGFIX: guard against an empty list before indexing stateful[0]
    elif stateful and 'test_name' in stateful[0]:
        test_name = stateful[0]['test_name']
    if __opts__['test'] and test_name:
        name = test_name

    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, (list, dict)):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        return ret

    cmd_kwargs = copy.deepcopy(kwargs)
    cmd_kwargs.update({'cwd': cwd,
                       'root': root,
                       'runas': runas,
                       'use_vt': use_vt,
                       'shell': shell or __grains__['shell'],
                       'env': env,
                       'prepend_path': prepend_path,
                       'umask': umask,
                       'output_loglevel': output_loglevel,
                       'hide_output': hide_output,
                       'success_retcodes': success_retcodes})

    cret = mod_run_check(cmd_kwargs, onlyif, unless, creates)
    if isinstance(cret, dict):
        ret.update(cret)
        return ret

    if __opts__['test'] and not test_name:
        ret['result'] = None
        ret['comment'] = 'Command "{0}" would have been executed'.format(name)
        return _reinterpreted_state(ret) if stateful else ret

    if cwd and not os.path.isdir(cwd):
        ret['comment'] = ('Desired working directory "{0}" '
                          'is not available').format(cwd)
        return ret

    # Wow, we passed the test, run this sucker!
    try:
        run_cmd = 'cmd.run_all' if not root else 'cmd.run_chroot'
        cmd_all = __salt__[run_cmd](
            cmd=name, timeout=timeout, python_shell=True, **cmd_kwargs
        )
    except Exception as err:
        ret['comment'] = six.text_type(err)
        return ret

    ret['changes'] = cmd_all
    ret['result'] = not bool(cmd_all['retcode'])
    ret['comment'] = 'Command "{0}" run'.format(name)

    # Ignore timeout errors if asked (for nohups) and treat cmd as a success
    if ignore_timeout:
        trigger = 'Timed out after'
        # BUGFIX: default to '' so a missing 'stdout' key cannot raise
        # "TypeError: argument of type 'NoneType' is not iterable"
        if ret['changes'].get('retcode') == 1 \
                and trigger in ret['changes'].get('stdout', ''):
            ret['changes']['retcode'] = 0
            ret['result'] = True

    if stateful:
        ret = _reinterpreted_state(ret)
    if __opts__['test'] and cmd_all['retcode'] == 0 and ret['changes']:
        ret['result'] = None
    return ret