def get_instance(name, provider=None):
    '''
    Return details on an instance.

    Similar to the cloud action show_instance but returns only the instance
    details.

    CLI Example:

    .. code-block:: bash

        salt '*' cloud.get_instance myinstance

    SLS Example:

    .. code-block:: bash

        {{ salt['cloud.get_instance']('myinstance')['mac_address'] }}
    '''
    data = action(fun='show_instance', names=[name], provider=provider)
    info = salt.utils.simple_types_filter(data)
    try:
        # The returned structure is nested as [alias][driver][vm_name];
        # unwrap three levels to get the instance details themselves.
        info = next(six.itervalues(next(six.itervalues(next(six.itervalues(info))))))
    except (AttributeError, StopIteration):
        # AttributeError: info is not a dict (e.g. the action failed).
        # StopIteration: one of the nested mappings is empty (BUG FIX: this
        # previously propagated instead of returning None).
        return None
    return info
def execution():
    '''
    Collect all the sys.doc output from each minion and return the aggregate

    CLI Example:

    .. code-block:: bash

        salt-run doc.execution
    '''
    client = salt.client.get_local_client(__opts__['conf_file'])
    docs = {}
    try:
        for ret in client.cmd_iter('*', 'sys.doc', timeout=__opts__['timeout']):
            for v in six.itervalues(ret):
                docs.update(v)
    except SaltClientError as exc:
        # BUG FIX: 'print exc' is Python-2-only syntax; this module targets
        # both Python 2 and 3 (it uses six), so use the function form.
        print(exc)
        # NOTE(review): the error path returns a list while the success path
        # returns a dict; kept for backward compatibility with callers.
        return []
    # Flatten {minion: {func: doc}} into a single {func: doc} mapping.
    i = itertools.chain.from_iterable([i.items() for i in six.itervalues(docs)])
    ret = dict(list(i))
    return ret
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''Helper for ``present``: ensure the configured CloudWatch alarms exist.'''
    # Start from pillar-provided alarm definitions, then overlay explicit ones.
    alarm_defs = __salt__['config.option'](alarms_from_pillar, {})
    if alarms:
        alarm_defs = dictupdate.update(alarm_defs, alarms)
    # Apply each alarm via boto_cloudwatch_alarm.present and fold the results.
    combined = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(alarm_defs):
        # Scope the alarm to this ELB: prefix the name/description and pin the
        # dimension to this load balancer.
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = name + " " + info["attributes"]["description"]
        info["attributes"]["dimensions"] = {"LoadBalancerName": [name]}
        state_kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        single_ret = __salt__["state.single"]('boto_cloudwatch_alarm.present', **state_kwargs)
        results = next(six.itervalues(single_ret))
        if not results["result"]:
            combined["result"] = results["result"]
        if results.get("changes", {}) != {}:
            combined["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            combined["comment"] += results["comment"]
    return combined
def dir_list(load):
    '''
    Return a list of all directories on the master
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Boron.'
        )
        load['saltenv'] = load.pop('env')

    ret = []
    if 'saltenv' not in load:
        return ret

    saltenv = load['saltenv']
    metadata = _init()
    if not metadata or saltenv not in metadata:
        return ret

    # Walk every bucket's cached dir listing for this environment.
    for dir_entries in six.itervalues(_find_dirs(metadata[saltenv])):
        # Strip the env prefix and any trailing slash from each entry.
        trimmed = _trim_env_off_path(dir_entries, saltenv, trim_slash=True)
        # Single-bucket mode leaves an empty string for the base env dir.
        ret.extend(entry for entry in trimmed if entry)

    return ret
def ip_addrs6(interface=None, include_loopback=False, interface_data=None):
    '''
    Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
    unless 'include_loopback=True' is indicated. If 'interface' is provided,
    then only IP addresses from that interface will be returned.
    '''
    addresses = set()
    if isinstance(interface_data, dict):
        ifaces = interface_data
    else:
        ifaces = interfaces()
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces)
                              if k == interface])
        if not target_ifaces:
            log.error('Interface {0} not found.'.format(interface))
    for ipv6_info in six.itervalues(target_ifaces):
        # Primary addresses first, then any secondary inet6 addresses.
        for ipv6 in ipv6_info.get('inet6', []):
            if include_loopback or ipv6['address'] != '::1':
                addresses.add(ipv6['address'])
        for secondary in ipv6_info.get('secondary', []):
            addr = secondary.get('address')
            if addr and secondary.get('type') == 'inet6':
                if include_loopback or addr != '::1':
                    addresses.add(addr)
    return sorted(list(addresses))
def ip_addrs(interface=None, include_loopback=False, interface_data=None):
    '''
    Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
    ignored, unless 'include_loopback=True' is indicated. If 'interface' is
    provided, then only IP addresses from that interface will be returned.
    '''
    addresses = set()
    if isinstance(interface_data, dict):
        ifaces = interface_data
    else:
        ifaces = interfaces()
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces)
                              if k == interface])
        if not target_ifaces:
            log.error('Interface {0} not found.'.format(interface))
    for ipv4_info in six.itervalues(target_ifaces):
        for ipv4 in ipv4_info.get('inet', []):
            # Loopback means "in 127.0.0.0/8" or explicitly labelled 'lo'.
            is_loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) \
                or ipv4.get('label') == 'lo'
            if include_loopback or not is_loopback:
                addresses.add(ipv4['address'])
        for secondary in ipv4_info.get('secondary', []):
            addr = secondary.get('address')
            if addr and secondary.get('type') == 'inet':
                if include_loopback or not in_subnet('127.0.0.0/8', [addr]):
                    addresses.add(addr)
    return sorted(list(addresses))
def _subnets(proto='inet', interfaces_=None):
    '''
    Returns a list of subnets to which the host belongs

    proto is 'inet' (IPv4, keyed by 'netmask') or 'inet6' (IPv6, keyed by
    'prefixlen'); any other value logs an error and returns None.
    '''
    if interfaces_ is None:
        ifaces = interfaces()
    elif isinstance(interfaces_, list):
        ifaces = {}
        for key, value in six.iteritems(interfaces()):
            if key in interfaces_:
                ifaces[key] = value
    else:
        ifaces = {interfaces_: interfaces().get(interfaces_, {})}

    ret = set()

    if proto == 'inet':
        subnet = 'netmask'
    elif proto == 'inet6':
        subnet = 'prefixlen'
    else:
        log.error('Invalid proto {0} calling subnets()'.format(proto))
        return

    for ip_info in six.itervalues(ifaces):
        # BUG FIX: copy the address list before extending it. The previous
        # code called .extend() on the list stored inside the interface data,
        # mutating the caller's (possibly cached) structure in place.
        addrs = list(ip_info.get(proto, []))
        addrs.extend([addr for addr in ip_info.get('secondary', [])
                      if addr.get('type') == proto])
        for intf in addrs:
            intf = ipaddress.ip_interface(
                '{0}/{1}'.format(intf['address'], intf[subnet]))
            if not intf.is_loopback:
                ret.add(intf.network)
    return [str(net) for net in sorted(ret)]
def __clean_tmp(sfn):
    '''Remove ``sfn`` if it lives in the temp dir and is not a file-root file.'''
    if not sfn.startswith(tempfile.gettempdir()):
        return
    # Never delete a file that belongs to one of the configured file roots.
    for root in itertools.chain.from_iterable(
            six.itervalues(__opts__['file_roots'])):
        if sfn.startswith(root):
            return
    if os.path.exists(sfn):
        os.remove(sfn)
def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'):
    '''
    Return the full list of IP adresses matching the criteria

    proto = inet|inet6
    '''
    ret = set()

    ifaces = interface_data \
        if isinstance(interface_data, dict) \
        else interfaces()
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces)
                              if k == interface])
        if not target_ifaces:
            log.error('Interface {0} not found.'.format(interface))
    for ip_info in six.itervalues(target_ifaces):
        # BUG FIX: copy the address list before extending it. The previous
        # code called .extend() on the list stored inside the interface data,
        # mutating the caller's (possibly cached) structure in place.
        addrs = list(ip_info.get(proto, []))
        addrs.extend([addr for addr in ip_info.get('secondary', [])
                      if addr.get('type') == proto])

        for addr in addrs:
            addr = ipaddress.ip_address(addr.get('address'))
            if not addr.is_loopback or include_loopback:
                ret.add(addr)
    return [str(addr) for addr in sorted(ret)]
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''Helper for ``present``: ensure the configured CloudWatch alarms exist.'''
    # Pillar-supplied definitions first; explicit alarms override them.
    alarm_defs = __salt__['config.option'](alarms_from_pillar, {})
    if alarms:
        alarm_defs = dictupdate.update(alarm_defs, alarms)
    merged = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(alarm_defs):
        # Scope the alarm to this ASG: prefix name/description and set the
        # AutoScalingGroupName dimension.
        info['name'] = name + ' ' + info['name']
        info['attributes']['description'] = name + ' ' + info['attributes']['description']
        info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]}
        single_ret = __salt__['state.single'](
            'boto_cloudwatch_alarm.present',
            name=info['name'],
            attributes=info['attributes'],
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        results = next(six.itervalues(single_ret))
        if not results['result']:
            merged['result'] = False
        if results.get('changes', {}) != {}:
            merged['changes'][info['name']] = results['changes']
        if 'comment' in results:
            merged['comment'] += results['comment']
    return merged
def file_list(load):
    '''
    Return a list of all files on the file server in a specified environment
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Boron.'
        )
        load['saltenv'] = load.pop('env')

    result = []
    if 'saltenv' not in load:
        return result

    saltenv = load['saltenv']
    metadata = _init()
    if not metadata or saltenv not in metadata:
        return result

    for bucket_files in six.itervalues(_find_files(metadata[saltenv])):
        # Drop files matched by the master's file_ignore patterns.
        kept = [fname for fname in bucket_files
                if not fs.is_file_ignored(__opts__, fname)]
        result += _trim_env_off_path(kept, saltenv)

    return result
def owner(*paths):
    '''
    .. versionadded:: 2014.7.0

    Return the name of the package that owns the file. Multiple file paths can
    be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version`, if a
    single path is passed, a string will be returned, and if multiple paths
    are passed, a dictionary of file/package name pairs will be returned.

    If the file is not owned by a package, or is not present on the minion,
    then an empty string will be returned for that path.

    CLI Example:

        salt '*' pkg.owner /usr/bin/apachectl
        salt '*' pkg.owner /usr/bin/apachectl /usr/bin/zsh
    '''
    if not paths:
        return ''
    owners = {}
    base_cmd = ['pacman', '-Qqo']
    for target in paths:
        owners[target] = __salt__['cmd.run_stdout'](base_cmd + [target],
                                                    python_shell=False)
    # Collapse to a bare string when exactly one path was queried.
    if len(owners) == 1:
        return next(six.itervalues(owners))
    return owners
def dir_list(load):
    '''
    Return a list of all directories on the master
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        load.pop('env')

    ret = []
    if 'saltenv' not in load:
        return ret

    saltenv = load['saltenv']
    metadata = _init()
    if not metadata or saltenv not in metadata:
        return ret

    # Walk every bucket's cached dir listing for this environment.
    for dir_entries in six.itervalues(_find_dirs(metadata[saltenv])):
        # Strip the env prefix and any trailing slash from each entry.
        trimmed = _trim_env_off_path(dir_entries, saltenv, trim_slash=True)
        # Single-bucket mode leaves an empty string for the base env dir.
        ret.extend(entry for entry in trimmed if entry)

    return ret
def file_list(load):
    '''
    Return a list of all files on the file server in a specified environment
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        load.pop('env')

    result = []
    if 'saltenv' not in load:
        return result

    saltenv = load['saltenv']
    metadata = _init()
    if not metadata or saltenv not in metadata:
        return result

    for bucket_files in six.itervalues(_find_files(metadata[saltenv])):
        # Drop files matched by the master's file_ignore patterns.
        kept = [fname for fname in bucket_files
                if not fs.is_file_ignored(__opts__, fname)]
        result += _trim_env_off_path(kept, saltenv)

    return result
def merge_tops(self, tops):
    '''
    Cleanly merge the top files

    :param tops: mapping whose values are lists of parsed top-file
        structures ({saltenv: {target: [components]}}).
    :return: result of ``self.sort_top_targets`` over the merged targets
        and their per-target order values.
    '''
    # top:    {saltenv: {target: [match dicts + state names]}}
    # orders: {saltenv: {target: int order}} -- defaults to 0 per target
    top = collections.defaultdict(OrderedDict)
    orders = collections.defaultdict(OrderedDict)
    for ctops in six.itervalues(tops):
        for ctop in ctops:
            for saltenv, targets in ctop.items():
                # 'include' is a directive, not an environment; skip it.
                if saltenv == 'include':
                    continue
                for tgt in targets:
                    matches = []
                    states = OrderedDict()
                    orders[saltenv][tgt] = 0
                    for comp in ctop[saltenv][tgt]:
                        if isinstance(comp, dict):
                            # Dict components carry 'match' and/or 'order'
                            # directives for this target.
                            if 'match' in comp:
                                matches.append(comp)
                            if 'order' in comp:
                                order = comp['order']
                                if not isinstance(order, int):
                                    # Coerce non-int orders; anything
                                    # unparseable falls back to 0.
                                    try:
                                        order = int(order)
                                    except ValueError:
                                        order = 0
                                orders[saltenv][tgt] = order
                        # Plain strings are state names; an OrderedDict keyed
                        # by name deduplicates while preserving order.
                        if isinstance(comp, string_types):
                            states[comp] = True
                    # NOTE: a later top file overwrites (not merges) the
                    # matches for the same saltenv/target.
                    top[saltenv][tgt] = matches
                    top[saltenv][tgt].extend(states)
    return self.sort_top_targets(top, orders)
def kill_children(self, *args):
    '''
    Kill all of the children: terminate, wait up to ``self.wait_for_kill``
    seconds for them to exit, then SIGKILL any survivors.
    '''
    # check that this is the correct process, children inherit this
    # handler, if we are in a child lets just run the original handler
    if os.getpid() != self._pid:
        if callable(self._sigterm_handler):
            return self._sigterm_handler(*args)
        elif self._sigterm_handler is not None:
            return signal.default_int_handler(signal.SIGTERM)(*args)
        else:
            return
    for p_map in six.itervalues(self._process_map):
        p_map['Process'].terminate()

    end_time = time.time() + self.wait_for_kill  # when to die

    while self._process_map and time.time() < end_time:
        # BUG FIX: iterate over a snapshot -- entries are deleted inside the
        # loop, which raises RuntimeError on Python 3 when iterating the
        # live dict view.
        for pid, p_map in list(self._process_map.items()):
            p_map['Process'].join(0)

            # This is a race condition if a signal was passed to all children
            try:
                del self._process_map[pid]
            except KeyError:
                pass
    # if anyone is done after
    for pid in self._process_map:
        try:
            # BUG FIX: os.kill takes (pid, sig); the arguments were reversed,
            # so the intended SIGKILL was never delivered to the child.
            os.kill(pid, signal.SIGKILL)
        # in case the process has since decided to die, os.kill returns OSError
        except OSError:
            pass
def latest_version(*names, **kwargs):
    '''
    Return the latest version of the named package available for upgrade or
    installation

    Currently chooses stable versions, falling back to devel if that does not
    exist.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.latest_version <package name>
        salt '*' pkg.latest_version <package1> <package2> <package3>
    '''
    if salt.utils.is_true(kwargs.pop('refresh', True)):
        refresh_db()

    def _pick_version(pkg_info):
        # Prefer the stable version; fall back to devel when there is none.
        return pkg_info['versions']['stable'] or pkg_info['versions']['devel']

    versions_dict = dict((key, _pick_version(val))
                         for key, val in six.iteritems(_info(*names)))
    # A single requested package collapses to a bare version string.
    if len(names) == 1:
        return next(six.itervalues(versions_dict))
    return versions_dict
def _get_service(name):
    '''
    Get information about a service.  If the service is not found, raise an
    error

    :param str name: Service label, file name, or full path

    :return: The service information for the service, otherwise an Error
    :rtype: dict
    '''
    services = __salt__['service.available_services']()
    name = name.lower()

    # Fast path: exact match on the service label.
    if name in services:
        return services[name]

    for info in six.itervalues(services):
        # Match on the plist's full path, then on its basename.
        if info['file_path'].lower() == name:
            return info
        base, _ext = os.path.splitext(info['file_name'])
        if base.lower() == name:
            return info

    # Could not find service
    raise CommandExecutionError('Service not found: {0}'.format(name))
def version(*names, **kwargs):
    '''
    Common interface for obtaining the version of installed packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.version vim
        salt '*' pkg_resource.version foo bar baz
        salt '*' pkg_resource.version 'python*'
    '''
    ret = {}
    versions_as_list = salt.utils.is_true(kwargs.pop('versions_as_list', False))
    pkg_glob = False
    if names:
        pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        for name in names:
            if '*' in name:
                # Glob pattern: collect every installed package that matches.
                pkg_glob = True
                for match in fnmatch.filter(pkgs, name):
                    ret[match] = pkgs.get(match, [])
            else:
                ret[name] = pkgs.get(name, [])
        if not versions_as_list:
            __salt__['pkg_resource.stringify'](ret)
    # Return a string if no globbing is used, and there is one item in the
    # return dict
    if len(ret) == 1 and not pkg_glob:
        try:
            return next(six.itervalues(ret))
        except StopIteration:
            return ''
    return ret
def dir_list(load):
    '''
    Return a list of all directories on the master
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    ret = []
    if 'saltenv' not in load:
        return ret

    saltenv = load['saltenv']
    metadata = _init()
    if not metadata or saltenv not in metadata:
        return ret

    # Walk the cached dir listings, one mapping per bucket.
    for bucket in _find_dirs(metadata[saltenv]):
        for dir_entries in six.itervalues(bucket):
            # Strip the env prefix and any trailing slash from each entry.
            trimmed = _trim_env_off_path(dir_entries, saltenv, trim_slash=True)
            # Single-bucket mode leaves an empty string for the base env dir.
            ret.extend(entry for entry in trimmed if entry)
    return ret
def _info(vm_):
    '''Aggregate per-NIC network counters for ``vm_`` into one totals dict.'''
    dom = _get_dom(vm_)
    nics = get_nics(vm_)
    # libvirt's interfaceStats() returns an 8-tuple in exactly this order.
    counter_keys = ('rx_bytes', 'rx_packets', 'rx_errs', 'rx_drop',
                    'tx_bytes', 'tx_packets', 'tx_errs', 'tx_drop')
    totals = dict.fromkeys(counter_keys, 0)
    for attrs in six.itervalues(nics):
        if 'target' not in attrs:
            continue
        stats = dom.interfaceStats(attrs['target'])
        for key, value in zip(counter_keys, stats):
            totals[key] += value
    return totals
def owner(*paths):
    '''
    Return the name of the package that owns the file. Multiple file paths can
    be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single
    path is passed, a string will be returned, and if multiple paths are
    passed, a dictionary of file/package name pairs will be returned.

    If the file is not owned by a package, or is not present on the minion,
    then an empty string will be returned for that path.

    CLI Example:

        salt '*' pkg.owner /usr/bin/apachectl
        salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
    '''
    if not paths:
        return ''
    ret = {}
    cmd_search = ['opkg', 'search']
    for path in paths:
        cmd = cmd_search[:]
        cmd.append(path)
        output = __salt__['cmd.run_stdout'](cmd,
                                            output_loglevel='trace',
                                            python_shell=False)
        # 'opkg search' prints '<pkg> - <version>'; keep only the package name.
        if output:
            ret[path] = output.split(' - ')[0].strip()
        else:
            ret[path] = ''
    if len(ret) == 1:
        # BUG FIX: previously returned the six.itervalues iterator object
        # itself; the docstring (and the sibling pacman implementation)
        # promise a bare string for a single path.
        return next(six.itervalues(ret))
    return ret
def assertNoOrderedDict(self, data):
    '''Recursively assert that ``data`` contains no OrderedDict anywhere.'''
    if isinstance(data, OrderedDict):
        raise AssertionError(
            'Found an ordered dictionary'
        )
    if isinstance(data, dict):
        # Only values can nest further containers; keys are hashable scalars.
        for item in six.itervalues(data):
            self.assertNoOrderedDict(item)
    elif isinstance(data, (list, tuple)):
        for item in data:
            self.assertNoOrderedDict(item)
def test_implicit_require_with_goal_state(self):
    # Render an SLS where A..E are generated in a loop and F/G add explicit
    # requires; stateconf's ordering should chain every state implicitly and
    # the generated goal state should require all of them.
    result = self._render_sls('''
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}

F:
  cmd.run:
    - name: echo this is F
    - cwd: /
    - require:
      - cmd: A
      - cmd: B

G:
  cmd.run:
    - name: echo this is G
    - cwd: /
    - require:
      - cmd: D
      - cmd: F
''', sls='test', argline='-o yaml . jinja')
    # Walk G..B in reverse: each state's first require must be its
    # predecessor (the implicit ordering chain).
    sids = 'ABCDEFG'[::-1]
    for i, sid in enumerate(sids):
        if i < len(sids) - 1:
            self.assertEqual(
                result[sid]['cmd.run'][2]['require'][0]['cmd'],
                sids[i + 1]
            )
    # F keeps its explicit requires (A, B) after the implicit one.
    F_args = result['F']['cmd.run']
    self.assertEqual(len(F_args), 3)
    F_req = F_args[2]['require']
    self.assertEqual(len(F_req), 3)
    self.assertEqual(F_req[1]['cmd'], 'A')
    self.assertEqual(F_req[2]['cmd'], 'B')
    # Likewise G keeps its explicit requires (D, F).
    G_args = result['G']['cmd.run']
    self.assertEqual(len(G_args), 3)
    G_req = G_args[2]['require']
    self.assertEqual(len(G_req), 3)
    self.assertEqual(G_req[1]['cmd'], 'D')
    self.assertEqual(G_req[2]['cmd'], 'F')
    # The synthesized test::goal state must require every rendered state.
    goal_args = result['test::goal']['stateconf.set']
    self.assertEqual(len(goal_args), 1)
    self.assertEqual(
        [next(six.itervalues(i)) for i in goal_args[0]['require']],
        list('ABCDEFG')
    )
def clear(self):
    "od.clear() -> None. Remove all items from od."
    try:
        # Break the doubly-linked list that preserves insertion order:
        # emptying each node in place avoids reference cycles, then the
        # sentinel root is reset to point at itself.
        for node in six.itervalues(self.__map):
            del node[:]
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
    except AttributeError:
        # __map/__root do not exist yet (e.g. clear() before __init__
        # finished); falling through still empties the dict itself.
        pass
    dict.clear(self)
def server_show_libcloud(self, uuid):
    '''
    Make output look like libcloud output for consistency
    '''
    server_info = self.server_show(uuid)
    # server_info maps a single server name to its details.
    server_name = next(six.iterkeys(server_info))
    server_details = next(six.itervalues(server_info))
    if not hasattr(self, 'password'):
        self.password = None
    return NovaServer(server_name, server_details, self.password)
def test_implicit_require_with_goal_state(self):
    # Render an SLS where A..E are generated in a loop and F/G add explicit
    # requires; stateconf's ordering should chain every state implicitly and
    # the generated goal state should require all of them.
    result = self._render_sls(
        """
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}

F:
  cmd.run:
    - name: echo this is F
    - cwd: /
    - require:
      - cmd: A
      - cmd: B

G:
  cmd.run:
    - name: echo this is G
    - cwd: /
    - require:
      - cmd: D
      - cmd: F
""",
        sls="test",
        argline="-o yaml . jinja",
    )
    # Walk G..B in reverse: each state's first require must be its
    # predecessor (the implicit ordering chain).
    sids = "ABCDEFG"[::-1]
    for i, sid in enumerate(sids):
        if i < len(sids) - 1:
            self.assertEqual(result[sid]["cmd.run"][2]["require"][0]["cmd"], sids[i + 1])

    # F keeps its explicit requires (A, B) after the implicit one.
    F_args = result["F"]["cmd.run"]
    self.assertEqual(len(F_args), 3)
    F_req = F_args[2]["require"]
    self.assertEqual(len(F_req), 3)
    self.assertEqual(F_req[1]["cmd"], "A")
    self.assertEqual(F_req[2]["cmd"], "B")

    # Likewise G keeps its explicit requires (D, F).
    G_args = result["G"]["cmd.run"]
    self.assertEqual(len(G_args), 3)
    G_req = G_args[2]["require"]
    self.assertEqual(len(G_req), 3)
    self.assertEqual(G_req[1]["cmd"], "D")
    self.assertEqual(G_req[2]["cmd"], "F")

    # The synthesized test::goal state must require every rendered state.
    goal_args = result["test::goal"]["stateconf.set"]
    self.assertEqual(len(goal_args), 1)
    self.assertEqual([next(six.itervalues(i)) for i in goal_args[0]["require"]], list("ABCDEFG"))
def running_service_owners(
        exclude=('/dev', '/home', '/media', '/proc', '/run', '/sys/', '/tmp', '/var')
        ):
    '''
    Determine which packages own the currently running services. By default,
    excludes files whose full path starts with ``/dev``, ``/home``, ``/media``,
    ``/proc``, ``/run``, ``/sys``, ``/tmp`` and ``/var``. This can be
    overridden by passing in a new list to ``exclude``.

    CLI Example:

        salt myminion introspect.running_service_owners
    '''
    error = {}
    if 'pkg.owner' not in __salt__:
        error['Unsupported Package Manager'] = (
            'The module for the package manager on this system does not '
            'support looking up which package(s) owns which file(s)'
        )

    if 'file.open_files' not in __salt__:
        error['Unsupported File Module'] = (
            'The file module on this system does not '
            'support looking up open files on the system'
        )

    if error:
        return {'Error': error}

    ret = {}
    open_files = __salt__['file.open_files']()
    execs = __salt__['service.execs']()

    for path in open_files:
        # Skip any path under an excluded prefix.
        if any(path.startswith(prefix) for prefix in exclude):
            continue
        # Only executables can be a service's binary.
        if not os.access(path, os.X_OK):
            continue
        for service in execs:
            if path == execs[service]:
                pkg = __salt__['pkg.owner'](path)
                ret[service] = next(six.itervalues(pkg))

    return ret
def _run_batch(self):
    '''
    Execute the command in batch mode, running on subsets of minions at a
    time. Exits the process on authentication or client setup failure.
    '''
    import salt.cli.batch
    eauth = {}
    if 'token' in self.config:
        eauth['token'] = self.config['token']

    # If using eauth and a token hasn't already been loaded into
    # kwargs, prompt the user to enter auth credentials
    if 'token' not in eauth and self.options.eauth:
        resolver = salt.auth.Resolver(self.config)
        res = resolver.cli(self.options.eauth)
        if self.options.mktoken and res:
            tok = resolver.token_cli(
                self.options.eauth,
                res
            )
            if tok:
                eauth['token'] = tok.get('token', '')
        if not res:
            sys.stderr.write('ERROR: Authentication failed\n')
            sys.exit(2)
        eauth.update(res)
        eauth['eauth'] = self.options.eauth

    if self.options.static:
        if not self.options.batch:
            self.config['batch'] = '100%'

        # CONSISTENCY FIX: guard Batch construction like the non-static
        # branch below does, so a client setup error exits cleanly instead
        # of raising an unhandled traceback.
        try:
            batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)
        except salt.exceptions.SaltClientError:
            sys.exit(2)

        ret = {}

        for res in batch.run():
            ret.update(res)

        self._output_ret(ret, '')
    else:
        try:
            batch = salt.cli.batch.Batch(self.config, eauth=eauth)
        except salt.exceptions.SaltClientError as exc:
            # We will print errors to the console further down the stack
            sys.exit(1)
        # Printing the output is already taken care of in run() itself
        for res in batch.run():
            if self.options.failhard:
                for ret in six.itervalues(res):
                    retcode = salt.utils.job.get_retcode(ret)
                    if retcode != 0:
                        sys.stderr.write('ERROR: Minions returned with non-zero exit code\n')
                        sys.exit(retcode)
def _verify_globals(self, mod_dict):
    '''
    Verify that the globals listed in the doc string (from the test) are in
    these modules
    '''
    # find the globals
    global_vars = []
    for val in six.itervalues(mod_dict):
        # only find salty globals
        if val.__module__.startswith('salt.loaded') and hasattr(val, '__globals__'):
            global_vars.append(val.__globals__)

    # if we couldn't find any, then we have no modules -- so something is broken
    self.assertNotEqual(global_vars, [], msg='No modules were loaded.')

    # get the names of the globals you should have
    func_name = inspect.stack()[1][3]
    # SAFETY FIX: the docstring is plain YAML data, so use safe_load --
    # yaml.load without an explicit Loader is deprecated and can construct
    # arbitrary Python objects.
    names = next(six.itervalues(yaml.safe_load(getattr(self, func_name).__doc__)))

    # Now, test each module!
    for item in global_vars:
        for name in names:
            self.assertIn(name, list(item.keys()))
def get_docker(interfaces=None, cidrs=None):
    '''
    Get all mine data for 'docker.get_containers' and run an aggregation
    routine. The "interfaces" parameter allows for specifying which network
    interfaces to select ip addresses from. The "cidrs" parameter allows for
    specifying a list of cidrs which the ip address must match.

    Returns a mapping of container image name to published ports:
    {image: {'ipv4': {private_port: ['host_ip:public_port', ...]}}}

    CLI Example:

    .. code-block:: bash

        salt '*' mine.get_docker
        salt '*' mine.get_docker interfaces='eth0'
        salt '*' mine.get_docker interfaces='["eth0", "eth1"]'
        salt '*' mine.get_docker cidrs='107.170.147.0/24'
        salt '*' mine.get_docker cidrs='["107.170.147.0/24", "172.17.42.0/24"]'
        salt '*' mine.get_docker interfaces='["eth0", "eth1"]' cidrs='["107.170.147.0/24", "172.17.42.0/24"]'
    '''
    # Enforce that interface and cidr are lists
    if interfaces:
        interface_ = []
        interface_.extend(interfaces if isinstance(interfaces, list) else [interfaces])
        interfaces = interface_
    if cidrs:
        cidr_ = []
        cidr_.extend(cidrs if isinstance(cidrs, list) else [cidrs])
        cidrs = cidr_

    # Get docker info
    cmd = 'docker.get_containers'
    docker_hosts = get('*', cmd)

    proxy_lists = {}

    # Process docker info
    for containers in six.itervalues(docker_hosts):
        host_ips = []

        # Prepare host_ips list: either every inet address on the host, or
        # only those on the requested interfaces.
        if not interfaces:
            for info in six.itervalues(containers['host']['interfaces']):
                if 'inet' in info:
                    for ip_ in info['inet']:
                        host_ips.append(ip_['address'])
        else:
            for interface in interfaces:
                if interface in containers['host']['interfaces']:
                    if 'inet' in containers['host']['interfaces'][interface]:
                        for item in containers['host']['interfaces'][
                                interface]['inet']:
                            host_ips.append(item['address'])
        host_ips = list(set(host_ips))

        # Filter out ips from host_ips with cidrs
        if cidrs:
            good_ips = []
            for cidr in cidrs:
                for ip_ in host_ips:
                    if salt.utils.network.in_subnet(cidr, [ip_]):
                        good_ips.append(ip_)
            host_ips = list(set(good_ips))

        # Process each container: group the published ports by image name.
        if containers['out']:
            for container in containers['out']:
                if container['Image'] not in proxy_lists:
                    proxy_lists[container['Image']] = {}
                for dock_port in container['Ports']:
                    # If port is 0.0.0.0, then we must get the docker host IP
                    if dock_port['IP'] == '0.0.0.0':
                        for ip_ in host_ips:
                            proxy_lists[container['Image']].setdefault(
                                'ipv4', {}).setdefault(
                                    dock_port['PrivatePort'], []).append(
                                        '{0}:{1}'.format(
                                            ip_, dock_port['PublicPort']))
                            # Deduplicate the accumulated host:port entries.
                            proxy_lists[container['Image']]['ipv4'][
                                dock_port['PrivatePort']] = list(
                                    set(proxy_lists[container['Image']]['ipv4']
                                        [dock_port['PrivatePort']]))
                    elif dock_port['IP']:
                        # Port bound to a specific host IP; publish as-is.
                        proxy_lists[container['Image']].setdefault(
                            'ipv4', {}).setdefault(
                                dock_port['PrivatePort'], []).append(
                                    '{0}:{1}'.format(
                                        dock_port['IP'],
                                        dock_port['PublicPort']))
                        proxy_lists[container['Image']]['ipv4'][
                            dock_port['PrivatePort']] = list(
                                set(proxy_lists[container['Image']]['ipv4'][
                                    dock_port['PrivatePort']]))

    return proxy_lists
def present(name, level, devices, **kwargs):
    '''
    Verify that the raid is present

    .. versionchanged:: 2014.7.0

    name
        The name of raid device to be created

    level
        The RAID level to use when creating the raid.

    devices
        A list of devices used to build the array.

    Example:

    .. code-block:: yaml

        /dev/md0:
          raid.present:
            - level: 5
            - devices:
              - /dev/xvdd
              - /dev/xvde
              - /dev/xvdf
            - chunk: 256
            - run: True
    '''
    ret = {'changes': {}, 'comment': '', 'name': name, 'result': True}

    # Device exists: nothing to do.
    raids = __salt__['raid.list']()
    if raids.get(name):
        ret['comment'] = 'Raid {0} already present'.format(name)
        return ret

    # Decide whether to create or assemble
    can_assemble = {}
    for dev in devices:
        # mdadm -E exits with 0 iff all devices given are part of an array
        cmd = 'mdadm -E {0}'.format(dev)
        can_assemble[dev] = __salt__['cmd.retcode'](cmd) == 0

    if True in six.itervalues(can_assemble) and False in six.itervalues(can_assemble):
        # Mixed membership: refuse rather than guess, and report which
        # devices fall on each side.
        in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if x[1]])
        not_in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if not x[1]])
        ret['comment'] = 'Devices are a mix of RAID constituents ({0}) and '\
            'non-RAID-constituents({1}).'.format(in_raid, not_in_raid)
        ret['result'] = False
        return ret
    elif next(six.itervalues(can_assemble)):
        # NOTE(review): next() raises StopIteration when 'devices' is empty;
        # callers appear to always pass a non-empty list -- confirm.
        do_assemble = True
        verb = 'assembled'
    else:
        do_assemble = False
        verb = 'created'

    # If running with test use the test_mode with create or assemble
    if __opts__['test']:
        if do_assemble:
            res = __salt__['raid.assemble'](name, devices, test_mode=True, **kwargs)
        else:
            res = __salt__['raid.create'](name, level, devices, test_mode=True, **kwargs)
        ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res)
        ret['result'] = None
        return ret

    # Attempt to create or assemble the array
    if do_assemble:
        __salt__['raid.assemble'](name, devices, **kwargs)
    else:
        __salt__['raid.create'](name, level, devices, **kwargs)

    # Re-list to verify the array now exists; its entry becomes the changes.
    raids = __salt__['raid.list']()
    changes = raids.get(name)
    if changes:
        ret['comment'] = 'Raid {0} {1}.'.format(name, verb)
        ret['changes'] = changes
        # Saving config
        __salt__['raid.save_config']()
    else:
        ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb)
        ret['result'] = False

    return ret
def setUpClass(cls):  # pylint: disable=arguments-differ
    """
    Set up all the webserver paths. Designed to be run once in a setUpClass
    function.

    Lays out a temp directory tree (config/git/venv), picks two distinct
    free ports for nginx and uWSGI, applies the git_pillar.http state, and
    starts (or restarts) the uwsgi and nginx daemons.
    """
    super(WebserverMixin, cls).setUpClass()
    cls.root_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
    cls.config_dir = os.path.join(cls.root_dir, "config")
    cls.nginx_conf = os.path.join(cls.config_dir, "nginx.conf")
    cls.uwsgi_conf = os.path.join(cls.config_dir, "uwsgi.yml")
    cls.git_dir = os.path.join(cls.root_dir, "git")
    cls.repo_dir = os.path.join(cls.git_dir, "repos")
    cls.venv_dir = os.path.join(cls.root_dir, "venv")
    cls.uwsgi_bin = os.path.join(cls.venv_dir, "bin", "uwsgi")
    cls.nginx_port = cls.uwsgi_port = get_unused_localhost_port()
    while cls.uwsgi_port == cls.nginx_port:
        # Ensure we don't hit a corner case in which two sucessive calls to
        # get_unused_localhost_port() return identical port numbers.
        cls.uwsgi_port = get_unused_localhost_port()
    cls.url = "http://127.0.0.1:{port}/repo.git".format(port=cls.nginx_port)
    cls.url_extra_repo = "http://127.0.0.1:{port}/extra_repo.git".format(
        port=cls.nginx_port
    )
    cls.ext_opts = {"url": cls.url, "url_extra_repo": cls.url_extra_repo}
    # Add auth params if present (if so this will trigger the spawned
    # server to turn on HTTP basic auth).
    for credential_param in ("user", "password"):
        if hasattr(cls, credential_param):
            cls.ext_opts[credential_param] = getattr(cls, credential_param)
    auth_enabled = hasattr(cls, "username") and hasattr(cls, "password")
    pillar = {
        "git_pillar": {
            "config_dir": cls.config_dir,
            "git_dir": cls.git_dir,
            "venv_dir": cls.venv_dir,
            "root_dir": cls.root_dir,
            "nginx_port": cls.nginx_port,
            "uwsgi_port": cls.uwsgi_port,
            "auth_enabled": auth_enabled,
        }
    }

    # Different libexec dir for git backend on Debian-based systems
    git_core = "/usr/libexec/git-core"
    if not os.path.exists(git_core):
        git_core = "/usr/lib/git-core"

    if not os.path.exists(git_core):
        # Clean up the partially-built fixture before failing the class.
        cls.tearDownClass()
        raise AssertionError(
            "{} not found. Either git is not installed, or the test "
            "class needs to be updated.".format(git_core)
        )

    pillar["git_pillar"]["git-http-backend"] = os.path.join(
        git_core, "git-http-backend"
    )
    try:
        # Apply the provisioning states only once per class.
        if cls.prep_states_ran is False:
            ret = cls.cls_run_function(
                "state.apply", mods="git_pillar.http", pillar=pillar
            )
            assert next(six.itervalues(ret))["result"] is True
            cls.prep_states_ran = True
            log.info("%s: States applied", cls.__name__)
        # Restart uwsgi if a previous instance has died.
        if cls.uwsgi_proc is not None:
            if not psutil.pid_exists(cls.uwsgi_proc.pid):
                log.warning(
                    "%s: uWsgi started but appears to be dead now. Will try to restart it.",
                    cls.__name__,
                )
                cls.uwsgi_proc = None
        if cls.uwsgi_proc is None:
            cls.uwsgi_proc = start_daemon(
                cls.uwsgi_bin, cls.config_dir, cls.uwsgi_port, UwsgiDaemon
            )
            log.info("%s: %s started", cls.__name__, cls.uwsgi_bin)
        # Restart nginx if a previous instance has died.
        if cls.nginx_proc is not None:
            if not psutil.pid_exists(cls.nginx_proc.pid):
                log.warning(
                    "%s: nginx started but appears to be dead now. Will try to restart it.",
                    cls.__name__,
                )
                cls.nginx_proc = None
        if cls.nginx_proc is None:
            cls.nginx_proc = start_daemon(
                "nginx", cls.config_dir, cls.nginx_port, NginxDaemon
            )
            log.info("%s: nginx started", cls.__name__)
    except AssertionError:
        # Tear down whatever was started, then re-raise the original failure.
        cls.tearDownClass()
        six.reraise(*sys.exc_info())
def _run_batch(self):
    '''
    Execute the targeted command in batch mode, working through the
    matched minions a chunk at a time.

    Exits the process on authentication failure (2), on client setup
    errors, or — with ``--failhard`` — with the first non-zero minion
    return code.
    '''
    import salt.cli.batch
    eauth = {}
    if 'token' in self.config:
        eauth['token'] = self.config['token']

    # If using eauth and a token hasn't already been loaded into
    # kwargs, prompt the user to enter auth credentials
    if 'token' not in eauth and self.options.eauth:
        # This is expensive. Don't do it unless we need to.
        import salt.auth
        resolver = salt.auth.Resolver(self.config)
        res = resolver.cli(self.options.eauth)
        if self.options.mktoken and res:
            tok = resolver.token_cli(
                self.options.eauth,
                res
            )
            if tok:
                eauth['token'] = tok.get('token', '')
        if not res:
            sys.stderr.write('ERROR: Authentication failed\n')
            sys.exit(2)
        eauth.update(res)
        eauth['eauth'] = self.options.eauth

    if self.options.static:
        # Static output: collect everything first, print one aggregate
        if not self.options.batch:
            self.config['batch'] = '100%'

        try:
            batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)
        except salt.exceptions.SaltClientError as exc:
            # Report the failure instead of exiting silently (previously
            # the exception was swallowed and the process just exited)
            sys.stderr.write('{0}\n'.format(exc))
            sys.exit(2)

        ret = {}
        for res in batch.run():
            ret.update(res)

        self._output_ret(ret, '')
    else:
        try:
            batch = salt.cli.batch.Batch(self.config, eauth=eauth, parser=self.options)
        except salt.exceptions.SaltClientError as exc:
            # We will print errors to the console further down the stack
            sys.exit(1)
        # Printing the output is already taken care of in run() itself
        for res in batch.run():
            if self.options.failhard:
                # Abort on the first minion reporting a non-zero retcode
                for ret in six.itervalues(res):
                    retcode = self._get_retcode(ret)
                    if retcode != 0:
                        sys.stderr.write(
                            '{0}\nERROR: Minions returned with non-zero exit code.\n'.format(
                                res
                            )
                        )
                        sys.exit(retcode)
def test_cp_testfile(self):
    '''
    test salt-cp

    Copies a fixture file to every responding minion with salt-cp, then
    verifies existence and contents remotely before cleaning up.
    '''
    # Parsing pprint output below; literal_eval is safe, eval() was not
    import ast

    minions = []
    # Discover responding minions via test.ping
    for line in self.run_salt('--out yaml "*" test.ping'):
        if not line:
            continue
        data = salt.utils.yaml.safe_load(line)
        minions.extend(data.keys())

    self.assertNotEqual(minions, [])

    testfile = os.path.abspath(
        os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            'files', 'file', 'base', 'testfile'
        )
    )
    with salt.utils.files.fopen(testfile, 'r') as fh_:
        testfile_contents = fh_.read()

    def quote(arg):
        # Windows command lines do not use POSIX shell quoting
        if salt.utils.platform.is_windows():
            return arg
        return pipes.quote(arg)

    for idx, minion in enumerate(minions):
        if 'localhost' in minion:
            continue
        # Ensure the destination directory exists on the minion
        ret = self.run_salt(
            '--out yaml {0} file.directory_exists {1}'.format(
                quote(minion), RUNTIME_VARS.TMP))
        data = salt.utils.yaml.safe_load('\n'.join(ret))
        if data[minion] is False:
            ret = self.run_salt('--out yaml {0} file.makedirs {1}'.format(
                quote(minion),
                RUNTIME_VARS.TMP))
            data = salt.utils.yaml.safe_load('\n'.join(ret))
            self.assertTrue(data[minion])

        minion_testfile = os.path.join(RUNTIME_VARS.TMP,
                                       'cp_{0}_testfile'.format(idx))

        ret = self.run_cp('--out pprint {0} {1} {2}'.format(
            quote(minion),
            quote(testfile),
            quote(minion_testfile),
        ))

        # pprint emits a Python literal; parse it with ast.literal_eval
        # rather than eval() so arbitrary code in the output can't run
        data = ast.literal_eval('\n'.join(ret))
        for part in six.itervalues(data):
            key = minion_testfile
            self.assertTrue(part[key])

        # File must now exist on the minion...
        ret = self.run_salt('--out yaml {0} file.file_exists {1}'.format(
            quote(minion),
            quote(minion_testfile)))
        data = salt.utils.yaml.safe_load('\n'.join(ret))
        self.assertTrue(data[minion])

        # ...with the expected contents...
        ret = self.run_salt('--out yaml {0} file.contains {1} {2}'.format(
            quote(minion),
            quote(minion_testfile),
            quote(testfile_contents)))
        data = salt.utils.yaml.safe_load('\n'.join(ret))
        self.assertTrue(data[minion])

        # ...and be removable afterwards
        ret = self.run_salt('--out yaml {0} file.remove {1}'.format(
            quote(minion),
            quote(minion_testfile)))
        data = salt.utils.yaml.safe_load('\n'.join(ret))
        self.assertTrue(data[minion])
def destroy(name, call=None):
    """
    This function irreversibly destroys a virtual machine on the cloud
    provider. Before doing so, it should fire an event on the Salt event
    bus.

    The tag for this event is `salt/cloud/<vm name>/destroying`. Once the
    virtual machine has been destroyed, another event is fired. The tag
    for that event is `salt/cloud/<vm name>/destroyed`.

    Dependencies:
        list_nodes

    @param name: name of the virtual machine to destroy
    @type name: str
    @param call: Salt Cloud invocation type; must not be 'function'
    @type call:
    @return: True if all went well, otherwise an error message
    @rtype: bool|str
    """
    # Lazy %-style args so the string is only built if the level is enabled
    log.info("Attempting to delete instance %s", name)

    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.')

    found = []

    providers = __opts__.get('providers', {})
    # Only provider configs with a 'libvirt' section can hold this VM
    providers_to_check = [
        _f for _f in [cfg.get('libvirt') for cfg in six.itervalues(providers)]
        if _f
    ]
    for provider in providers_to_check:
        conn = __get_conn(provider['url'])
        log.info("looking at %s", provider['url'])
        try:
            domain = conn.lookupByName(name)
            found.append({'domain': domain, 'conn': conn})
        except libvirtError:
            # Not defined on this hypervisor; keep searching the others
            pass

    if not found:
        return "{0} doesn't exist and can't be deleted".format(name)

    if len(found) > 1:
        # Refuse to act on an ambiguous name rather than guess
        return "{0} doesn't identify a unique machine leaving things".format(
            name)

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    destroy_domain(found[0]['conn'], found[0]['domain'])

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        {'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    # The docstring promises True on success; previously the function
    # fell off the end and implicitly returned None.
    return True
def run(self):
    '''
    Execute the salt command line

    Parses CLI args, builds a LocalClient, then dispatches to one of:
    batch/static mode, async mode, progress mode, sys.doc aggregation,
    or the normal streaming CLI run. Sets the process exit code based
    on minion retcodes.
    '''
    import salt.auth
    import salt.client
    self.parse_args()

    # Setup file logging!
    self.setup_logfile_logger()

    try:
        # We don't need to bail on config file permission errors
        # if the CLI process is run with the -a flag
        skip_perm_errors = self.options.eauth != ''

        local = salt.client.get_local_client(
            self.get_config_file_path(),
            skip_perm_errors=skip_perm_errors)
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return

    if self.options.batch or self.options.static:
        import salt.cli.batch
        eauth = {}
        if 'token' in self.config:
            eauth['token'] = self.config['token']

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in eauth and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(self.options.eauth, res)
                if tok:
                    eauth['token'] = tok.get('token', '')
            if not res:
                sys.stderr.write('ERROR: Authentication failed\n')
                sys.exit(2)
            eauth.update(res)
            eauth['eauth'] = self.options.eauth

        if self.options.static:
            # Static mode: gather all returns, emit a single blob
            if not self.options.batch:
                self.config['batch'] = '100%'

            batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)

            ret = {}
            for res in batch.run():
                ret.update(res)

            self._output_ret(ret, '')
        else:
            try:
                batch = salt.cli.batch.Batch(self.config, eauth=eauth)
            except salt.exceptions.SaltClientError as exc:
                # We will print errors to the console further down the stack
                sys.exit(1)
            # Printing the output is already taken care of in run() itself
            for res in batch.run():
                if self.options.failhard:
                    # Abort immediately on the first non-zero minion retcode
                    for ret in six.itervalues(res):
                        retcode = salt.utils.job.get_retcode(ret)
                        if retcode != 0:
                            sys.stderr.write(
                                'ERROR: Minions returned with non-zero exit code\n'
                            )
                            sys.exit(retcode)
    else:
        if self.options.timeout <= 0:
            # Fall back to the client's configured timeout
            self.options.timeout = local.opts['timeout']

        kwargs = {
            'tgt': self.config['tgt'],
            'fun': self.config['fun'],
            'arg': self.config['arg'],
            'timeout': self.options.timeout,
            'show_timeout': self.options.show_timeout,
            'show_jid': self.options.show_jid}

        if 'token' in self.config:
            # Prefer the master root key if readable; otherwise pass the token
            try:
                with salt.utils.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_:
                    kwargs['key'] = fp_.readline()
            except IOError:
                kwargs['token'] = self.config['token']

        kwargs['delimiter'] = self.options.delimiter

        if self.selected_target_option:
            kwargs['expr_form'] = self.selected_target_option
        else:
            kwargs['expr_form'] = 'glob'

        # 'return' is a Python keyword, hence the getattr() access
        if getattr(self.options, 'return'):
            kwargs['ret'] = getattr(self.options, 'return')

        if getattr(self.options, 'return_config'):
            kwargs['ret_config'] = getattr(self.options, 'return_config')

        if getattr(self.options, 'metadata'):
            kwargs['metadata'] = yamlify_arg(
                getattr(self.options, 'metadata'))

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in kwargs and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(self.options.eauth, res)
                if tok:
                    kwargs['token'] = tok.get('token', '')
            if not res:
                sys.stderr.write('ERROR: Authentication failed\n')
                sys.exit(2)
            kwargs.update(res)
            kwargs['eauth'] = self.options.eauth

        if self.config['async']:
            # Fire-and-forget: publish the job and print its JID only
            jid = local.cmd_async(**kwargs)
            print_cli('Executed command with job ID: {0}'.format(jid))
            return

        retcodes = []
        try:
            # local will be None when there was an error
            errors = []
            if local:
                if self.options.subset:
                    cmd_func = local.cmd_subset
                    kwargs['sub'] = self.options.subset
                    kwargs['cli'] = True
                else:
                    cmd_func = local.cmd_cli

                if self.options.progress:
                    kwargs['progress'] = True
                    self.config['progress'] = True
                    ret = {}
                    for progress in cmd_func(**kwargs):
                        out = 'progress'
                        try:
                            self._progress_ret(progress, out)
                        except salt.exceptions.LoaderError as exc:
                            raise salt.exceptions.SaltSystemExit(exc)
                        if 'return_count' not in progress:
                            ret.update(progress)
                    self._progress_end(out)
                    self._print_returns_summary(ret)
                elif self.config['fun'] == 'sys.doc':
                    # sys.doc returns are aggregated and printed once
                    ret = {}
                    out = ''
                    for full_ret in local.cmd_cli(**kwargs):
                        ret_, out, retcode = self._format_ret(full_ret)
                        ret.update(ret_)
                    self._output_ret(ret, out)
                else:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    ret = {}
                    for full_ret in cmd_func(**kwargs):
                        try:
                            ret_, out, retcode = self._format_ret(full_ret)
                            retcodes.append(retcode)
                            self._output_ret(ret_, out)
                            ret.update(ret_)
                        except KeyError:
                            # Malformed return; report it in the error summary
                            errors.append(full_ret)

                # Returns summary
                if self.config['cli_summary'] is True:
                    if self.config['fun'] != 'sys.doc':
                        if self.options.output is None:
                            self._print_returns_summary(ret)
                            self._print_errors_summary(errors)

                # NOTE: Return code is set here based on if all minions
                # returned 'ok' with a retcode of 0.
                # This is the final point before the 'salt' cmd returns,
                # which is why we set the retcode here.
                if retcodes.count(0) < len(retcodes):
                    sys.stderr.write('ERROR: Minions returned with non-zero exit code\n')
                    sys.exit(11)

        except (SaltInvocationError, EauthAuthenticationError, SaltClientError) as exc:
            ret = str(exc)
            out = ''
            self._output_ret(ret, out)
def _is_inline_definition(arg):
    '''
    Returns True, if arg is an inline definition of a statement.

    An inline definition is a single-entry dict whose only value is a
    list.
    '''
    if not isinstance(arg, dict) or len(arg) != 1:
        return False
    sole_value = next(six.itervalues(arg))
    return isinstance(sole_value, list)
    # (tail of the enclosing function, whose signature is above this chunk)
    if not paths:
        return ""
    ret = {}
    # Resolve each path to its owning package via `opkg search <path>`
    cmd_search = ["opkg", "search"]
    for path in paths:
        cmd = cmd_search[:]
        cmd.append(path)
        output = __salt__["cmd.run_stdout"](
            cmd, output_loglevel="trace", python_shell=False
        )
        if output:
            # opkg prints "<pkg> - <version>"; keep only the package name
            ret[path] = output.split(" - ")[0].strip()
        else:
            ret[path] = ""
    if len(ret) == 1:
        # Single path queried: return the bare package name, not a dict
        return next(six.itervalues(ret))
    return ret


def version_clean(version):
    '''
    Clean the version string removing extra data.
    There's nothing to do here for nipkg.py, therefore it will always
    return the given version.
    '''
    return version


def check_extra_requirements(pkgname, pkgver):
    '''
    Check if the installed package already has the given requirements.
def _is_reference(arg):
    '''
    Return True, if arg is a reference to a previously defined statement.

    A reference is a single-entry dict whose only value is a string.
    '''
    if not isinstance(arg, dict) or len(arg) != 1:
        return False
    sole_value = next(six.itervalues(arg))
    return isinstance(sole_value, six.string_types)
def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs):
    '''
    Initialize a new container

    .. code-block:: bash

        salt-run lxc.init name host=minion_id [cpuset=cgroups_cpuset] \\
                [cpushare=cgroups_cpushare] [memory=cgroups_memory] \\
                [template=lxc_template_name] [clone=original name] \\
                [profile=lxc_profile] [network_profile=network_profile] \\
                [nic=network_profile] [nic_opts=nic_opts] \\
                [start=(true|false)] [seed=(true|false)] \\
                [install=(true|false)] [config=minion_config] \\
                [snapshot=(true|false)]

    names
        Name of the containers, supports a single name or a comma delimited
        list of names.

    host
        Minion on which to initialize the container **(required)**

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    saltcloud_mode
        init the container with the saltcloud opts format instead
        See lxc.init_interface module documentation

    cpuset
        cgroups cpuset.

    cpushare
        cgroups cpu shares.

    memory
        cgroups memory limit, in MB

        .. versionchanged:: 2015.5.0
            If no value is passed, no limit is set. In earlier Salt versions,
            not passing this value causes a 1024MB memory limit to be set, and
            it was necessary to pass ``memory=0`` to set no limit.

    template
        Name of LXC template on which to base this container

    clone
        Clone this container from an existing container

    profile
        A LXC profile (defined in config or pillar).

    network_profile
        Network profile to use for the container

        .. versionadded:: 2015.5.2

    nic
        .. deprecated:: 2015.5.0
            Use ``network_profile`` instead

    nic_opts
        Extra options for network interfaces. E.g.:

        ``{"eth0": {"mac": "aa:bb:cc:dd:ee:ff", "ipv4": "10.1.1.1", "ipv6": "2001:db8::ff00:42:8329"}}``

    start
        Start the newly created container.

    seed
        Seed the container with the minion config and autosign its key.
        Default: true

    install
        If salt-minion is not already installed, install it. Default: true

    config
        Optional config parameters. By default, the id is set to the name of the
        container.
    '''
    path = kwargs.get('path', None)
    if quiet:
        log.warn('\'quiet\' argument is being deprecated.'
                 ' Please migrate to --quiet')
    ret = {'comment': '', 'result': True}
    if host is None:
        # TODO: Support selection of host based on available memory/cpu/etc.
        ret['comment'] = 'A host must be provided'
        ret['result'] = False
        return ret
    # Accept a comma-delimited string as well as a list of names
    if isinstance(names, six.string_types):
        names = names.split(',')
    if not isinstance(names, list):
        ret['comment'] = 'Container names are not formed as a list'
        ret['result'] = False
        return ret
    # check that the host is alive
    client = salt.client.get_local_client(__opts__['conf_file'])
    alive = False
    try:
        if client.cmd(host, 'test.ping', timeout=20).get(host, None):
            alive = True
    except (TypeError, KeyError):
        pass
    if not alive:
        ret['comment'] = 'Host {0} is not reachable'.format(host)
        ret['result'] = False
        return ret

    log.info('Searching for LXC Hosts')
    data = __salt__['lxc.list'](host, quiet=True, path=path)
    # Warn (informational only) about containers that already exist;
    # NOTE: this loop rebinds `host` to the last key of `data`
    for host, containers in six.iteritems(data):
        for name in names:
            if name in sum(six.itervalues(containers), []):
                log.info('Container \'{0}\' already exists'
                         ' on host \'{1}\','
                         ' init can be a NO-OP'.format(name, host))
    if host not in data:
        ret['comment'] = 'Host \'{0}\' was not found'.format(host)
        ret['result'] = False
        return ret

    kw = salt.utils.clean_kwargs(**kwargs)
    pub_key = kw.get('pub_key', None)
    priv_key = kw.get('priv_key', None)
    # Caller supplied both halves of a keypair -> skip key preauthorization
    explicit_auth = pub_key and priv_key
    approve_key = kw.get('approve_key', True)
    seeds = {}
    seed_arg = kwargs.get('seed', True)
    if approve_key and not explicit_auth:
        skey = salt.key.Key(__opts__)
        all_minions = skey.all_keys().get('minions', [])
        for name in names:
            seed = seed_arg
            # Don't re-seed a container whose minion already answers
            if name in all_minions:
                try:
                    if client.cmd(name, 'test.ping', timeout=20).get(name, None):
                        seed = False
                except (TypeError, KeyError):
                    pass
            seeds[name] = seed
            kv = salt.utils.virt.VirtKey(host, name, __opts__)
            if kv.authorize():
                log.info('Container key will be preauthorized')
            else:
                ret['comment'] = 'Container key preauthorization failed'
                ret['result'] = False
                return ret

    log.info('Creating container(s) \'{0}\''
             ' on host \'{1}\''.format(names, host))
    cmds = []
    for name in names:
        args = [name]
        kw = salt.utils.clean_kwargs(**kwargs)
        if saltcloud_mode:
            # Let the host translate saltcloud-style opts into lxc.init kwargs
            kw = copy.deepcopy(kw)
            kw['name'] = name
            kw = client.cmd(
                host, 'lxc.cloud_init_interface', args + [kw],
                expr_form='list', timeout=600).get(host, {})
        name = kw.pop('name', name)
        # be sure not to seed an already seeded host
        kw['seed'] = seeds.get(name, seed_arg)
        if not kw['seed']:
            kw.pop('seed_cmd', '')
        cmds.append(
            (host,
             name,
             client.cmd_iter(host, 'lxc.init', args, kwarg=kw, timeout=600)))

    # ret['done'] / ret['errors'] accumulate per-container outcomes
    done = ret.setdefault('done', [])
    errors = ret.setdefault('errors', _OrderedDict())

    for ix, acmd in enumerate(cmds):
        hst, container_name, cmd = acmd
        containers = ret.setdefault(hst, [])
        herrs = errors.setdefault(hst, _OrderedDict())
        serrs = herrs.setdefault(container_name, [])
        # Take the first (and only) job return from the iterator
        sub_ret = next(cmd)
        error = None
        if isinstance(sub_ret, dict) and host in sub_ret:
            j_ret = sub_ret[hst]
            container = j_ret.get('ret', {})
            if container and isinstance(container, dict):
                if not container.get('result', False):
                    error = container
            else:
                error = 'Invalid return for {0}: {1} {2}'.format(
                    container_name, container, sub_ret)
        else:
            error = sub_ret
            if not error:
                error = 'unknown error (no return)'
        if error:
            ret['result'] = False
            serrs.append(error)
        else:
            container['container_name'] = name
            containers.append(container)
            done.append(container)

    # marking ping status as True only and only if we have at
    # least provisioned one container
    ret['ping_status'] = bool(len(done))

    # for all provisioned containers, last job is to verify
    # - the key status
    # - we can reach them
    for container in done:
        # explicitly check and update
        # the minion key/pair stored on the master
        container_name = container['container_name']
        key = os.path.join(__opts__['pki_dir'], 'minions', container_name)
        if explicit_auth:
            fcontent = ''
            if os.path.exists(key):
                with salt.utils.fopen(key) as fic:
                    fcontent = fic.read().strip()
            # Rewrite the stored key if it differs from the supplied one
            if pub_key.strip() != fcontent:
                with salt.utils.fopen(key, 'w') as fic:
                    fic.write(pub_key)
                    fic.flush()
        # NOTE(review): j_ret here is left over from the last iteration of
        # the results loop above, not this container's own return — looks
        # unintended; verify before relying on 'mid' per-container.
        mid = j_ret.get('mid', None)
        if not mid:
            continue

        def testping(**kw):
            # Ping the new minion; wait_for_fun retries until timeout
            mid_ = kw['mid']
            ping = client.cmd(mid_, 'test.ping', timeout=20)
            time.sleep(1)
            if ping:
                return 'OK'
            raise Exception('Unresponsive {0}'.format(mid_))
        ping = salt.utils.cloud.wait_for_fun(testping, timeout=21, mid=mid)
        if ping != 'OK':
            ret['ping_status'] = False
            ret['result'] = False

    # if no lxc detected as touched (either inited or verified)
    # we result to False
    if not done:
        ret['result'] = False
    if not quiet:
        __jid_event__.fire_event({'message': ret}, 'progress')
    return ret
def translate_input(**kwargs):
    '''
    Translate CLI/SLS input into the format the API expects. A
    ``skip_translate`` kwarg can be passed to control which arguments are
    translated. It can be either a comma-separated list or an iterable
    containing strings (e.g. a list or tuple), and members of that tuple
    will have their translation skipped. Optionally, skip_translate can be
    set to True to skip *all* translation.

    Returns a 3-tuple ``(kwargs, invalid, collisions)``: the translated
    kwargs, a dict mapping rejected argument names to error strings, and
    a sorted list of API argument names that were supplied via both the
    CLI alias and the API name.
    '''
    kwargs = salt.utils.clean_kwargs(**kwargs)
    invalid = {}
    collisions = []

    skip_translate = kwargs.pop('skip_translate', None)
    if skip_translate is True:
        # Skip all translation
        return kwargs, invalid, collisions
    else:
        # Normalize skip_translate into an iterable of argument names
        if not skip_translate:
            skip_translate = ()
        else:
            try:
                skip_translate = _split(skip_translate)
            except AttributeError:
                pass
            if not hasattr(skip_translate, '__iter__'):
                log.error('skip_translate is not an iterable, ignoring')
                skip_translate = ()

    validate_ip_addrs = kwargs.pop('validate_ip_addrs', True)

    # Using list(kwargs) here because if there are any invalid arguments we
    # will be popping them from the kwargs.
    for key in list(kwargs):
        real_key = ALIASES.get(key, key)
        if real_key in skip_translate:
            continue

        # Repack "a=b,c=d"-style dictlists into real dicts before translating
        if salt.utils.is_dictlist(kwargs[key]):
            kwargs[key] = salt.utils.repack_dictlist(kwargs[key])

        try:
            # Each supported argument has a same-named translation function
            func = getattr(salt.utils.docker.translate, real_key)
            kwargs[key] = func(kwargs[key], validate_ip_addrs=validate_ip_addrs)
        except AttributeError:
            log.debug('No translation function for argument \'%s\'', key)
            continue
        except SaltInvocationError as exc:
            # Invalid input for this argument; remove it and record the error
            kwargs.pop(key)
            invalid[key] = exc.strerror

    log_driver = kwargs.pop('log_driver', NOTSET)
    log_opt = kwargs.pop('log_opt', NOTSET)
    if 'log_config' not in kwargs:
        # The log_config is a mixture of the CLI options --log-driver and
        # --log-opt (which we support in Salt as log_driver and log_opt,
        # respectively), but it must be submitted to the host config in the
        # format {'Type': log_driver, 'Config': log_opt}. So, we need to
        # construct this argument to be passed to the API from those two
        # arguments.
        if log_driver is not NOTSET and log_opt is not NOTSET:
            kwargs['log_config'] = {
                'Type': log_driver if log_driver is not NOTSET else 'none',
                'Config': log_opt if log_opt is not NOTSET else {}
            }

    # Convert CLI versions of commands to their API counterparts
    for key in ALIASES:
        if key in kwargs:
            new_key = ALIASES[key]
            value = kwargs.pop(key)
            if new_key in kwargs:
                # Same argument supplied under both names; report the clash
                collisions.append(new_key)
            else:
                kwargs[new_key] = value

    # Don't allow conflicting options to be set
    if kwargs.get('port_bindings') is not None \
            and kwargs.get('publish_all_ports'):
        kwargs.pop('port_bindings')
        invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True'
    if kwargs.get('hostname') is not None \
            and kwargs.get('network_mode') == 'host':
        kwargs.pop('hostname')
        invalid['hostname'] = 'Cannot be used when network_mode=True'

    # Make sure volumes and ports are defined to match the binds and port_bindings
    if kwargs.get('binds') is not None \
            and (skip_translate is True
                 or all(x not in skip_translate
                        for x in ('binds', 'volume', 'volumes'))):
        # Make sure that all volumes defined in "binds" are included in the
        # "volumes" param.
        auto_volumes = []
        if isinstance(kwargs['binds'], dict):
            # Translated form: {host_path: {'bind': container_path, ...}}
            for val in six.itervalues(kwargs['binds']):
                try:
                    if 'bind' in val:
                        auto_volumes.append(val['bind'])
                except TypeError:
                    continue
        else:
            # Untranslated form: list (or comma-separated string) of
            # "host:container[:mode]" bind definitions
            if isinstance(kwargs['binds'], list):
                auto_volume_defs = kwargs['binds']
            else:
                try:
                    auto_volume_defs = _split(kwargs['binds'])
                except AttributeError:
                    auto_volume_defs = []
            for val in auto_volume_defs:
                try:
                    auto_volumes.append(_split(val, ':')[1])
                except IndexError:
                    continue
        if auto_volumes:
            actual_volumes = kwargs.setdefault('volumes', [])
            actual_volumes.extend([x for x in auto_volumes
                                   if x not in actual_volumes])
            # Sort list to make unit tests more reliable
            actual_volumes.sort()

    if kwargs.get('port_bindings') is not None \
            and (skip_translate is True
                 or all(x not in skip_translate
                        for x in ('port_bindings', 'expose', 'ports'))):
        # Make sure that all ports defined in "port_bindings" are included in
        # the "ports" param.
        auto_ports = list(kwargs['port_bindings'])
        if auto_ports:
            actual_ports = kwargs.setdefault('ports', [])
            actual_ports.extend([x for x in auto_ports
                                 if x not in actual_ports])
            # Sort list to make unit tests more reliable
            actual_ports.sort()

    return kwargs, invalid, sorted(collisions)
def setUpClass(cls):  # pylint: disable=arguments-differ
    '''
    Set up all the webserver paths. Designed to be run once in a
    setUpClass function.

    Builds a throwaway nginx + uWSGI git server under a temp dir,
    applies the git_pillar.http states, and starts (or restarts) the
    uwsgi and nginx daemons. Cleans up via tearDownClass before
    re-raising if git is missing or the state run fails.
    '''
    super(WebserverMixin, cls).setUpClass()
    # Scratch layout for config, repos, and the virtualenv holding uwsgi
    cls.root_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
    cls.config_dir = os.path.join(cls.root_dir, 'config')
    cls.nginx_conf = os.path.join(cls.config_dir, 'nginx.conf')
    cls.uwsgi_conf = os.path.join(cls.config_dir, 'uwsgi.yml')
    cls.git_dir = os.path.join(cls.root_dir, 'git')
    cls.repo_dir = os.path.join(cls.git_dir, 'repos')
    cls.venv_dir = os.path.join(cls.root_dir, 'venv')
    cls.uwsgi_bin = os.path.join(cls.venv_dir, 'bin', 'uwsgi')
    # nginx and uwsgi must listen on distinct ports
    cls.nginx_port = cls.uwsgi_port = get_unused_localhost_port()
    while cls.uwsgi_port == cls.nginx_port:
        # Ensure we don't hit a corner case in which two sucessive calls to
        # get_unused_localhost_port() return identical port numbers.
        cls.uwsgi_port = get_unused_localhost_port()
    cls.url = 'http://127.0.0.1:{port}/repo.git'.format(
        port=cls.nginx_port)
    cls.url_extra_repo = 'http://127.0.0.1:{port}/extra_repo.git'.format(
        port=cls.nginx_port)
    cls.ext_opts = {'url': cls.url, 'url_extra_repo': cls.url_extra_repo}
    # Add auth params if present (if so this will trigger the spawned
    # server to turn on HTTP basic auth).
    for credential_param in ('user', 'password'):
        if hasattr(cls, credential_param):
            cls.ext_opts[credential_param] = getattr(cls, credential_param)
    auth_enabled = hasattr(cls, 'username') and hasattr(cls, 'password')
    # Pillar data consumed by the git_pillar.http state files below
    pillar = {
        'git_pillar': {
            'config_dir': cls.config_dir,
            'git_dir': cls.git_dir,
            'venv_dir': cls.venv_dir,
            'root_dir': cls.root_dir,
            'nginx_port': cls.nginx_port,
            'uwsgi_port': cls.uwsgi_port,
            'auth_enabled': auth_enabled
        }
    }
    # Different libexec dir for git backend on Debian-based systems
    git_core = '/usr/libexec/git-core'
    if not os.path.exists(git_core):
        git_core = '/usr/lib/git-core'
    if not os.path.exists(git_core):
        # Neither location exists; clean up before aborting the class
        cls.tearDownClass()
        raise AssertionError(
            '{} not found. Either git is not installed, or the test '
            'class needs to be updated.'.format(git_core))
    pillar['git_pillar']['git-http-backend'] = os.path.join(
        git_core, 'git-http-backend')
    try:
        # Apply the setup states only once per test class
        if cls.prep_states_ran is False:
            ret = cls.cls_run_function('state.apply',
                                       mods='git_pillar.http',
                                       pillar=pillar)
            # ret maps a single state id to its result dict
            assert next(six.itervalues(ret))['result'] is True
            cls.prep_states_ran = True
            log.info('%s: States applied', cls.__name__)
        # Restart uwsgi if a previously started process has died
        if cls.uwsgi_proc is not None:
            if not psutil.pid_exists(cls.uwsgi_proc.pid):
                log.warning(
                    '%s: uWsgi started but appears to be dead now. Will try to restart it.',
                    cls.__name__)
                cls.uwsgi_proc = None
        if cls.uwsgi_proc is None:
            cls.uwsgi_proc = start_daemon(cls.uwsgi_bin, cls.config_dir,
                                          cls.uwsgi_port, UwsgiDaemon)
            log.info('%s: %s started', cls.__name__, cls.uwsgi_bin)
        # Same dead-process check for nginx
        if cls.nginx_proc is not None:
            if not psutil.pid_exists(cls.nginx_proc.pid):
                log.warning(
                    '%s: nginx started but appears to be dead now. Will try to restart it.',
                    cls.__name__)
                cls.nginx_proc = None
        if cls.nginx_proc is None:
            cls.nginx_proc = start_daemon('nginx', cls.config_dir,
                                          cls.nginx_port, NginxDaemon)
            log.info('%s: nginx started', cls.__name__)
    except AssertionError:
        # State application failed; tear down before propagating
        cls.tearDownClass()
        six.reraise(*sys.exc_info())
def _format_host(host, data, indent_level=1):
    """
    Main highstate formatter. can be called recursively if a nested
    highstate contains other highstates (ie in an orchestration)

    Returns a 2-tuple: the formatted output string for this host and a
    boolean indicating whether any changes were reported.
    """
    host = salt.utils.data.decode(host)

    colors = salt.utils.color.get_colors(
        __opts__.get("color"), __opts__.get("color_theme"))
    tabular = __opts__.get("state_tabular", False)
    rcounts = {}          # result value -> count (True/False/None/'warnings')
    rdurations = []       # per-state durations (floats, ms)
    hcolor = colors["GREEN"]
    hstrs = []            # accumulated output lines
    nchanges = 0
    strip_colors = __opts__.get("strip_colors", True)

    if isinstance(data, int) or isinstance(data, six.string_types):
        # Data in this format is from saltmod.function,
        # so it is always a 'change'
        nchanges = 1
        hstrs.append(("{0} {1}{2[ENDC]}".format(hcolor, data, colors)))
        hcolor = colors["CYAN"]  # Print the minion name in cyan
    if isinstance(data, list):
        # Errors have been detected, list them in RED!
        hcolor = colors["LIGHT_RED"]
        hstrs.append(
            ("    {0}Data failed to compile:{1[ENDC]}".format(hcolor, colors)))
        for err in data:
            if strip_colors:
                err = salt.output.strip_esc_sequence(
                    salt.utils.data.decode(err))
            hstrs.append(
                ("{0}----------\n    {1}{2[ENDC]}".format(hcolor, err, colors)))
    if isinstance(data, dict):
        # Verify that the needed data is present
        data_tmp = {}
        for tname, info in six.iteritems(data):
            # FIX: was `tname is not "changes"` — identity comparison of
            # strings, which is unreliable for non-interned keys (and a
            # SyntaxWarning on Python 3.8+). Use equality instead.
            if (isinstance(info, dict) and tname != "changes" and info
                    and "__run_num__" not in info):
                err = ("The State execution failed to record the order "
                       "in which all states were executed. The state "
                       "return missing data is:")
                hstrs.insert(0, pprint.pformat(info))
                hstrs.insert(0, err)
            if isinstance(info, dict) and "result" in info:
                data_tmp[tname] = info
        data = data_tmp
        # Everything rendered as it should display the output
        for tname in sorted(data,
                            key=lambda k: data[k].get("__run_num__", 0)):
            ret = data[tname]
            # Increment result counts
            rcounts.setdefault(ret["result"], 0)
            rcounts[ret["result"]] += 1
            rduration = ret.get("duration", 0)
            try:
                rdurations.append(float(rduration))
            except ValueError:
                # Duration may carry a ' ms' suffix; strip it and retry
                rduration, _, _ = rduration.partition(" ms")
                try:
                    rdurations.append(float(rduration))
                except ValueError:
                    log.error("Cannot parse a float from duration %s",
                              ret.get("duration", 0))

            tcolor = colors["GREEN"]
            if ret.get("name") in ["state.orch", "state.orchestrate",
                                   "state.sls"]:
                # Nested highstate (orchestration): recurse, indented
                nested = output(ret["changes"]["return"],
                                indent_level=indent_level + 1)
                ctext = re.sub("^", " " * 14 * indent_level, "\n" + nested,
                               flags=re.MULTILINE)
                schanged = True
                nchanges += 1
            else:
                schanged, ctext = _format_changes(ret["changes"])
                nchanges += 1 if schanged else 0

            # Skip this state if it was successful & diff output was requested
            if (__opts__.get("state_output_diff", False) and ret["result"]
                    and not schanged):
                continue

            # Skip this state if state_verbose is False, the result is True and
            # there were no changes made
            if (not __opts__.get("state_verbose", False) and ret["result"]
                    and not schanged):
                continue

            if schanged:
                tcolor = colors["CYAN"]
            if ret["result"] is False:
                hcolor = colors["RED"]
                tcolor = colors["RED"]
            if ret["result"] is None:
                hcolor = colors["LIGHT_YELLOW"]
                tcolor = colors["LIGHT_YELLOW"]

            state_output = __opts__.get("state_output", "full").lower()
            comps = tname.split("_|-")

            if state_output.endswith("_id"):
                # Swap in the ID for the name. Refs #35137
                comps[2] = comps[1]

            if state_output.startswith("filter"):
                # By default, full data is shown for all types. However, return
                # data may be excluded by setting state_output_exclude to a
                # comma-separated list of True, False or None, or including the
                # same list with the exclude option on the command line. For
                # now, this option must include a comma. For example:
                #     exclude=True,
                # The same functionality is also available for making return
                # data terse, instead of excluding it.
                cliargs = __opts__.get("arg", [])
                clikwargs = {}
                for item in cliargs:
                    if isinstance(item, dict) and "__kwarg__" in item:
                        clikwargs = item.copy()

                exclude = clikwargs.get(
                    "exclude", __opts__.get("state_output_exclude", []))
                if isinstance(exclude, six.string_types):
                    exclude = six.text_type(exclude).split(",")

                terse = clikwargs.get(
                    "terse", __opts__.get("state_output_terse", []))
                if isinstance(terse, six.string_types):
                    terse = six.text_type(terse).split(",")

                if six.text_type(ret["result"]) in terse:
                    msg = _format_terse(tcolor, comps, ret, colors, tabular)
                    hstrs.append(msg)
                    continue
                if six.text_type(ret["result"]) in exclude:
                    continue
            elif any((
                    state_output.startswith("terse"),
                    state_output.startswith("mixed")
                    and ret["result"] is not False,  # only non-error'd
                    state_output.startswith("changes")
                    and ret["result"] and not schanged,  # non-error'd non-changed
            )):
                # Print this chunk in a terse way and continue in the loop
                msg = _format_terse(tcolor, comps, ret, colors, tabular)
                hstrs.append(msg)
                continue

            state_lines = [
                "{tcolor}----------{colors[ENDC]}",
                "    {tcolor}      ID: {comps[1]}{colors[ENDC]}",
                "    {tcolor}Function: {comps[0]}.{comps[3]}{colors[ENDC]}",
                "    {tcolor}  Result: {ret[result]!s}{colors[ENDC]}",
                "    {tcolor} Comment: {comment}{colors[ENDC]}",
            ]
            if __opts__.get("state_output_profile",
                            True) and "start_time" in ret:
                state_lines.extend([
                    "    {tcolor} Started: {ret[start_time]!s}{colors[ENDC]}",
                    "    {tcolor}Duration: {ret[duration]!s}{colors[ENDC]}",
                ])
            # This isn't the prettiest way of doing this, but it's readable.
            if comps[1] != comps[2]:
                state_lines.insert(
                    3, "    {tcolor}    Name: {comps[2]}{colors[ENDC]}")
            # be sure that ret['comment'] is utf-8 friendly
            try:
                if not isinstance(ret["comment"], six.text_type):
                    ret["comment"] = six.text_type(ret["comment"])
            except UnicodeDecodeError:
                # If we got here, we're on Python 2 and ret['comment'] somehow
                # contained a str type with unicode content.
                ret["comment"] = salt.utils.stringutils.to_unicode(
                    ret["comment"])
            try:
                comment = salt.utils.data.decode(ret["comment"])
                comment = comment.strip().replace("\n", "\n" + " " * 14)
            except AttributeError:  # Assume comment is a list
                try:
                    # NOTE(review): `.join(" ")` on a list raises
                    # AttributeError, so this branch always falls through to
                    # the str() conversion below — looks like the arguments
                    # were meant to be `" ".join(...)`; left unchanged to
                    # preserve the existing output.
                    comment = ret["comment"].join(" ").replace(
                        "\n", "\n" + " " * 13)
                except AttributeError:
                    # Comment isn't a list either, just convert to string
                    comment = six.text_type(ret["comment"])
                    comment = comment.strip().replace("\n", "\n" + " " * 14)
            # If there is a data attribute, append it to the comment
            if "data" in ret:
                if isinstance(ret["data"], list):
                    for item in ret["data"]:
                        comment = "{0} {1}".format(comment, item)
                elif isinstance(ret["data"], dict):
                    for key, value in ret["data"].items():
                        comment = "{0}\n\t\t{1}: {2}".format(
                            comment, key, value)
                else:
                    comment = "{0} {1}".format(comment, ret["data"])
            for detail in ["start_time", "duration"]:
                ret.setdefault(detail, "")
            if ret["duration"] != "":
                ret["duration"] = "{0} ms".format(ret["duration"])
            svars = {
                "tcolor": tcolor,
                "comps": comps,
                "ret": ret,
                "comment": salt.utils.data.decode(comment),
                # This nukes any trailing \n and indents the others.
                "colors": colors,
            }
            hstrs.extend([sline.format(**svars) for sline in state_lines])
            changes = "     Changes:   " + ctext
            hstrs.append(("{0}{1}{2[ENDC]}".format(tcolor, changes, colors)))

            if "warnings" in ret:
                rcounts.setdefault("warnings", 0)
                rcounts["warnings"] += 1
                wrapper = textwrap.TextWrapper(width=80,
                                               initial_indent=" " * 14,
                                               subsequent_indent=" " * 14)
                hstrs.append(
                    "   {colors[LIGHT_RED]} Warnings: {0}{colors[ENDC]}".
                    format(wrapper.fill("\n".join(ret["warnings"])).lstrip(),
                           colors=colors))

    # Append result counts to end of output
    colorfmt = "{0}{1}{2[ENDC]}"
    rlabel = {
        True: "Succeeded",
        False: "Failed",
        None: "Not Run",
        "warnings": "Warnings",
    }
    count_max_len = max(
        [len(six.text_type(x)) for x in six.itervalues(rcounts)] or [0])
    label_max_len = max([len(x) for x in six.itervalues(rlabel)] or [0])
    line_max_len = label_max_len + count_max_len + 2  # +2 for ': '
    hstrs.append(
        colorfmt.format(
            colors["CYAN"],
            "\nSummary for {0}\n{1}".format(host, "-" * line_max_len),
            colors,
        ))

    def _counts(label, count):
        # Right-align the count so all summary lines share a width
        return "{0}: {1:>{2}}".format(label, count,
                                      line_max_len - (len(label) + 2))

    # Successful states
    changestats = []
    if None in rcounts and rcounts.get(None, 0) > 0:
        # test=True states
        changestats.append(
            colorfmt.format(
                colors["LIGHT_YELLOW"],
                "unchanged={0}".format(rcounts.get(None, 0)),
                colors,
            ))
    if nchanges > 0:
        changestats.append(
            colorfmt.format(colors["GREEN"], "changed={0}".format(nchanges),
                            colors))
    if changestats:
        changestats = " ({0})".format(", ".join(changestats))
    else:
        changestats = ""
    hstrs.append(
        colorfmt.format(
            colors["GREEN"],
            _counts(rlabel[True],
                    rcounts.get(True, 0) + rcounts.get(None, 0)),
            colors,
        ) + changestats)

    # Failed states
    num_failed = rcounts.get(False, 0)
    hstrs.append(
        colorfmt.format(
            colors["RED"] if num_failed else colors["CYAN"],
            _counts(rlabel[False], num_failed),
            colors,
        ))

    num_warnings = rcounts.get("warnings", 0)
    if num_warnings:
        hstrs.append(
            colorfmt.format(
                colors["LIGHT_RED"],
                _counts(rlabel["warnings"], num_warnings),
                colors,
            ))

    totals = "{0}\nTotal states run: {1:>{2}}".format(
        "-" * line_max_len,
        sum(six.itervalues(rcounts)) - rcounts.get("warnings", 0),
        line_max_len - 7,
    )
    hstrs.append(colorfmt.format(colors["CYAN"], totals, colors))

    if __opts__.get("state_output_profile", True):
        sum_duration = sum(rdurations)
        duration_unit = "ms"
        # convert to seconds if duration is 1000ms or more
        if sum_duration > 999:
            sum_duration /= 1000
            duration_unit = "s"
        total_duration = "Total run time: {0} {1}".format(
            "{0:.3f}".format(sum_duration).rjust(line_max_len - 5),
            duration_unit)
        hstrs.append(colorfmt.format(colors["CYAN"], total_duration, colors))

    if strip_colors:
        host = salt.output.strip_esc_sequence(host)
    hstrs.insert(0, ("{0}{1}:{2[ENDC]}".format(hcolor, host, colors)))
    return "\n".join(hstrs), nchanges > 0
def installed(name, options=None):
    '''
    Verify that the desired port is installed, and that it was compiled with
    the desired options.

    name
        The port origin (``category/portname``, e.g. ``security/nmap``)

    options
        Make sure that the desired non-default options are set

        .. warning:: Any build options not passed here assume the default values
            for the port, and are not just differences from the existing cached
            options from a previous ``make config``.

    Example usage:

    .. code-block:: yaml

        security/nmap:
          ports.installed:
            - options:
              - IPV6: off
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': '{0} is already installed'.format(name)
    }
    try:
        # Fetch both the currently-cached options and the port's defaults so
        # they can be compared against what the user requested.
        current_options = __salt__['ports.showconfig'](name,
                                                       default=False,
                                                       dict_return=True)
        default_options = __salt__['ports.showconfig'](name,
                                                       default=True,
                                                       dict_return=True)
        # unpack the options from the top-level return dict
        if current_options:
            current_options = current_options[next(iter(current_options))]
        if default_options:
            default_options = default_options[next(iter(default_options))]
    except (SaltInvocationError, CommandExecutionError) as exc:
        ret['result'] = False
        ret['comment'] = ('Unable to get configuration for {0}. Port name may '
                          'be invalid, or ports tree may need to be updated. '
                          'Error message: {1}'.format(name, exc))
        return ret

    # Normalize the passed-in option list into an {option: value} dict
    options = _repack_options(options) if options is not None else {}
    # Desired options are the port defaults overlaid with the requested ones
    desired_options = copy.deepcopy(default_options)
    desired_options.update(options)

    # Snapshot of installed package origins before any install attempt
    ports_pre = [
        x['origin'] for x in
        six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
    ]

    if current_options == desired_options and name in ports_pre:
        # Port is installed as desired
        if options:
            ret['comment'] += ' ' + _build_option_string(options)
        return ret

    if not default_options:
        if options:
            # Port exposes no build options, so requesting any is an error
            ret['result'] = False
            ret['comment'] = ('{0} does not have any build options, yet '
                              'options were specified'.format(name))
            return ret
        else:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = '{0} will be installed'.format(name)
                return ret
    else:
        # Reject requested options the port does not actually support
        bad_opts = [x for x in options if x not in default_options]
        if bad_opts:
            ret['result'] = False
            ret['comment'] = ('The following options are not available for '
                              '{0}: {1}'.format(name, ', '.join(bad_opts)))
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = '{0} will be installed '.format(name)
            ret['comment'] += _build_option_string(options)
            return ret

        if options:
            # Persist the requested options before building
            if not __salt__['ports.config'](name, reset=True, **options):
                ret['result'] = False
                ret['comment'] = 'Unable to set options for {0}'.format(name)
                return ret
        else:
            # No options requested: clear any cached options so the port is
            # built with its defaults
            __salt__['ports.rmconfig'](name)
            if _options_file_exists(name):
                ret['result'] = False
                ret['comment'] = 'Unable to clear options for {0}'.format(name)
                return ret

    ret['changes'] = __salt__['ports.install'](name)
    # Snapshot of installed package origins after the install attempt
    ports_post = [
        x['origin'] for x in
        six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
    ]
    # ports.install stashes any build error in the loader __context__;
    # pop it via the execution-module namespace (reached through test.ping's
    # module) so the error is reported once and not leaked into later runs
    err = sys.modules[__salt__['test.ping'].__module__].__context__.pop(
        'ports.install_error', None)
    if err or name not in ports_post:
        ret['result'] = False
    if ret['result']:
        ret['comment'] = 'Successfully installed {0}'.format(name)
        if default_options:
            ret['comment'] += ' ' + _build_option_string(options)
    else:
        ret['comment'] = 'Failed to install {0}'.format(name)
        if err:
            ret['comment'] += '. Error message:\n{0}'.format(err)
    return ret
def _post_processing(kwargs, skip_translate, invalid):
    '''
    Additional container-specific post-translation processing.

    All three arguments are mutated in place:

    kwargs
        Translated container input. Conflicting options are removed from it,
        and ``volumes``/``ports`` are filled in to match ``binds`` and
        ``port_bindings``.

    skip_translate
        Either ``True`` (all translation skipped) or an iterable of argument
        names for which translation was skipped.

    invalid
        Maps argument names to the reason the argument was rejected.
    '''
    # Don't allow conflicting options to be set
    if kwargs.get('port_bindings') is not None \
            and kwargs.get('publish_all_ports'):
        kwargs.pop('port_bindings')
        invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True'
    if kwargs.get('hostname') is not None \
            and kwargs.get('network_mode') == 'host':
        kwargs.pop('hostname')
        # The guard above checks for network_mode == 'host'; the previous
        # message incorrectly said "network_mode=True".
        invalid['hostname'] = 'Cannot be used when network_mode=host'

    # Make sure volumes and ports are defined to match the binds and port_bindings
    if kwargs.get('binds') is not None \
            and (skip_translate is True
                 or all(x not in skip_translate
                        for x in ('binds', 'volume', 'volumes'))):
        # Make sure that all volumes defined in "binds" are included in the
        # "volumes" param.
        auto_volumes = []
        if isinstance(kwargs['binds'], dict):
            for val in six.itervalues(kwargs['binds']):
                try:
                    if 'bind' in val:
                        auto_volumes.append(val['bind'])
                except TypeError:
                    # value is not a dict; nothing to extract
                    continue
        else:
            if isinstance(kwargs['binds'], list):
                auto_volume_defs = kwargs['binds']
            else:
                try:
                    auto_volume_defs = helpers.split(kwargs['binds'])
                except AttributeError:
                    auto_volume_defs = []
            for val in auto_volume_defs:
                try:
                    # container path is the second colon-delimited field
                    auto_volumes.append(helpers.split(val, ':')[1])
                except IndexError:
                    continue
        if auto_volumes:
            actual_volumes = kwargs.setdefault('volumes', [])
            actual_volumes.extend(
                [x for x in auto_volumes if x not in actual_volumes])
            # Sort list to make unit tests more reliable
            actual_volumes.sort()

    if kwargs.get('port_bindings') is not None \
            and (skip_translate is True
                 or all(x not in skip_translate
                        for x in ('port_bindings', 'expose', 'ports'))):
        # Make sure that all ports defined in "port_bindings" are included in
        # the "ports" param.
        auto_ports = list(kwargs['port_bindings'])
        if auto_ports:
            actual_ports = []
            # Sort list to make unit tests more reliable
            for port in auto_ports:
                if port in actual_ports:
                    continue
                if isinstance(port, six.integer_types):
                    # bare int means a TCP port
                    actual_ports.append((port, 'tcp'))
                else:
                    # 'port/proto' strings carry an explicit protocol
                    port, proto = port.split('/')
                    actual_ports.append((int(port), proto))
            actual_ports.sort()
            actual_ports = [
                port if proto == 'tcp' else '{}/{}'.format(port, proto)
                for (port, proto) in actual_ports
            ]
            kwargs.setdefault('ports', actual_ports)
def get_docker(interfaces=None, cidrs=None, with_container_id=False): """ .. versionchanged:: 2017.7.8,2018.3.3 When :conf_minion:`docker.update_mine` is set to ``False`` for a given minion, no mine data will be populated for that minion, and thus none will be returned for it. .. versionchanged:: 2019.2.0 :conf_minion:`docker.update_mine` now defaults to ``False`` Get all mine data for :py:func:`docker.ps <salt.modules.dockermod.ps_>` and run an aggregation routine. The ``interfaces`` parameter allows for specifying the network interfaces from which to select IP addresses. The ``cidrs`` parameter allows for specifying a list of subnets which the IP address must match. with_container_id Boolean, to expose container_id in the list of results .. versionadded:: 2015.8.2 CLI Example: .. code-block:: bash salt '*' mine.get_docker salt '*' mine.get_docker interfaces='eth0' salt '*' mine.get_docker interfaces='["eth0", "eth1"]' salt '*' mine.get_docker cidrs='107.170.147.0/24' salt '*' mine.get_docker cidrs='["107.170.147.0/24", "172.17.42.0/24"]' salt '*' mine.get_docker interfaces='["eth0", "eth1"]' cidrs='["107.170.147.0/24", "172.17.42.0/24"]' """ # Enforce that interface and cidr are lists if interfaces: interface_ = [] interface_.extend( interfaces if isinstance(interfaces, list) else [interfaces]) interfaces = interface_ if cidrs: cidr_ = [] cidr_.extend(cidrs if isinstance(cidrs, list) else [cidrs]) cidrs = cidr_ # Get docker info cmd = "docker.ps" docker_hosts = get("*", cmd) proxy_lists = {} # Process docker info for containers in six.itervalues(docker_hosts): host = containers.pop("host") host_ips = [] # Prepare host_ips list if not interfaces: for info in six.itervalues(host["interfaces"]): if "inet" in info: for ip_ in info["inet"]: host_ips.append(ip_["address"]) else: for interface in interfaces: if interface in host["interfaces"]: if "inet" in host["interfaces"][interface]: for item in host["interfaces"][interface]["inet"]: host_ips.append(item["address"]) 
host_ips = list(set(host_ips)) # Filter out ips from host_ips with cidrs if cidrs: good_ips = [] for cidr in cidrs: for ip_ in host_ips: if salt.utils.network.in_subnet(cidr, [ip_]): good_ips.append(ip_) host_ips = list(set(good_ips)) # Process each container for container in six.itervalues(containers): container_id = container["Info"]["Id"] if container["Image"] not in proxy_lists: proxy_lists[container["Image"]] = {} for dock_port in container["Ports"]: # IP exists only if port is exposed ip_address = dock_port.get("IP") # If port is 0.0.0.0, then we must get the docker host IP if ip_address == "0.0.0.0": for ip_ in host_ips: containers = ( proxy_lists[container["Image"]].setdefault( "ipv4", {}).setdefault(dock_port["PrivatePort"], [])) container_network_footprint = "{0}:{1}".format( ip_, dock_port["PublicPort"]) if with_container_id: value = (container_network_footprint, container_id) else: value = container_network_footprint if value not in containers: containers.append(value) elif ip_address: containers = (proxy_lists[container["Image"]].setdefault( "ipv4", {}).setdefault(dock_port["PrivatePort"], [])) container_network_footprint = "{0}:{1}".format( dock_port["IP"], dock_port["PublicPort"]) if with_container_id: value = (container_network_footprint, container_id) else: value = container_network_footprint if value not in containers: containers.append(value) return proxy_lists
def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=None):
    '''
    Run Ansible Playbooks

    :param name: path to playbook. This can be relative to rundir or the git repo
    :param rundir: location to run ansible-playbook from.
    :param git_repo: git repository to clone for ansible playbooks. This is
                     cloned using the `git.latest` state, and is cloned to the
                     `rundir` if specified, otherwise it is clone to the
                     `cache_dir`
    :param git_kwargs: extra kwargs to pass to `git.latest` state module
                       besides the `name` and `target`
    :param ansible_kwargs: extra kwargs to pass to `ansible.playbooks`
                           execution module besides the `name` and `target`

    :return: Ansible playbook output.

    .. code-block:: yaml

        run nginx install:
          ansible.playbooks:
            - name: install.yml
            - git_repo: git://github.com/gituser/playbook.git
            - git_kwargs:
                rev: master
    '''
    ret = {
        'result': False,
        'changes': {},
        'comment': 'Running playbook {0}'.format(name),
        'name': name,
    }
    if git_repo:
        # Clone the playbook repo first; default the run directory to the
        # fileclient's external cache path when none was given.
        if not isinstance(rundir, six.text_type) or not os.path.isdir(rundir):
            rundir = _client()._extrn_path(git_repo, 'base')
            log.trace('rundir set to %s', rundir)
        if not isinstance(git_kwargs, dict):
            log.debug('Setting git_kwargs to empty dict: %s', git_kwargs)
            git_kwargs = {}
        __states__['git.latest'](name=git_repo, target=rundir, **git_kwargs)
    if not isinstance(ansible_kwargs, dict):
        log.debug('Setting ansible_kwargs to empty dict: %s', ansible_kwargs)
        ansible_kwargs = {}
    # Dry run first (check mode) to find out whether anything would change
    checks = __salt__['ansible.playbooks'](name, rundir=rundir, check=True,
                                           diff=True, **ansible_kwargs)
    if all(not check['changed']
           for check in six.itervalues(checks['stats'])):
        ret['comment'] = 'No changes to be made from playbook {0}'.format(name)
        ret['result'] = True
    elif __opts__['test']:
        ret['comment'] = 'Changes will be made from playbook {0}'.format(name)
        ret['result'] = None
        ret['changes'] = _changes(checks)
    else:
        results = __salt__['ansible.playbooks'](name, rundir=rundir,
                                                diff=True, **ansible_kwargs)
        ret['comment'] = 'Changes were made by playbook {0}'.format(name)
        ret['changes'] = _changes(results)
        # Base success/failure on the stats of the run that actually applied
        # changes (``results``), not on the earlier dry run (``checks``) —
        # otherwise failures in the real run would be silently ignored.
        ret['result'] = all(
            not check['failures'] and not check['unreachable']
            for check in six.itervalues(results['stats']))
    return ret
def test_cp_testfile(self):
    '''
    test salt-cp
    '''
    # Gather the IDs of every responding minion via test.ping
    minions = []
    for output_line in self.run_salt('--out yaml "*" test.ping'):
        if output_line:
            parsed = yaml.load(output_line)
            # pylint: disable=incompatible-py3-code
            # extending a list with a Py3 dict_keys view behaves as expected
            minions.extend(parsed.keys())
    self.assertNotEqual(minions, [])

    # Fixture file that will be copied out to each minion
    source_file = os.path.abspath(
        os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            'files', 'file', 'base', 'testfile'
        )
    )
    with salt.utils.fopen(source_file, 'r') as src:
        source_contents = src.read()

    for index, minion_id in enumerate(minions):
        # Ensure the target directory exists on the minion
        result = self.run_salt(
            '--out yaml {0} file.directory_exists {1}'.format(
                pipes.quote(minion_id), integration.TMP
            )
        )
        parsed = yaml.load('\n'.join(result))
        if parsed[minion_id] is False:
            result = self.run_salt(
                '--out yaml {0} file.makedirs {1}'.format(
                    pipes.quote(minion_id), integration.TMP
                )
            )
            parsed = yaml.load('\n'.join(result))
            self.assertTrue(parsed[minion_id])

        remote_path = os.path.join(
            integration.TMP, 'cp_{0}_testfile'.format(index)
        )

        # Copy the fixture via salt-cp and confirm the copy succeeded
        result = self.run_cp('--out pprint {0} {1} {2}'.format(
            pipes.quote(minion_id),
            pipes.quote(source_file),
            pipes.quote(remote_path)
        ))
        parsed = yaml.load('\n'.join(result))
        for section in six.itervalues(parsed):
            self.assertTrue(section[remote_path])

        # The copied file must exist on the minion...
        result = self.run_salt(
            '--out yaml {0} file.file_exists {1}'.format(
                pipes.quote(minion_id),
                pipes.quote(remote_path)
            )
        )
        parsed = yaml.load('\n'.join(result))
        self.assertTrue(parsed[minion_id])

        # ...contain the fixture's contents...
        result = self.run_salt(
            '--out yaml {0} file.contains {1} {2}'.format(
                pipes.quote(minion_id),
                pipes.quote(remote_path),
                pipes.quote(source_contents)
            )
        )
        parsed = yaml.load('\n'.join(result))
        self.assertTrue(parsed[minion_id])

        # ...and finally be removed again
        result = self.run_salt(
            '--out yaml {0} file.remove {1}'.format(
                pipes.quote(minion_id),
                pipes.quote(remote_path)
            )
        )
        parsed = yaml.load('\n'.join(result))
        self.assertTrue(parsed[minion_id])
def _format_host(host, data):
    '''
    Render the highstate output for a single minion.

    host
        The minion ID being rendered.

    data
        The return data for that minion; may be an int/str (single function
        return), a list (compilation errors), or a dict of state results.

    Returns a 2-tuple of (rendered unicode string, bool indicating whether
    any state reported changes).
    '''
    colors = salt.utils.get_colors(
        __opts__.get('color'),
        __opts__.get('color_theme'))
    tabular = __opts__.get('state_tabular', False)
    rcounts = {}        # result value -> count (True/False/None/'warnings')
    rdurations = []     # per-state durations, for the total-run-time summary
    hcolor = colors['GREEN']
    hstrs = []          # accumulated output lines
    nchanges = 0
    strip_colors = __opts__.get('strip_colors', True)
    if isinstance(data, int) or isinstance(data, str):
        # Data in this format is from saltmod.function,
        # so it is always a 'change'
        nchanges = 1
        hstrs.append((u'{0}    {1}{2[ENDC]}'.format(hcolor, data, colors)))
        hcolor = colors['CYAN']  # Print the minion name in cyan
    if isinstance(data, list):
        # Errors have been detected, list them in RED!
        hcolor = colors['LIGHT_RED']
        hstrs.append((u'    {0}Data failed to compile:{1[ENDC]}'.format(
            hcolor, colors)))
        for err in data:
            if strip_colors:
                err = salt.output.strip_esc_sequence(err)
            hstrs.append((u'{0}----------\n    {1}{2[ENDC]}'.format(
                hcolor, err, colors)))
    if isinstance(data, dict):
        # Verify that the needed data is present
        for tname, info in six.iteritems(data):
            if isinstance(info, dict) and '__run_num__' not in info:
                err = (u'The State execution failed to record the order '
                       'in which all states were executed. The state '
                       'return missing data is:')
                hstrs.insert(0, pprint.pformat(info))
                hstrs.insert(0, err)
        # Everything rendered as it should display the output
        for tname in sorted(data,
                            key=lambda k: data[k].get('__run_num__', 0)):
            ret = data[tname]
            # Increment result counts
            rcounts.setdefault(ret['result'], 0)
            rcounts[ret['result']] += 1
            rdurations.append(ret.get('duration', 0))
            tcolor = colors['GREEN']
            schanged, ctext = _format_changes(ret['changes'])
            nchanges += 1 if schanged else 0
            # Skip this state if it was successful & diff output was requested
            if __opts__.get('state_output_diff', False) and \
                    ret['result'] and not schanged:
                continue
            # Skip this state if state_verbose is False, the result is True and
            # there were no changes made
            if not __opts__.get('state_verbose', False) and \
                    ret['result'] and not schanged:
                continue
            if schanged:
                tcolor = colors['CYAN']
            if ret['result'] is False:
                hcolor = colors['RED']
                tcolor = colors['RED']
            if ret['result'] is None:
                hcolor = colors['LIGHT_YELLOW']
                tcolor = colors['LIGHT_YELLOW']
            # tname is '<state>_|-<id>_|-<name>_|-<function>'
            comps = tname.split('_|-')
            if __opts__.get('state_output', 'full').lower() == 'filter':
                # By default, full data is shown for all types. However, return
                # data may be excluded by setting state_output_exclude to a
                # comma-separated list of True, False or None, or including the
                # same list with the exclude option on the command line. For
                # now, this option must include a comma. For example:
                #     exclude=True,
                # The same functionality is also available for making return
                # data terse, instead of excluding it.
                cliargs = __opts__.get('arg', [])
                clikwargs = {}
                for item in cliargs:
                    if isinstance(item, dict) and '__kwarg__' in item:
                        clikwargs = item.copy()
                exclude = clikwargs.get(
                    'exclude', __opts__.get('state_output_exclude', []))
                if isinstance(exclude, six.string_types):
                    exclude = str(exclude).split(',')
                terse = clikwargs.get(
                    'terse', __opts__.get('state_output_terse', []))
                if isinstance(terse, six.string_types):
                    terse = str(terse).split(',')
                if str(ret['result']) in terse:
                    msg = _format_terse(tcolor, comps, ret, colors, tabular)
                    hstrs.append(msg)
                    continue
                if str(ret['result']) in exclude:
                    continue
            elif __opts__.get('state_output', 'full').lower() == 'terse':
                # Print this chunk in a terse way and continue in the
                # loop
                msg = _format_terse(tcolor, comps, ret, colors, tabular)
                hstrs.append(msg)
                continue
            elif __opts__.get('state_output', 'full').lower() == 'mixed':
                # Print terse unless it failed
                if ret['result'] is not False:
                    msg = _format_terse(tcolor, comps, ret, colors, tabular)
                    hstrs.append(msg)
                    continue
            elif __opts__.get('state_output', 'full').lower() == 'changes':
                # Print terse if no error and no changes, otherwise, be
                # verbose
                if ret['result'] and not schanged:
                    msg = _format_terse(tcolor, comps, ret, colors, tabular)
                    hstrs.append(msg)
                    continue
            state_lines = [
                u'{tcolor}----------{colors[ENDC]}',
                u'    {tcolor}      ID: {comps[1]}{colors[ENDC]}',
                u'    {tcolor}Function: {comps[0]}.{comps[3]}{colors[ENDC]}',
                u'    {tcolor}  Result: {ret[result]!s}{colors[ENDC]}',
                u'    {tcolor} Comment: {comment}{colors[ENDC]}',
            ]
            if __opts__.get('state_output_profile', True):
                state_lines.extend([
                    u'    {tcolor} Started: {ret[start_time]!s}{colors[ENDC]}',
                    u'    {tcolor}Duration: {ret[duration]!s}{colors[ENDC]}',
                ])
            # This isn't the prettiest way of doing this, but it's readable.
            if comps[1] != comps[2]:
                state_lines.insert(
                    3, u'    {tcolor}    Name: {comps[2]}{colors[ENDC]}')
            # be sure that ret['comment'] is utf-8 friendly
            try:
                if not isinstance(ret['comment'], six.text_type):
                    ret['comment'] = ret['comment'].decode('utf-8')
            except UnicodeDecodeError:
                # but try to continue on errors
                pass
            try:
                comment = salt.utils.locales.sdecode(ret['comment'])
                comment = comment.strip().replace(u'\n', u'\n' + u' ' * 14)
            except AttributeError:  # Assume comment is a list
                try:
                    # NOTE(review): ``list.join`` does not exist, so for a
                    # list comment this raises AttributeError and falls
                    # through to the str() conversion below.
                    comment = ret['comment'].join(' ').replace(
                        u'\n', u'\n' + u' ' * 13)
                except AttributeError:
                    # Comment isn't a list either, just convert to string
                    comment = str(ret['comment'])
                    comment = comment.strip().replace(
                        u'\n', u'\n' + u' ' * 14)
            # If there is a data attribute, append it to the comment
            if 'data' in ret:
                if isinstance(ret['data'], list):
                    for item in ret['data']:
                        comment = '{0} {1}'.format(comment, item)
                elif isinstance(ret['data'], dict):
                    for key, value in ret['data'].items():
                        comment = '{0}\n\t\t{1}: {2}'.format(
                            comment, key, value)
                else:
                    comment = '{0} {1}'.format(comment, ret['data'])
            for detail in ['start_time', 'duration']:
                ret.setdefault(detail, u'')
            if ret['duration'] != '':
                ret['duration'] = u'{0} ms'.format(ret['duration'])
            svars = {
                'tcolor': tcolor,
                'comps': comps,
                'ret': ret,
                'comment': comment,
                # This nukes any trailing \n and indents the others.
                'colors': colors
            }
            hstrs.extend([sline.format(**svars) for sline in state_lines])
            changes = u'     Changes:   ' + ctext
            hstrs.append((u'{0}{1}{2[ENDC]}'.format(tcolor, changes, colors)))
            if 'warnings' in ret:
                rcounts.setdefault('warnings', 0)
                rcounts['warnings'] += 1
                wrapper = textwrap.TextWrapper(width=80,
                                               initial_indent=u' ' * 14,
                                               subsequent_indent=u' ' * 14)
                hstrs.append(
                    u'   {colors[LIGHT_RED]} Warnings: {0}{colors[ENDC]}'.
                    format(wrapper.fill('\n'.join(ret['warnings'])).lstrip(),
                           colors=colors))
        # Append result counts to end of output
        colorfmt = u'{0}{1}{2[ENDC]}'
        rlabel = {
            True: u'Succeeded',
            False: u'Failed',
            None: u'Not Run',
            'warnings': u'Warnings'
        }
        count_max_len = max(
            [len(str(x)) for x in six.itervalues(rcounts)] or [0])
        label_max_len = max([len(x) for x in six.itervalues(rlabel)] or [0])
        line_max_len = label_max_len + count_max_len + 2  # +2 for ': '
        hstrs.append(
            colorfmt.format(
                colors['CYAN'],
                u'\nSummary for {0}\n{1}'.format(host, '-' * line_max_len),
                colors))

        def _counts(label, count):
            # Right-align counts so the summary column lines up
            return u'{0}: {1:>{2}}'.format(label, count,
                                           line_max_len - (len(label) + 2))

        # Successful states
        changestats = []
        if None in rcounts and rcounts.get(None, 0) > 0:
            # test=True states
            changestats.append(
                colorfmt.format(colors['LIGHT_YELLOW'],
                                u'unchanged={0}'.format(rcounts.get(None, 0)),
                                colors))
        if nchanges > 0:
            changestats.append(
                colorfmt.format(colors['GREEN'],
                                u'changed={0}'.format(nchanges),
                                colors))
        if changestats:
            changestats = u' ({0})'.format(', '.join(changestats))
        else:
            changestats = u''
        hstrs.append(
            colorfmt.format(
                colors['GREEN'],
                _counts(rlabel[True],
                        rcounts.get(True, 0) + rcounts.get(None, 0)),
                colors) + changestats)

        # Failed states
        num_failed = rcounts.get(False, 0)
        hstrs.append(
            colorfmt.format(colors['RED'] if num_failed else colors['CYAN'],
                            _counts(rlabel[False], num_failed),
                            colors))

        num_warnings = rcounts.get('warnings', 0)
        if num_warnings:
            hstrs.append(
                colorfmt.format(colors['LIGHT_RED'],
                                _counts(rlabel['warnings'], num_warnings),
                                colors))
        # warnings are tracked separately, so subtract them from the total
        totals = u'{0}\nTotal states run: {1:>{2}}'.format(
            '-' * line_max_len,
            sum(six.itervalues(rcounts)) - rcounts.get('warnings', 0),
            line_max_len - 7)
        hstrs.append(colorfmt.format(colors['CYAN'], totals, colors))

        sum_duration = sum(rdurations)
        duration_unit = 'ms'
        # convert to seconds if duration is 1000ms or more
        if sum_duration > 999:
            sum_duration /= 1000
            duration_unit = 's'
        total_duration = u'Total run time: {0} {1}'.format(
            '{:.3f}'.format(sum_duration).rjust(line_max_len - 5),
            duration_unit)
        hstrs.append(colorfmt.format(colors['CYAN'], total_duration, colors))

    if strip_colors:
        host = salt.output.strip_esc_sequence(host)
    hstrs.insert(0, (u'{0}{1}:{2[ENDC]}'.format(hcolor, host, colors)))
    return u'\n'.join(hstrs), nchanges > 0
def __iter__(self):
    '''
    Yield the first element of each value stored in ``self._data``.
    '''
    for entry in six.itervalues(self._data):
        yield entry[0]
def parallel_runners(name, runners):
    '''
    Executes multiple runner modules on the master in parallel.

    .. versionadded:: 2017.x.0 (Nitrogen)

    A separate thread is spawned for each runner. This state is intended to be
    used with the orchestrate runner in place of the ``saltmod.runner`` state
    when different tasks should be run in parallel. In general, Salt states are
    not safe when used concurrently, so ensure that they are used in a safe way
    (e.g. by only targeting separate minions in parallel tasks).

    name:
        name identifying this state. The name is provided as part of the
        output, but not used for anything else.

    runners:
        list of runners that should be run in parallel. Each element of the
        list has to be a dictionary. This dictionary's name entry stores the
        name of the runner function that shall be invoked. The optional kwarg
        entry stores a dictionary of named arguments that are passed to the
        runner function.

    .. code-block:: yaml

        parallel-state:
          salt.parallel_runners:
            - runners:
                my_runner_1:
                  - name: state.orchestrate
                  - kwarg:
                      mods: orchestrate_state_1
                my_runner_2:
                  - name: state.orchestrate
                  - kwarg:
                      mods: orchestrate_state_2
    '''
    # For the sake of consistency, we treat a single string in the same way as
    # a key without a value. This allows something like
    #     salt.parallel_runners:
    #       - runners:
    #           state.orchestrate
    # Obviously, this will only work if the specified runner does not need any
    # arguments.
    if isinstance(runners, six.string_types):
        runners = {runners: [{name: runners}]}
    # If the runners argument is not a string, it must be a dict. Everything
    # else is considered an error.
    if not isinstance(runners, dict):
        return {
            'name': name,
            'result': False,
            'changes': {},
            'comment': 'The runners parameter must be a string or dict.'
        }
    # The configuration for each runner is given as a list of key-value pairs.
    # This is not very useful for what we want to do, but it is the typical
    # style used in Salt. For further processing, we convert each of these
    # lists to a dict. This also makes it easier to check whether a name has
    # been specified explicitly.
    # NOTE: only existing keys are re-assigned here, so mutating ``runners``
    # while iterating over it does not change the set of keys.
    for runner_id, runner_config in six.iteritems(runners):
        if runner_config is None:
            runner_config = {}
        else:
            runner_config = salt.utils.data.repack_dictlist(runner_config)
        if 'name' not in runner_config:
            runner_config['name'] = runner_id
        runners[runner_id] = runner_config

    try:
        jid = __orchestration_jid__
    except NameError:
        log.debug(
            'Unable to fire args event due to missing __orchestration_jid__')
        jid = None

    def call_runner(runner_config):
        # Thin wrapper handed to _parallel_map; runs one runner to completion
        return __salt__['saltutil.runner'](runner_config['name'],
                                           __orchestration_jid__=jid,
                                           __env__=__env__,
                                           full_return=True,
                                           **(runner_config.get('kwarg', {})))

    try:
        outputs = _parallel_map(call_runner, list(six.itervalues(runners)))
    except salt.exceptions.SaltException as exc:
        return {
            'name': name,
            'result': False,
            'success': False,
            'changes': {},
            'comment': 'One of the runners raised an exception: {0}'.format(
                exc)
        }
    # We bundle the results of the runners with the IDs of the runners so that
    # we can easily identify which output belongs to which runner. At the same
    # time we extract the actual return value of the runner (saltutil.runner
    # adds some extra information that is not interesting to us).
    # NOTE: this relies on _parallel_map preserving input order, so that the
    # n-th output corresponds to the n-th runner.
    outputs = {
        runner_id: out['return'] for runner_id, out in
        six.moves.zip(six.iterkeys(runners), outputs)
    }
    # If each of the runners returned its output in the format compatible with
    # the 'highstate' outputter, we can leverage this fact when merging the
    # outputs.
    highstate_output = all(
        [out.get('outputter', '') == 'highstate' and 'data' in out
         for out in six.itervalues(outputs)]
    )

    # The following helper function is used to extract changes from highstate
    # output.
    def extract_changes(obj):
        # Recursively pull 'changes' dicts out of (possibly nested)
        # highstate-style output.
        if not isinstance(obj, dict):
            return {}
        elif 'changes' in obj:
            if (isinstance(obj['changes'], dict)
                    and obj['changes'].get('out', '') == 'highstate'
                    and 'ret' in obj['changes']):
                return obj['changes']['ret']
            else:
                return obj['changes']
        else:
            found_changes = {}
            for key, value in six.iteritems(obj):
                change = extract_changes(value)
                if change:
                    found_changes[key] = change
            return found_changes

    if highstate_output:
        # Highstate-style output carries a retcode per runner
        failed_runners = [
            runner_id for runner_id, out in six.iteritems(outputs)
            if out['data'].get('retcode', 0) != 0
        ]
        all_successful = not failed_runners
        if all_successful:
            comment = 'All runner functions executed successfully.'
        else:
            runner_comments = [
                'Runner {0} failed with return value:\n{1}'.format(
                    runner_id,
                    salt.output.out_format(outputs[runner_id],
                                           'nested',
                                           __opts__,
                                           nested_indent=2))
                for runner_id in failed_runners
            ]
            comment = '\n'.join(runner_comments)
        changes = {}
        for runner_id, out in six.iteritems(outputs):
            runner_changes = extract_changes(out['data'])
            if runner_changes:
                changes[runner_id] = runner_changes
    else:
        # Fall back to the generic exit_code convention
        failed_runners = [
            runner_id for runner_id, out in six.iteritems(outputs)
            if out.get('exit_code', 0) != 0
        ]
        all_successful = not failed_runners
        if all_successful:
            comment = 'All runner functions executed successfully.'
        else:
            if len(failed_runners) == 1:
                comment = 'Runner {0} failed.'.format(failed_runners[0])
            else:
                comment = \
                    'Runners {0} failed.'.format(', '.join(failed_runners))
        changes = {'ret': {
            runner_id: out for runner_id, out in six.iteritems(outputs)
        }}
    ret = {
        'name': name,
        'result': all_successful,
        'changes': changes,
        'comment': comment
    }

    # The 'runner' function includes out['jid'] as '__jid__' in the returned
    # dict, but we cannot do this here because we have more than one JID if
    # we have more than one runner.

    return ret
def setUpClass(cls):  # pylint: disable=arguments-differ
    '''
    Prepare an sshd instance (config, keys, daemon) and the known_hosts
    entry needed by the git+ssh pillar tests. Class-level flags
    (``prep_states_ran``, ``known_hosts_setup``, ``sshd_proc``) make this
    safe to call repeatedly across subclasses.
    '''
    super(SSHDMixin, cls).setUpClass()
    try:
        log.info("%s: prep_server()", cls.__name__)
        cls.sshd_bin = salt.utils.path.which("sshd")
        # Temporary config dir for the throwaway sshd instance
        cls.sshd_config_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        cls.sshd_config = os.path.join(cls.sshd_config_dir, "sshd_config")
        cls.sshd_port = get_unused_localhost_port()
        cls.url = "ssh://{username}@127.0.0.1:{port}/~/repo.git".format(
            username=cls.username, port=cls.sshd_port
        )
        cls.url_extra_repo = "ssh://{username}@127.0.0.1:{port}/~/extra_repo.git".format(
            username=cls.username, port=cls.sshd_port
        )
        home = "/root/.ssh"
        # Options consumed by the git_pillar test configuration
        cls.ext_opts = {
            "url": cls.url,
            "url_extra_repo": cls.url_extra_repo,
            "privkey_nopass": os.path.join(home, cls.id_rsa_nopass),
            "pubkey_nopass": os.path.join(home, cls.id_rsa_nopass + ".pub"),
            "privkey_withpass": os.path.join(home, cls.id_rsa_withpass),
            "pubkey_withpass": os.path.join(home, cls.id_rsa_withpass + ".pub"),
            "passphrase": cls.passphrase,
        }
        if cls.prep_states_ran is False:
            # Apply the git_pillar.ssh state once to lay down keys/config
            ret = cls.cls_run_function(
                "state.apply",
                mods="git_pillar.ssh",
                pillar={
                    "git_pillar": {
                        "git_ssh": cls.git_ssh,
                        "id_rsa_nopass": cls.id_rsa_nopass,
                        "id_rsa_withpass": cls.id_rsa_withpass,
                        "sshd_bin": cls.sshd_bin,
                        "sshd_port": cls.sshd_port,
                        "sshd_config_dir": cls.sshd_config_dir,
                        "master_user": cls.user,
                        "user": cls.username,
                    }
                },
            )
            # single-state return: check the one (and only) state result
            assert next(six.itervalues(ret))["result"] is True
            cls.prep_states_ran = True
            log.info("%s: States applied", cls.__name__)
        if cls.sshd_proc is not None:
            # Discard a stale handle if the daemon died since the last test
            if not psutil.pid_exists(cls.sshd_proc.pid):
                log.info(
                    "%s: sshd started but appears to be dead now. "
                    "Will try to restart it.",
                    cls.__name__,
                )
                cls.sshd_proc = None
        if cls.sshd_proc is None:
            cls.sshd_proc = start_daemon(
                cls.sshd_bin, cls.sshd_config_dir, cls.sshd_port, SshdDaemon
            )
            log.info("%s: sshd started", cls.__name__)
    except AssertionError:
        # Clean up anything partially set up, then re-raise the failure
        cls.tearDownClass()
        six.reraise(*sys.exc_info())

    if cls.known_hosts_setup is False:
        # Pin the test sshd's host key so git+ssh does not prompt
        known_hosts_ret = cls.cls_run_function(
            "ssh.set_known_host",
            user=cls.user,
            hostname="127.0.0.1",
            port=cls.sshd_port,
            enc="ssh-rsa",
            fingerprint="fd:6f:7f:5d:06:6b:f2:06:0d:26:93:9e:5a:b5:19:46",
            hash_known_hosts=False,
            fingerprint_hash_type="md5",
        )
        if "error" in known_hosts_ret:
            cls.tearDownClass()
            raise AssertionError(
                "Failed to add key to {0} user's known_hosts "
                "file: {1}".format(
                    cls.master_opts["user"], known_hosts_ret["error"]
                )
            )
        cls.known_hosts_setup = True
def state(name,
          tgt,
          ssh=False,
          tgt_type='glob',
          expr_form=None,
          ret='',
          highstate=None,
          sls=None,
          top=None,
          saltenv=None,
          test=False,
          pillar=None,
          expect_minions=True,
          fail_minions=None,
          allow_fail=0,
          concurrent=False,
          timeout=None,
          batch=None,
          queue=False,
          subset=None,
          orchestration_jid=None):
    '''
    Invoke a state run on a given target

    name
        An arbitrary name used to track the state execution

    tgt
        The target specification for the state run.

        .. versionadded: 2016.11.0

        Masterless support: When running on a masterless minion, the
        ``tgt`` is ignored and will always be the local minion.

    tgt_type
        The target type to resolve, defaults to ``glob``

    expr_form
        .. deprecated:: Nitrogen
            Use tgt_type instead

    ret
        Optionally set a single or a list of returners to use

    highstate
        Defaults to None, if set to True the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions

    top
        Should be the name of a top file. If set state.top is called with
        this top file instead of state.sls.

    sls
        A group of sls files to execute. This can be defined as a single
        string containing a single sls file, or a list of sls files

    test
        Pass ``test=true`` through to the state function

    pillar
        Pass the ``pillar`` kwarg through to the state function

    saltenv
        The default salt environment to pull sls files from

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client

    roster
        In the event of using salt-ssh, a roster system can be set

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    allow_fail
        Pass in the number of minions to allow for failure before setting
        the result of the execution to False

    concurrent
        Allow multiple state runs to occur at once.

        WARNING: This flag is potentially dangerous. It is designed for use
        when multiple state runs can safely be run at the same time. Do not
        use this flag for performance optimization.

    queue
        Pass ``queue=true`` through to the state function

    batch
        Execute the command :ref:`in batches <targeting-batch>`.
        E.g.: ``10%``.

        .. versionadded:: 2016.3.0

    subset
        Number of minions from the targeted set to randomly use

        .. versionadded:: Nitrogen

    Examples:

    Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on
    target minions:

    .. code-block:: yaml

        webservers:
          salt.state:
            - tgt: 'web*'
            - sls:
              - apache
              - django
              - core
            - saltenv: prod

    Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    mininons.

    .. code-block:: yaml

        databases:
          salt.state:
            - tgt: role:database
            - tgt_type: grain
            - highstate: True
    '''
    # Arguments that are forwarded to saltutil.cmd / the local state call.
    cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}

    state_ret = {'name': name,
                 'changes': {},
                 'comment': '',
                 'result': True}

    try:
        allow_fail = int(allow_fail)
    except ValueError:
        state_ret['result'] = False
        state_ret[
            'comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
        return state_ret

    # remember to remove the expr_form argument from this function when
    # performing the cleanup on this deprecation.
    if expr_form is not None:
        salt.utils.warn_until(
            'Fluorine',
            'the target type should be passed using the \'tgt_type\' '
            'argument instead of \'expr_form\'. Support for using '
            '\'expr_form\' will be removed in Salt Fluorine.')
        tgt_type = expr_form

    cmd_kw['tgt_type'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    # Exactly one of highstate / top / sls selects the state function to run.
    if highstate:
        fun = 'state.highstate'
    elif top:
        fun = 'state.top'
        cmd_kw['arg'].append(top)
    elif sls:
        fun = 'state.sls'
        if isinstance(sls, list):
            sls = ','.join(sls)
        cmd_kw['arg'].append(sls)
    else:
        state_ret[
            'comment'] = 'No highstate or sls specified, no execution made'
        state_ret['result'] = False
        return state_ret

    # Test mode is forced on if the orchestrating master itself runs in test
    # mode, not only when the caller passes test=True.
    if test or __opts__.get('test'):
        cmd_kw['kwarg']['test'] = True

    if pillar:
        cmd_kw['kwarg']['pillar'] = pillar

    if __opts__.get('pillarenv'):
        cmd_kw['kwarg']['pillarenv'] = __opts__['pillarenv']

    cmd_kw['kwarg']['saltenv'] = __env__
    cmd_kw['kwarg']['queue'] = queue

    if isinstance(concurrent, bool):
        cmd_kw['kwarg']['concurrent'] = concurrent
    else:
        state_ret['comment'] = (
            'Must pass in boolean for value of \'concurrent\'')
        state_ret['result'] = False
        return state_ret

    if batch is not None:
        cmd_kw['batch'] = str(batch)

    if subset is not None:
        cmd_kw['subset'] = subset

    # Masterless minion: run the state function locally instead of routing
    # through saltutil.cmd, then shape the result like a saltutil.cmd return.
    masterless = __opts__['__role'] == 'minion' and \
                 __opts__['file_client'] == 'local'
    if not masterless:
        _fire_args({'type': 'state', 'tgt': tgt, 'name': name, 'args': cmd_kw})
        cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
    else:
        if top:
            cmd_kw['topfn'] = ''.join(cmd_kw.pop('arg'))
        elif sls:
            cmd_kw['mods'] = cmd_kw.pop('arg')
        cmd_kw.update(cmd_kw.pop('kwarg'))
        tmp_ret = __salt__[fun](**cmd_kw)
        cmd_ret = {
            __opts__['id']: {
                'ret': tmp_ret,
                'out': tmp_ret.get('out', 'highstate')
                       if isinstance(tmp_ret, dict) else 'highstate'
            }
        }

    # Record the jid of the spawned job if any minion returned one.
    try:
        state_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
    except (StopIteration, KeyError):
        pass

    changes = {}
    fail = set()
    failures = {}
    no_change = set()

    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        state_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.')
        fail_minions = ()

    for minion, mdata in six.iteritems(cmd_ret):
        if mdata.get('out', '') != 'highstate':
            log.warning('Output from salt state not highstate')

        m_ret = False

        # salt-ssh returns use 'return' where the standard client uses 'ret'.
        if 'return' in mdata and 'ret' not in mdata:
            mdata['ret'] = mdata.pop('return')

        m_state = True
        if mdata.get('failed', False):
            m_state = False
        else:
            try:
                m_ret = mdata['ret']
            except KeyError:
                m_state = False
            if m_state:
                m_state = salt.utils.check_state_result(m_ret)

        if not m_state:
            # Minions listed in fail_minions are allowed to fail without
            # counting toward allow_fail.
            if minion not in fail_minions:
                fail.add(minion)
            failures[minion] = m_ret or 'Minion did not respond'
            continue
        try:
            for state_item in six.itervalues(m_ret):
                if 'changes' in state_item and state_item['changes']:
                    changes[minion] = m_ret
                    break
            else:
                # for/else: no state on this minion reported changes.
                no_change.add(minion)
        except AttributeError:
            # m_ret was not a dict of state results; treat as no changes.
            log.error("m_ret did not have changes %s %s",
                      type(m_ret), m_ret)
            no_change.add(minion)

    if changes:
        state_ret['changes'] = {'out': 'highstate', 'ret': changes}
    if len(fail) > allow_fail:
        state_ret['result'] = False
        state_ret['comment'] = 'Run failed on minions: {0}'.format(
            ', '.join(fail))
    else:
        state_ret['comment'] = 'States ran successfully.'
        if changes:
            state_ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
        if no_change:
            state_ret['comment'] += ' No changes made to {0}.'.format(
                ', '.join(no_change))
    if failures:
        state_ret['comment'] += '\nFailures:\n'
        for minion, failure in six.iteritems(failures):
            state_ret['comment'] += '\n'.join(
                (' ' * 4 + l)
                for l in salt.output.out_format(
                    {
                        minion: failure
                    },
                    'highstate',
                    __opts__,
                ).splitlines())
            state_ret['comment'] += '\n'
    if test or __opts__.get('test'):
        if state_ret['changes'] and state_ret['result'] is True:
            # Test mode with changes is the only case where result should ever be none
            state_ret['result'] = None
    return state_ret
def destroy(name, call=None): """ This function irreversibly destroys a virtual machine on the cloud provider. Before doing so, it should fire an event on the Salt event bus. The tag for this event is `salt/cloud/<vm name>/destroying`. Once the virtual machine has been destroyed, another event is fired. The tag for that event is `salt/cloud/<vm name>/destroyed`. Dependencies: list_nodes @param name: @type name: str @param call: @type call: @return: True if all went well, otherwise an error message @rtype: bool|str """ log.info("Attempting to delete instance %s", name) if call == "function": raise SaltCloudSystemExit( "The destroy action must be called with -d, --destroy, " "-a or --action.") found = [] providers = __opts__.get("providers", {}) providers_to_check = [ _f for _f in [cfg.get("libvirt") for cfg in six.itervalues(providers)] if _f ] for provider in providers_to_check: conn = __get_conn(provider["url"]) log.info("looking at %s", provider["url"]) try: domain = conn.lookupByName(name) found.append({"domain": domain, "conn": conn}) except libvirtError: pass if not found: return "{0} doesn't exist and can't be deleted".format(name) if len(found) > 1: return "{0} doesn't identify a unique machine leaving things".format( name) __utils__["cloud.fire_event"]( "event", "destroying instance", "salt/cloud/{0}/destroying".format(name), args={ "name": name }, sock_dir=__opts__["sock_dir"], transport=__opts__["transport"], ) destroy_domain(found[0]["conn"], found[0]["domain"]) __utils__["cloud.fire_event"]( "event", "destroyed instance", "salt/cloud/{0}/destroyed".format(name), args={ "name": name }, sock_dir=__opts__["sock_dir"], transport=__opts__["transport"], )
def state(name,
          tgt,
          ssh=False,
          tgt_type=None,
          expr_form=None,
          ret='',
          highstate=None,
          sls=None,
          top=None,
          env=None,
          test=False,
          pillar=None,
          expect_minions=False,
          fail_minions=None,
          allow_fail=0,
          concurrent=False,
          timeout=None,
          batch=None):
    '''
    Invoke a state run on a given target

    name
        An arbitrary name used to track the state execution

    tgt
        The target specification for the state run.

    tgt_type | expr_form
        The target type to resolve, defaults to glob

    ret
        Optionally set a single or a list of returners to use

    highstate
        Defaults to None, if set to True the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions

    top
        Should be the name of a top file. If set state.top is called with
        this top file instead of state.sls.

    sls
        A group of sls files to execute. This can be defined as a single
        string containing a single sls file, or a list of sls files

    test
        Pass ``test=true`` through to the state function

    pillar
        Pass the ``pillar`` kwarg through to the state function

    saltenv
        The default salt environment to pull sls files from

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client

    roster
        In the event of using salt-ssh, a roster system can be set

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    allow_fail
        Pass in the number of minions to allow for failure before setting
        the result of the execution to False

    concurrent
        Allow multiple state runs to occur at once.

        WARNING: This flag is potentially dangerous. It is designed for use
        when multiple state runs can safely be run at the same time. Do not
        use this flag for performance optimization.

    Examples:

    Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on
    target minions:

    .. code-block:: yaml

        webservers:
          salt.state:
            - tgt: 'web*'
            - sls:
              - apache
              - django
              - core
            - saltenv: prod

    Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    mininons.

    .. code-block:: yaml

        databases:
          salt.state:
            - tgt: role:database
            - tgt_type: grain
            - highstate: True
    '''
    cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}

    state_ret = {'name': name,
                 'changes': {},
                 'comment': '',
                 'result': True}

    try:
        allow_fail = int(allow_fail)
    except ValueError:
        state_ret['result'] = False
        state_ret[
            'comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
        return state_ret

    if env is not None:
        msg = (
            'Passing a salt environment should be done using \'saltenv\' not '
            '\'env\'. This warning will go away in Salt Boron and this '
            'will be the default and expected behavior. Please update your '
            'state files.')
        salt.utils.warn_until('Boron', msg)
        state_ret.setdefault('warnings', []).append(msg)
        # No need to set __env__ = env since that's done in the state machinery

    # Reconcile the deprecated expr_form argument with tgt_type; tgt_type
    # wins when both are supplied.
    if expr_form and tgt_type:
        state_ret.setdefault('warnings', []).append(
            'Please only use \'tgt_type\' or \'expr_form\' not both. '
            'Preferring \'tgt_type\' over \'expr_form\'')
        expr_form = None
    elif expr_form and not tgt_type:
        tgt_type = expr_form
    elif not tgt_type and not expr_form:
        tgt_type = 'glob'

    cmd_kw['expr_form'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    # Exactly one of highstate / top / sls selects the state function to run.
    if highstate:
        fun = 'state.highstate'
    elif top:
        fun = 'state.top'
        cmd_kw['arg'].append(top)
    elif sls:
        fun = 'state.sls'
        if isinstance(sls, list):
            sls = ','.join(sls)
        cmd_kw['arg'].append(sls)
    else:
        state_ret[
            'comment'] = 'No highstate or sls specified, no execution made'
        state_ret['result'] = False
        return state_ret

    if test or __opts__.get('test'):
        cmd_kw['kwarg']['test'] = True

    if pillar:
        cmd_kw['kwarg']['pillar'] = pillar

    cmd_kw['kwarg']['saltenv'] = __env__

    if isinstance(concurrent, bool):
        cmd_kw['kwarg']['concurrent'] = concurrent
    else:
        state_ret['comment'] = (
            'Must pass in boolean for value of \'concurrent\'')
        state_ret['result'] = False
        return state_ret

    if batch is not None:
        cmd_kw['batch'] = str(batch)

    cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)

    changes = {}
    fail = set()
    failures = {}
    no_change = set()

    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        state_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.')
        fail_minions = ()

    for minion, mdata in six.iteritems(cmd_ret):
        if mdata.get('out', '') != 'highstate':
            log.warning("Output from salt state not highstate")

        m_ret = False

        # salt-ssh returns use 'return' where the standard client uses 'ret'.
        if 'return' in mdata and 'ret' not in mdata:
            mdata['ret'] = mdata.pop('return')

        m_state = True
        if mdata.get('failed', False):
            m_state = False
        else:
            try:
                m_ret = mdata['ret']
            except KeyError:
                m_state = False
            if m_state:
                m_state = salt.utils.check_state_result(m_ret)

        if not m_state:
            if minion not in fail_minions:
                fail.add(minion)
            failures[minion] = m_ret or 'Minion did not respond'
            continue
        # Fix: guard the changes scan the same way the newer state()
        # implementations in this file do. Previously a non-dict m_ret
        # raised AttributeError and a state item missing the 'changes'
        # key raised KeyError, aborting the whole aggregation.
        try:
            for state_item in six.itervalues(m_ret):
                if 'changes' in state_item and state_item['changes']:
                    changes[minion] = m_ret
                    break
            else:
                # for/else: no state on this minion reported changes.
                no_change.add(minion)
        except AttributeError:
            log.error("m_ret did not have changes %s %s",
                      type(m_ret), m_ret)
            no_change.add(minion)

    if changes:
        state_ret['changes'] = {'out': 'highstate', 'ret': changes}
    if len(fail) > allow_fail:
        state_ret['result'] = False
        state_ret['comment'] = 'Run failed on minions: {0}'.format(
            ', '.join(fail))
    else:
        state_ret['comment'] = 'States ran successfully.'
        if changes:
            state_ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
        if no_change:
            state_ret['comment'] += ' No changes made to {0}.'.format(
                ', '.join(no_change))
    if failures:
        state_ret['comment'] += '\nFailures:\n'
        for minion, failure in six.iteritems(failures):
            state_ret['comment'] += '\n'.join(
                (' ' * 4 + l)
                for l in salt.output.out_format(
                    {
                        minion: failure
                    },
                    'highstate',
                    __opts__,
                ).splitlines())
            state_ret['comment'] += '\n'
    if test or __opts__.get('test'):
        if state_ret['changes'] and state_ret['result'] is True:
            # Test mode with changes is the only case where result should ever be none
            state_ret['result'] = None
    return state_ret
def state(name,
          tgt,
          ssh=False,
          tgt_type="glob",
          ret="",
          ret_config=None,
          ret_kwargs=None,
          highstate=None,
          sls=None,
          top=None,
          saltenv=None,
          test=None,
          pillar=None,
          pillarenv=None,
          expect_minions=True,
          fail_minions=None,
          allow_fail=0,
          concurrent=False,
          timeout=None,
          batch=None,
          queue=False,
          subset=None,
          orchestration_jid=None,
          failhard=None,
          **kwargs):
    """
    Invoke a state run on a given target

    name
        An arbitrary name used to track the state execution

    tgt
        The target specification for the state run.

        .. versionadded: 2016.11.0

        Masterless support: When running on a masterless minion, the
        ``tgt`` is ignored and will always be the local minion.

    tgt_type
        The target type to resolve, defaults to ``glob``

    ret
        Optionally set a single or a list of returners to use

    ret_config
        Use an alternative returner configuration

    ret_kwargs
        Override individual returner configuration items

    highstate
        Defaults to None, if set to True the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions

    top
        Should be the name of a top file. If set state.top is called with
        this top file instead of state.sls.

    sls
        A group of sls files to execute. This can be defined as a single
        string containing a single sls file, or a list of sls files

    test
        Pass ``test=true`` or ``test=false`` through to the state function.
        This can be used to overide a test mode set in the minion's config
        file. If left as the default of None and the 'test' mode is supplied
        on the command line, that value is passed instead.

    pillar
        Pass the ``pillar`` kwarg through to the state function

    pillarenv
        The pillar environment to grab pillars from

        .. versionadded:: 2017.7.0

    saltenv
        The default salt environment to pull sls files from

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client

    roster
        In the event of using salt-ssh, a roster system can be set

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    allow_fail
        Pass in the number of minions to allow for failure before setting
        the result of the execution to False

    concurrent
        Allow multiple state runs to occur at once.

        WARNING: This flag is potentially dangerous. It is designed for use
        when multiple state runs can safely be run at the same time. Do not
        use this flag for performance optimization.

    queue
        Pass ``queue=true`` through to the state function

    batch
        Execute the command :ref:`in batches <targeting-batch>`.
        E.g.: ``10%``.

        .. versionadded:: 2016.3.0

    subset
        Number of minions from the targeted set to randomly use

        .. versionadded:: 2017.7.0

    failhard
        pass failhard down to the executing state

        .. versionadded:: 2019.2.2

    Examples:

    Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on
    target minions:

    .. code-block:: yaml

        webservers:
          salt.state:
            - tgt: 'web*'
            - sls:
              - apache
              - django
              - core
            - saltenv: prod

    Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    mininons.

    .. code-block:: yaml

        databases:
          salt.state:
            - tgt: role:database
            - tgt_type: grain
            - highstate: True
    """
    # Arguments that are forwarded to saltutil.cmd / the local state call.
    cmd_kw = {"arg": [], "kwarg": {}, "ret": ret, "timeout": timeout}
    if ret_config:
        cmd_kw["ret_config"] = ret_config
    if ret_kwargs:
        cmd_kw["ret_kwargs"] = ret_kwargs
    state_ret = {"name": name, "changes": {}, "comment": "", "result": True}
    try:
        allow_fail = int(allow_fail)
    except ValueError:
        state_ret["result"] = False
        state_ret[
            "comment"] = "Passed invalid value for 'allow_fail', must be an int"
        return state_ret
    cmd_kw["tgt_type"] = tgt_type
    cmd_kw["ssh"] = ssh
    if "roster" in kwargs:
        cmd_kw["roster"] = kwargs["roster"]
    cmd_kw["expect_minions"] = expect_minions
    # Exactly one of highstate / top / sls selects the state function to run.
    if highstate:
        fun = "state.highstate"
    elif top:
        fun = "state.top"
        cmd_kw["arg"].append(top)
    elif sls:
        fun = "state.sls"
        if isinstance(sls, list):
            sls = ",".join(sls)
        cmd_kw["arg"].append(sls)
    else:
        state_ret[
            "comment"] = "No highstate or sls specified, no execution made"
        state_ret["result"] = False
        return state_ret
    # test is tri-state: an explicit True/False overrides the master's own
    # test setting; None defers to __opts__['test'].
    if test is not None or __opts__.get("test"):
        cmd_kw["kwarg"]["test"] = test if test is not None else __opts__.get(
            "test")
    if pillar:
        cmd_kw["kwarg"]["pillar"] = pillar
    if pillarenv is not None:
        cmd_kw["kwarg"]["pillarenv"] = pillarenv
    if saltenv is not None:
        cmd_kw["kwarg"]["saltenv"] = saltenv
    cmd_kw["kwarg"]["queue"] = queue
    if isinstance(concurrent, bool):
        cmd_kw["kwarg"]["concurrent"] = concurrent
    else:
        state_ret["comment"] = "Must pass in boolean for value of 'concurrent'"
        state_ret["result"] = False
        return state_ret
    if batch is not None:
        cmd_kw["batch"] = six.text_type(batch)
    if subset is not None:
        cmd_kw["subset"] = subset
    if failhard is True or __opts__.get("failhard"):
        cmd_kw["failhard"] = True
    # Masterless minion: run the state function locally instead of routing
    # through saltutil.cmd, then shape the result like a saltutil.cmd return.
    masterless = __opts__["__role"] == "minion" and __opts__[
        "file_client"] == "local"
    if not masterless:
        _fire_args({"type": "state", "tgt": tgt, "name": name, "args": cmd_kw})
        cmd_ret = __salt__["saltutil.cmd"](tgt, fun, **cmd_kw)
    else:
        if top:
            cmd_kw["topfn"] = "".join(cmd_kw.pop("arg"))
        elif sls:
            cmd_kw["mods"] = "".join(cmd_kw.pop("arg"))
        cmd_kw.update(cmd_kw.pop("kwarg"))
        tmp_ret = __salt__[fun](**cmd_kw)
        cmd_ret = {
            __opts__["id"]: {
                "ret": tmp_ret,
                "out": tmp_ret.get("out", "highstate")
                if isinstance(tmp_ret, dict) else "highstate",
            }
        }
    # Record the jid of the spawned job if any minion returned one.
    try:
        state_ret["__jid__"] = cmd_ret[next(iter(cmd_ret))]["jid"]
    except (StopIteration, KeyError):
        pass
    changes = {}
    fail = set()
    no_change = set()
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, six.string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(",")]
    elif not isinstance(fail_minions, list):
        state_ret.setdefault("warnings", []).append(
            "'fail_minions' needs to be a list or a comma separated "
            "string. Ignored.")
        fail_minions = ()
    if not cmd_ret and expect_minions:
        state_ret["result"] = False
        state_ret["comment"] = "No minions returned"
        return state_ret
    for minion, mdata in six.iteritems(cmd_ret):
        if mdata.get("out", "") != "highstate":
            log.warning("Output from salt state not highstate")

        m_ret = False

        # salt-ssh returns use 'return' where the standard client uses 'ret'.
        if "return" in mdata and "ret" not in mdata:
            mdata["ret"] = mdata.pop("return")

        m_state = True
        if mdata.get("failed", False):
            m_state = False
        else:
            try:
                m_ret = mdata["ret"]
            except KeyError:
                m_state = False
            if m_state:
                m_state = __utils__["state.check_result"](m_ret, recurse=True)

        if not m_state:
            # Minions listed in fail_minions are allowed to fail without
            # counting toward allow_fail; failed returns still land in
            # changes so their output is surfaced.
            if minion not in fail_minions:
                fail.add(minion)
            changes[minion] = m_ret
            continue
        try:
            for state_item in six.itervalues(m_ret):
                if isinstance(state_item, dict):
                    if "changes" in state_item and state_item["changes"]:
                        changes[minion] = m_ret
                        break
            else:
                # for/else: no state on this minion reported changes.
                no_change.add(minion)
        except AttributeError:
            # m_ret was not a dict of state results; treat as no changes.
            log.error("m_ret did not have changes %s %s", type(m_ret), m_ret)
            no_change.add(minion)

    if changes:
        state_ret["changes"] = {"out": "highstate", "ret": changes}
    if len(fail) > allow_fail:
        state_ret["result"] = False
        state_ret["comment"] = "Run failed on minions: {0}".format(
            ", ".join(fail))
    else:
        state_ret["comment"] = "States ran successfully."
        if changes:
            state_ret["comment"] += " Updating {0}.".format(", ".join(changes))
        if no_change:
            state_ret["comment"] += " No changes made to {0}.".format(
                ", ".join(no_change))
    if test or __opts__.get("test"):
        if state_ret["changes"] and state_ret["result"] is True:
            # Test mode with changes is the only case where result should ever be none
            state_ret["result"] = None
    return state_ret
def get_docker(interfaces=None, cidrs=None, with_container_id=False):
    '''
    Get all mine data for 'docker.get_containers' and run an aggregation
    routine. The "interfaces" parameter allows for specifying which network
    interfaces to select ip addresses from. The "cidrs" parameter allows for
    specifying a list of cidrs which the ip address must match.

    with_container_id
        Boolean, to expose container_id in the list of results

        .. versionadded:: 2015.8.2

    CLI Example:

    .. code-block:: bash

        salt '*' mine.get_docker
        salt '*' mine.get_docker interfaces='eth0'
        salt '*' mine.get_docker interfaces='["eth0", "eth1"]'
        salt '*' mine.get_docker cidrs='107.170.147.0/24'
        salt '*' mine.get_docker cidrs='["107.170.147.0/24", "172.17.42.0/24"]'
        salt '*' mine.get_docker interfaces='["eth0", "eth1"]' cidrs='["107.170.147.0/24", "172.17.42.0/24"]'
    '''
    # Enforce that interface and cidr are lists
    if interfaces:
        interface_ = []
        interface_.extend(interfaces if isinstance(interfaces, list)
                          else [interfaces])
        interfaces = interface_
    if cidrs:
        cidr_ = []
        cidr_.extend(cidrs if isinstance(cidrs, list) else [cidrs])
        cidrs = cidr_

    # Get docker info
    cmd = 'docker.ps'
    docker_hosts = get('*', cmd)

    # Result shape: {image: {'ipv4': {private_port: [values]}}} where each
    # value is "ip:public_port" or (that string, container_id) when
    # with_container_id is True.
    proxy_lists = {}

    # Process docker info
    for containers in six.itervalues(docker_hosts):
        # Each mine entry carries the reporting minion's network info under
        # the 'host' key alongside its container data.
        host = containers.pop('host')
        host_ips = []

        # Prepare host_ips list
        if not interfaces:
            # No interface filter: collect every inet address on the host.
            for info in six.itervalues(host['interfaces']):
                if 'inet' in info:
                    for ip_ in info['inet']:
                        host_ips.append(ip_['address'])
        else:
            # Only collect addresses from the requested interfaces.
            for interface in interfaces:
                if interface in host['interfaces']:
                    if 'inet' in host['interfaces'][interface]:
                        for item in host['interfaces'][interface]['inet']:
                            host_ips.append(item['address'])
        host_ips = list(set(host_ips))

        # Filter out ips from host_ips with cidrs
        if cidrs:
            good_ips = []
            for cidr in cidrs:
                for ip_ in host_ips:
                    if salt.utils.network.in_subnet(cidr, [ip_]):
                        good_ips.append(ip_)
            host_ips = list(set(good_ips))

        # Process each container
        # NOTE: the loop below rebinds the name 'containers' to the per-port
        # value list; the outer dict is no longer needed at that point.
        for container in six.itervalues(containers):
            container_id = container['Info']['Id']
            if container['Image'] not in proxy_lists:
                proxy_lists[container['Image']] = {}
            for dock_port in container['Ports']:
                # IP exists only if port is exposed
                ip_address = dock_port.get('IP')
                # If port is 0.0.0.0, then we must get the docker host IP
                if ip_address == '0.0.0.0':
                    for ip_ in host_ips:
                        containers = proxy_lists[container['Image']].setdefault(
                            'ipv4', {}).setdefault(dock_port['PrivatePort'], [])
                        container_network_footprint = '{0}:{1}'.format(
                            ip_, dock_port['PublicPort'])
                        if with_container_id:
                            value = (container_network_footprint, container_id)
                        else:
                            value = container_network_footprint
                        if value not in containers:
                            containers.append(value)
                elif ip_address:
                    containers = proxy_lists[container['Image']].setdefault(
                        'ipv4', {}).setdefault(dock_port['PrivatePort'], [])
                    container_network_footprint = '{0}:{1}'.format(
                        dock_port['IP'], dock_port['PublicPort'])
                    if with_container_id:
                        value = (container_network_footprint, container_id)
                    else:
                        value = container_network_footprint
                    if value not in containers:
                        containers.append(value)

    return proxy_lists
def _post_processing(kwargs, skip_translate, invalid): """ Additional container-specific post-translation processing """ # Don't allow conflicting options to be set if kwargs.get("port_bindings") is not None and kwargs.get("publish_all_ports"): kwargs.pop("port_bindings") invalid["port_bindings"] = "Cannot be used when publish_all_ports=True" if kwargs.get("hostname") is not None and kwargs.get("network_mode") == "host": kwargs.pop("hostname") invalid["hostname"] = "Cannot be used when network_mode=True" # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get("binds") is not None and ( skip_translate is True or all(x not in skip_translate for x in ("binds", "volume", "volumes")) ): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. auto_volumes = [] if isinstance(kwargs["binds"], dict): for val in six.itervalues(kwargs["binds"]): try: if "bind" in val: auto_volumes.append(val["bind"]) except TypeError: continue else: if isinstance(kwargs["binds"], list): auto_volume_defs = kwargs["binds"] else: try: auto_volume_defs = helpers.split(kwargs["binds"]) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ":")[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault("volumes", []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get("port_bindings") is not None and all( x not in skip_translate for x in ("port_bindings", "expose", "ports") ): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. 
ports_to_bind = list(kwargs["port_bindings"]) if ports_to_bind: ports_to_open = set(kwargs.get("ports", [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs["ports"] = list(ports_to_open) if "ports" in kwargs and all(x not in skip_translate for x in ("expose", "ports")): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs["ports"]): try: if kwargs["ports"][index][1] == "tcp": kwargs["ports"][index] = ports_to_open[index][0] except TypeError: continue
def setUpClass(cls):  # pylint: disable=arguments-differ
    '''
    Prepare a throwaway sshd server shared by every test in the class.

    Side effects (all stored as class attributes so tearDownClass can
    clean up): a temp sshd config dir, an unused localhost port, the
    ssh:// repo URLs used by the git_pillar tests, the applied
    ``git_pillar.ssh`` states, a running sshd daemon, and an entry in the
    master user's known_hosts file.

    Raises AssertionError (after running tearDownClass) if the prep
    states fail or the known_hosts entry cannot be added.
    '''
    super(SSHDMixin, cls).setUpClass()
    try:
        log.info('%s: prep_server()', cls.__name__)
        cls.sshd_bin = salt.utils.path.which('sshd')
        cls.sshd_config_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        cls.sshd_config = os.path.join(cls.sshd_config_dir, 'sshd_config')
        cls.sshd_port = get_unused_localhost_port()
        cls.url = 'ssh://{username}@127.0.0.1:{port}/~/repo.git'.format(
            username=cls.username,
            port=cls.sshd_port)
        cls.url_extra_repo = 'ssh://{username}@127.0.0.1:{port}/~/extra_repo.git'.format(
            username=cls.username,
            port=cls.sshd_port)
        # NOTE(review): key paths are hard-coded under /root/.ssh — assumes
        # the suite runs as root; confirm against the test runner setup.
        home = '/root/.ssh'
        cls.ext_opts = {
            'url': cls.url,
            'url_extra_repo': cls.url_extra_repo,
            'privkey_nopass': os.path.join(home, cls.id_rsa_nopass),
            'pubkey_nopass': os.path.join(home, cls.id_rsa_nopass + '.pub'),
            'privkey_withpass': os.path.join(home, cls.id_rsa_withpass),
            'pubkey_withpass': os.path.join(home, cls.id_rsa_withpass + '.pub'),
            'passphrase': cls.passphrase}

        # Apply the sshd/key-generation states only once per process; the
        # flag is a class attribute shared across test classes.
        if cls.prep_states_ran is False:
            ret = cls.cls_run_function(
                'state.apply',
                mods='git_pillar.ssh',
                pillar={'git_pillar': {'git_ssh': cls.git_ssh,
                                       'id_rsa_nopass': cls.id_rsa_nopass,
                                       'id_rsa_withpass': cls.id_rsa_withpass,
                                       'sshd_bin': cls.sshd_bin,
                                       'sshd_port': cls.sshd_port,
                                       'sshd_config_dir': cls.sshd_config_dir,
                                       'master_user': cls.user,
                                       'user': cls.username}})
            # state.apply returns {state_id: {..., 'result': bool}}; only the
            # first entry is checked here.
            assert next(six.itervalues(ret))['result'] is True
            cls.prep_states_ran = True
            log.info('%s: States applied', cls.__name__)
        if cls.sshd_proc is not None:
            # A previous class may have left a dead daemon behind; detect it
            # so it gets restarted below.
            if not psutil.pid_exists(cls.sshd_proc.pid):
                log.info(
                    '%s: sshd started but appears to be dead now. '
                    'Will try to restart it.',
                    cls.__name__)
                cls.sshd_proc = None
        if cls.sshd_proc is None:
            cls.sshd_proc = start_daemon(cls.sshd_bin,
                                         cls.sshd_config_dir,
                                         cls.sshd_port,
                                         SshdDaemon)
            log.info('%s: sshd started', cls.__name__)
    except AssertionError:
        # Clean up everything set up above, then re-raise with the original
        # traceback (six.reraise keeps py2/py3 compatibility).
        cls.tearDownClass()
        six.reraise(*sys.exc_info())

    if cls.known_hosts_setup is False:
        known_hosts_ret = cls.cls_run_function(
            'ssh.set_known_host',
            user=cls.user,
            hostname='127.0.0.1',
            port=cls.sshd_port,
            enc='ssh-rsa',
            fingerprint='fd:6f:7f:5d:06:6b:f2:06:0d:26:93:9e:5a:b5:19:46',
            hash_known_hosts=False,
            fingerprint_hash_type='md5',
        )
        # ssh.set_known_host reports failures via an 'error' key rather than
        # raising, so translate that into a test failure here.
        if 'error' in known_hosts_ret:
            cls.tearDownClass()
            raise AssertionError(
                'Failed to add key to {0} user\'s known_hosts '
                'file: {1}'.format(cls.master_opts['user'],
                                   known_hosts_ret['error']))
        cls.known_hosts_setup = True