def top(num_processes=5, interval=3):
    '''
    Return a list of top CPU consuming processes during the interval.
    num_processes = return the top N CPU consuming processes
    interval = the number of seconds to sample CPU usage over

    CLI Examples:

    .. code-block:: bash

        salt '*' ps.top

        salt '*' ps.top 5 10
    '''
    result = []
    start_usage = {}
    # First pass: record each process' cumulative CPU time.
    for pid in psutil.pids():
        try:
            process = psutil.Process(pid)
            user, system = process.cpu_times()
        except ValueError:
            # Some platforms return a 4-tuple from cpu_times()
            user, system, _, _ = process.cpu_times()
        except psutil.NoSuchProcess:
            # Process exited between pids() and Process()
            continue
        start_usage[process] = user + system
    time.sleep(interval)
    usage = set()
    # Second pass: compute the CPU time consumed over the interval.
    for process, start in six.iteritems(start_usage):
        try:
            user, system = process.cpu_times()
        except ValueError:
            user, system, _, _ = process.cpu_times()
        except psutil.NoSuchProcess:
            continue
        now = user + system
        diff = now - start
        usage.add((diff, process))

    idx = 0
    # BUGFIX: sort by the CPU delta only. Sorting the raw tuples falls
    # back to comparing psutil.Process objects on equal deltas, which
    # raises TypeError on Python 3.
    for diff, process in sorted(usage, key=lambda x: x[0], reverse=True):
        if num_processes and idx >= num_processes:
            break
        if len(_get_proc_cmdline(process)) == 0:
            cmdline = _get_proc_name(process)
        else:
            cmdline = _get_proc_cmdline(process)
        info = {'cmd': cmdline,
                'user': _get_proc_username(process),
                'status': _get_proc_status(process),
                'pid': _get_proc_pid(process),
                'create_time': _get_proc_create_time(process),
                'cpu': {},
                'mem': {}}
        # BUGFIX: the process may have exited during the sample interval;
        # skip it instead of letting NoSuchProcess escape to the caller.
        try:
            for key, value in six.iteritems(process.cpu_times()._asdict()):
                info['cpu'][key] = value
            for key, value in six.iteritems(process.memory_info()._asdict()):
                info['mem'][key] = value
        except psutil.NoSuchProcess:
            continue
        result.append(info)
        idx += 1
    return result
def _extract_index(index_data, global_index=False):
    '''
    Instantiates and returns an AllIndex object given a valid index
    configuration.

    index_data
        Index configuration: a dict whose values are lists of single-key
        dicts (``hash_key``, ``hash_key_data_type``, ``range_key``,
        ``range_key_data_type``, ``name``, ``read_capacity_units``,
        ``write_capacity_units``).

    global_index
        If True, return a GlobalAllIndex (with throughput) instead of a
        local AllIndex.

    Returns None implicitly when no name/keys could be parsed.
    '''
    parsed_data = {}
    keys = []
    for key, value in six.iteritems(index_data):
        for item in value:
            for field, data in six.iteritems(item):
                if field == 'hash_key':
                    parsed_data['hash_key'] = data
                elif field == 'hash_key_data_type':
                    parsed_data['hash_key_data_type'] = data
                elif field == 'range_key':
                    parsed_data['range_key'] = data
                elif field == 'range_key_data_type':
                    parsed_data['range_key_data_type'] = data
                elif field == 'name':
                    parsed_data['name'] = data
                elif field == 'read_capacity_units':
                    parsed_data['read_capacity_units'] = data
                elif field == 'write_capacity_units':
                    parsed_data['write_capacity_units'] = data
    # BUGFIX: use .get() throughout -- any of these fields may be absent
    # from the configuration, and a plain [] lookup raised KeyError.
    if parsed_data.get('hash_key'):
        keys.append(
            HashKey(
                parsed_data['hash_key'],
                data_type=parsed_data.get('hash_key_data_type')
            )
        )
    if parsed_data.get('range_key'):
        keys.append(
            RangeKey(
                parsed_data['range_key'],
                data_type=parsed_data.get('range_key_data_type')
            )
        )
    if (
            global_index and
            parsed_data.get('read_capacity_units') and
            parsed_data.get('write_capacity_units')):
        parsed_data['throughput'] = {
            'read': parsed_data['read_capacity_units'],
            'write': parsed_data['write_capacity_units']
        }
    if parsed_data.get('name') and keys:
        if global_index:
            return GlobalAllIndex(
                parsed_data['name'],
                parts=keys,
                # .get(): throughput is only set when both capacity
                # fields were supplied; the original raised KeyError.
                throughput=parsed_data.get('throughput')
            )
        else:
            return AllIndex(
                parsed_data['name'],
                parts=keys
            )
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               bucket,
               key=None,
               keyid=None,
               verify_ssl=True,
               location=None,
               multiple_env=False,
               environment='base',
               prefix='',
               service_url=None,
               kms_keyid=None,
               s3_cache_expire=30,  # cache for 30 seconds
               s3_sync_on_update=True):  # sync cache on update rather than jit
    '''
    Compile pillar data from files stored in an S3 bucket.

    The bucket contents are mirrored into a local cache directory and the
    cached tree is then fed to the regular Pillar compiler. The original
    docstring ("Execute a command and read the output as YAML") was a
    copy-paste error and did not describe this function.

    minion_id       the minion whose pillar is being compiled
    pillar          current pillar (unused here; required by the ext_pillar API)
    bucket          S3 bucket name
    key / keyid     S3 credentials (None -> IAM/instance credentials, presumably
                    resolved inside S3Credentials -- confirm)
    multiple_env    if True, each top-level bucket directory is a saltenv
    prefix          only mirror keys under this prefix
    '''
    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
                             kms_keyid, location)

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
                                               bucket))
    if prefix:
        pillar_dir = os.path.normpath(os.path.join(pillar_dir, prefix))

    # If the cache dir is already configured as the only pillar root for
    # this environment we would recurse into ourselves -- bail out.
    if __opts__['pillar_roots'].get(environment, []) == [pillar_dir]:
        return {}

    metadata = _init(s3_creds, bucket, multiple_env, environment, prefix,
                     s3_cache_expire)

    if s3_sync_on_update:
        # sync the buckets to the local cache
        log.info('Syncing local pillar cache from S3...')
        for saltenv, env_meta in six.iteritems(metadata):
            for bucket, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(bucket, saltenv,
                                                             file_path)
                    log.info('{0} - {1} : {2}'.format(bucket, saltenv,
                                                      file_path))
                    # load the file from S3 if not in the cache or too old
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)
        log.info('Sync local pillar cache from S3 completed.')

    opts = deepcopy(__opts__)
    opts['pillar_roots'][environment] = \
        [os.path.join(pillar_dir, environment)] if multiple_env \
        else [pillar_dir]

    # Avoid recursively re-adding this same pillar
    opts['ext_pillar'] = [x for x in opts['ext_pillar'] if 's3' not in x]

    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar()
    return compiled_pillar
def ext_pillar(minion_id, repo, pillar_dirs):
    '''
    Checkout the ext_pillar sources and compile the resulting pillar SLS

    minion_id    the minion whose pillar is being compiled
    repo         either a legacy single-remote string or the newer
                 per-remote configuration structure
    pillar_dirs  passed through to the legacy code path only
    '''
    # A plain string means the legacy single-remote configuration style.
    if isinstance(repo, six.string_types):
        return _legacy_git_pillar(minion_id, repo, pillar_dirs)
    else:
        opts = copy.deepcopy(__opts__)
        opts['pillar_roots'] = {}
        pillar = salt.utils.gitfs.GitPillar(opts)
        pillar.init_remotes(repo, PER_REMOTE_OVERRIDES)
        pillar.checkout()
        ret = {}
        merge_strategy = __opts__.get(
            'pillar_source_merging_strategy',
            'smart'
        )
        for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
            log.debug(
                'git_pillar is processing pillar SLS from {0} for pillar '
                'env \'{1}\''.format(pillar_dir, env)
            )
            # Point pillar_roots at every checkout mapped to this env so
            # that cross-remote includes resolve.
            opts['pillar_roots'] = {
                env: [d for (d, e) in six.iteritems(pillar.pillar_dirs)
                      if env == e]
            }
            local_pillar = Pillar(opts, __grains__, minion_id, env)
            # Merge each env's compiled pillar into the accumulated result
            # using the configured merge strategy.
            ret = salt.utils.dictupdate.merge(
                ret,
                local_pillar.compile_pillar(ext=False),
                strategy=merge_strategy
            )
        return ret
def extract_state_confs(data, is_extend=False):
    # Collect stateconf arguments from a high-data state dict into the
    # module-level registries: STATE_CONF for normal declarations and
    # STATE_CONF_EXT for declarations under 'extend'. Mutates those
    # globals in place; returns None.
    for state_id, state_dict in six.iteritems(data):
        if state_id == 'extend' and not is_extend:
            # Recurse so 'extend' entries land in STATE_CONF_EXT.
            extract_state_confs(state_dict, True)
            continue

        # Only stateconf declarations (by either name) are of interest.
        if STATE_NAME in state_dict:
            key = STATE_NAME
        elif STATE_FUNC in state_dict:
            key = STATE_FUNC
        else:
            continue

        to_dict = STATE_CONF_EXT if is_extend else STATE_CONF
        conf = to_dict.setdefault(state_id, Bunch())
        # Each arg is a single-key {name: value} dict; non-dict entries
        # (e.g. the bare function name) are skipped.
        for sdk in state_dict[key]:
            if not isinstance(sdk, dict):
                continue
            key, val = next(six.iteritems(sdk))
            conf[key] = val

        if not is_extend and state_id in STATE_CONF_EXT:
            extend = STATE_CONF_EXT[state_id]
            # Requisites of the base declaration are appended to the
            # extend's requisites before the extend overwrites the rest.
            for requisite in 'require', 'watch', 'listen':
                if requisite in extend:
                    extend[requisite] += to_dict[state_id].get(requisite, [])
            to_dict[state_id].update(STATE_CONF_EXT[state_id])
def get_enabled():
    '''
    Return a list of all enabled services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    ret = []
    units = _get_all_unit_files()
    services = _get_all_units()

    # Enabled systemd unit files
    for unit_name, unit_state in six.iteritems(units):
        if unit_state == 'enabled':
            ret.append(unit_name)

    # Legacy sysv initscripts that are not shadowed by a unit file
    for svc_name, _ in six.iteritems(services):
        if svc_name in units:
            continue
        # performance: skip services that don't have a legacy sysv
        # initscript before querying systemd about them
        if not _service_is_sysv(svc_name):
            continue
        if _sysv_is_enabled(svc_name):
            ret.append(svc_name)

    return sorted(ret)
def top_matches(self, top):
    '''
    Search through the top high data for matches and return the states
    that this minion needs to execute.

    Returns:
    {'saltenv': ['state1', 'state2', ...]}
    '''
    matches = {}
    pillarenv = self.opts['pillarenv']
    for saltenv, body in six.iteritems(top):
        # When a pillarenv is pinned, only that environment is considered.
        if pillarenv and saltenv != pillarenv:
            continue
        for match, data in six.iteritems(body):
            if not self.matcher.confirm_top(
                    match,
                    data,
                    self.opts.get('nodegroups', {}),
                    ):
                continue
            env_matches = matches.setdefault(saltenv, [])
            # Only plain state names count; dict items are match options.
            for item in data:
                if isinstance(item, six.string_types) \
                        and item not in env_matches:
                    env_matches.append(item)
    return matches
def get_cloud_init_mime(cloud_init):
    '''
    Get a mime multipart encoded string from a cloud-init dict. Currently
    supports scripts and cloud-config.

    CLI Example:

    .. code-block:: bash

        salt myminion boto.get_cloud_init_mime <cloud init>
    '''
    if isinstance(cloud_init, six.string_types):
        cloud_init = json.loads(cloud_init)
    _cloud_init = email.mime.multipart.MIMEMultipart()
    # Attach each supported section with its cloud-init MIME subtype,
    # boothooks first, then shell scripts.
    for section, subtype in (('boothooks', 'cloud-boothook'),
                             ('scripts', 'x-shellscript')):
        if section in cloud_init:
            for _, script in six.iteritems(cloud_init[section]):
                _cloud_init.attach(email.mime.text.MIMEText(script, subtype))
    if 'cloud-config' in cloud_init:
        _cloud_init.attach(
            email.mime.text.MIMEText(_safe_dump(cloud_init['cloud-config']),
                                     'cloud-config'))
    return _cloud_init.as_string()
def test_usage(self):
    '''
    disk.usage
    '''
    ret = self.run_function('disk.usage')
    self.assertTrue(isinstance(ret, dict))
    if not isinstance(ret, dict):
        # Unreachable after the assert above; kept as a defensive guard.
        return
    # Column names differ between the BSD-style df on macOS and GNU df.
    if salt.utils.is_darwin():
        expected_fields = ('filesystem', '512-blocks', 'used', 'available',
                           'capacity', 'iused', 'ifree', '%iused')
    else:
        expected_fields = ('filesystem', '1K-blocks', 'used', 'available',
                           'capacity')
    for key, val in six.iteritems(ret):
        for field in expected_fields:
            self.assertTrue(field in val)
def change(connect_spec, dn, before, after):
    """Modify an entry in an LDAP database.

    This does the same thing as :py:func:`modify`, but with a simpler
    interface. Instead of taking a list of directives, it takes a before
    and after view of an entry, determines the differences between the
    two, computes the directives, and executes them.

    Any attribute value present in ``before`` but missing in ``after``
    is deleted. Any attribute value present in ``after`` but missing in
    ``before`` is added. Any attribute value in the database that is not
    mentioned in either ``before`` or ``after`` is not altered. Any
    attribute value that is present in both ``before`` and ``after`` is
    ignored, regardless of whether that attribute value exists in the
    database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry.

    :param before:
        The expected state of the entry before modification. This is a
        dict mapping each attribute name to an iterable of values.

    :param after:
        The desired state of the entry after modification. This is a
        dict mapping each attribute name to an iterable of values.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.change "{ 'url': 'ldaps://ldap.example.com/', 'bind': { 'method': 'simple', 'password': '******'} }" dn='cn=admin,dc=example,dc=com' before="{'example_value': 'before_val'}" after="{'example_value': 'after_val'}"
    """
    l = connect(connect_spec)
    # Copy the "iterable of values" into plain lists: modifyModlist()
    # may expect lists, and this also keeps the caller's dicts unmodified.
    before = {attr: list(vals) for attr, vals in six.iteritems(before)}
    after = {attr: list(vals) for attr, vals in six.iteritems(after)}
    modlist = ldap.modlist.modifyModlist(before, after)
    try:
        l.c.modify_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)
    return True
def _create_temp_structure(self, temp_directory, structure):
    # Materialize ``structure`` ({folder: {filename: content}}) on disk
    # beneath ``temp_directory``.
    for folder, files in six.iteritems(structure):
        dir_path = os.path.join(temp_directory, folder)
        os.makedirs(dir_path)
        for file_name, file_content in six.iteritems(files):
            file_path = os.path.join(dir_path, file_name)
            with salt.utils.fopen(file_path, "w+") as handle:
                handle.write(file_content)
def _parse_network_settings(opts, current):
    '''
    Filters given options and outputs valid settings for
    the global network settings file.

    opts
        Requested settings; keys are treated case-insensitively.
    current
        Existing settings used for defaults (and as the base result when
        ``retain_settings`` is truthy).
    '''
    # Normalize keys
    opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
    current = dict((k.lower(), v) for (k, v) in six.iteritems(current))

    # Check for supported parameters
    retain_settings = opts.get('retain_settings', False)
    result = current if retain_settings else {}
    valid = _CONFIG_TRUE + _CONFIG_FALSE

    if 'enabled' not in opts:
        try:
            opts['networking'] = current['networking']
            _log_default_network('networking', current['networking'])
        except KeyError:
            # BUGFIX: a missing dict key raises KeyError, not ValueError,
            # so the original handler could never fire and a bare
            # KeyError escaped to the caller.
            _raise_error_network('networking', valid)
    else:
        opts['networking'] = opts['enabled']

    if opts['networking'] in valid:
        if opts['networking'] in _CONFIG_TRUE:
            result['networking'] = 'yes'
        elif opts['networking'] in _CONFIG_FALSE:
            result['networking'] = 'no'
    else:
        _raise_error_network('networking', valid)

    if 'hostname' not in opts:
        try:
            opts['hostname'] = current['hostname']
            _log_default_network('hostname', current['hostname'])
        except KeyError:
            # Narrowed from a blanket ``except Exception``: only the
            # missing-key case can occur on the dict lookup above.
            _raise_error_network('hostname', ['server1.example.com'])

    if opts['hostname']:
        result['hostname'] = opts['hostname']
    else:
        _raise_error_network('hostname', ['server1.example.com'])

    if 'nozeroconf' in opts:
        if opts['nozeroconf'] in valid:
            if opts['nozeroconf'] in _CONFIG_TRUE:
                result['nozeroconf'] = 'true'
            elif opts['nozeroconf'] in _CONFIG_FALSE:
                result['nozeroconf'] = 'false'
        else:
            _raise_error_network('nozeroconf', valid)

    # Pass through any other requested settings verbatim.
    for opt in opts:
        if opt not in ['networking', 'hostname', 'nozeroconf']:
            result[opt] = opts[opt]
    return result
def statelist(states_dict, sid_excludes=frozenset(['include', 'exclude'])):
    # Yield (sid, states, sname, args) for every non-dunder state entry,
    # skipping the 'include'/'exclude' pseudo-sections.
    for sid, states in six.iteritems(states_dict):
        if sid.startswith('__') or sid in sid_excludes:
            continue
        for sname, args in six.iteritems(states):
            if not sname.startswith('__'):
                yield sid, states, sname, args
def create_launch_configuration(name, image_id, key_name=None,
                                security_groups=None, user_data=None,
                                instance_type='m1.small', kernel_id=None,
                                ramdisk_id=None, block_device_mappings=None,
                                instance_monitoring=False, spot_price=None,
                                instance_profile_name=None,
                                ebs_optimized=False,
                                associate_public_ip_address=None,
                                volume_type=None, delete_on_termination=True,
                                iops=None, use_block_device_types=False,
                                region=None, key=None, keyid=None,
                                profile=None):
    '''
    Create a launch configuration.

    Returns True on success, False when the AWS call fails (the error is
    logged).

    block_device_mappings
        List of dicts mapping device names to attribute dicts; may also
        be a JSON string when invoked from the CLI.

    region / key / keyid / profile
        Standard boto connection arguments.

    CLI example::

        salt myminion boto_asg.create_launch_configuration mylc image_id=ami-0b9c9f62 key_name='mykey' security_groups='["mygroup"]' instance_type='c3.2xlarge'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # CLI callers pass JSON strings; decode them into real structures.
    if isinstance(security_groups, six.string_types):
        security_groups = json.loads(security_groups)
    if isinstance(block_device_mappings, six.string_types):
        block_device_mappings = json.loads(block_device_mappings)
    _bdms = []
    if block_device_mappings:
        # Boto requires objects for the mappings and the devices.
        _block_device_map = blockdevicemapping.BlockDeviceMapping()
        for block_device_dict in block_device_mappings:
            for block_device, attributes in six.iteritems(block_device_dict):
                _block_device = blockdevicemapping.EBSBlockDeviceType()
                for attribute, value in six.iteritems(attributes):
                    setattr(_block_device, attribute, value)
                _block_device_map[block_device] = _block_device
        _bdms = [_block_device_map]
    lc = autoscale.LaunchConfiguration(
        name=name,
        image_id=image_id,
        key_name=key_name,
        security_groups=security_groups,
        user_data=user_data,
        instance_type=instance_type,
        kernel_id=kernel_id,
        ramdisk_id=ramdisk_id,
        block_device_mappings=_bdms,
        instance_monitoring=instance_monitoring,
        spot_price=spot_price,
        instance_profile_name=instance_profile_name,
        ebs_optimized=ebs_optimized,
        associate_public_ip_address=associate_public_ip_address,
        volume_type=volume_type,
        delete_on_termination=delete_on_termination,
        iops=iops,
        use_block_device_types=use_block_device_types)
    try:
        conn.create_launch_configuration(lc)
        log.info('Created LC {0}'.format(name))
        return True
    except boto.exception.BotoServerError as e:
        log.debug(e)
        msg = 'Failed to create LC {0}'.format(name)
        log.error(msg)
        return False
def safe_accept(target, tgt_type='glob', expr_form=None):
    '''
    .. versionchanged:: Nitrogen
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Accept a minion's public key after checking the fingerprint over salt-ssh

    CLI Example:

    .. code-block:: bash

        salt-run manage.safe_accept my_minion
        salt-run manage.safe_accept minion1,minion2 tgt_type=list
    '''
    # NOTE(review): ``expr_form`` is accepted for backwards compatibility
    # but is not consumed in this body -- confirm against the deprecation
    # shim elsewhere in the module.
    salt_key = salt.key.Key(__opts__)
    ssh_client = salt.client.ssh.client.SSHClient()
    ret = ssh_client.cmd(target, 'key.finger', tgt_type=tgt_type)

    failures = {}
    # BUGFIX: iterate over a snapshot of the items. Entries are deleted
    # from ``ret`` inside the loop, and mutating a dict while iterating
    # it raises RuntimeError on Python 3.
    for minion, finger in list(six.iteritems(ret)):
        if not FINGERPRINT_REGEX.match(finger):
            failures[minion] = finger
        else:
            fingerprints = salt_key.finger(minion)
            accepted = fingerprints.get('minions', {})
            pending = fingerprints.get('minions_pre', {})
            if minion in accepted:
                # Already accepted; nothing to do for this minion.
                del ret[minion]
                continue
            elif minion not in pending:
                failures[minion] = ("Minion key {0} not found by salt-key"
                                    .format(minion))
            elif pending[minion] != finger:
                failures[minion] = ("Minion key {0} does not match the key in "
                                    "salt-key: {1}"
                                    .format(finger, pending[minion]))
            else:
                subprocess.call(["salt-key", "-qya", minion])
        if minion in failures:
            del ret[minion]

    if failures:
        print('safe_accept failed on the following minions:')
        for minion, message in six.iteritems(failures):
            print(minion)
            print('-' * len(minion))
            print(message)
            print('')

    __jid_event__.fire_event({'message': 'Accepted {0:d} keys'.format(len(ret))}, 'progress')
    return ret, failures
def list_nodes(conn=None, call=None):
    '''
    List the LXC containers known to the configured provider.

    ``conn`` and ``call`` follow the salt-cloud driver convention;
    ``call`` is coerced to 'full' or 'action' depending on the CLI
    options found in ``__opts__``. Returns a dict of node info keyed by
    container name (None when no provider is configured).
    '''
    hide = False
    names = __opts__.get('names', [])
    profiles = __opts__.get('profiles', {})
    profile = __opts__.get('profile',
                           __opts__.get('internal_lxc_profile', []))
    destroy_opt = __opts__.get('destroy', False)
    action = __opts__.get('action', '')
    for opt in ['full_query', 'select_query', 'query']:
        if __opts__.get(opt, False):
            call = 'full'
    if destroy_opt:
        call = 'full'
    if action and not call:
        call = 'action'
    if profile and names and not destroy_opt:
        hide = True
    if not get_configured_provider():
        return

    path = None
    if profile and profile in profiles:
        path = profiles[profile].get('path', None)
    lxclist = _salt('lxc.list', extra=True, path=path)
    nodes = {}
    for state, lxcs in six.iteritems(lxclist):
        for lxcc, linfos in six.iteritems(lxcs):
            info = {
                'id': lxcc,
                'name': lxcc,  # required for cloud cache
                'image': None,
                'size': linfos['size'],
                'state': state.lower(),
                'public_ips': linfos['public_ips'],
                'private_ips': linfos['private_ips'],
            }
            # in creation mode, we need to go inside the create method
            # so we hide the running vm from being seen as already installed
            # do not also mask half configured nodes which are explicitly
            # asked to be acted on, on the command line
            if (call in ['full'] or not hide) and (
                    (lxcc in names and call in ['action']) or
                    call in ['full']):
                # IMPROVEMENT: reuse ``info`` -- the original rebuilt an
                # identical dict literal here.
                nodes[lxcc] = info
            else:
                nodes[lxcc] = {'id': lxcc, 'state': state.lower()}
    return nodes
def _decode_secrets(secrets):
    '''
    Base64-decode every value under the ``data`` mapping of a secret
    (or of each secret in an ``items`` list). Mutates ``secrets`` in
    place and returns it.
    '''
    items = secrets.get("items", [])
    if items:
        for i, secret in enumerate(items):
            # BUGFIX: pass a format string -- logging treats the first
            # positional argument as the message, so ``log.trace(i,
            # secret)`` used the index as the message and dropped/broke
            # the payload formatting.
            log.trace('secret %s: %s', i, secret)
            for k, v in six.iteritems(secret.get("data", {})):
                items[i]['data'][k] = base64.b64decode(v)
        secrets["items"] = items
        return secrets
    else:
        for k, v in six.iteritems(secrets.get("data", {})):
            secrets['data'][k] = base64.b64decode(v)
    return secrets
def diff():
    """Returns the differences between the expected device config and the actual config."""
    _client = _get_client()
    ntp_state_result = _client.cmd('*',
                                   'state.sls',
                                   ['router.ntp', 'test=True'],
                                   expr_form='glob',
                                   timeout=60)
    _ntp_diff = {
        'add': {},
        'remove': {}
    }
    for device, device_states_run in six.iteritems(ntp_state_result):
        for state_run, state_result in six.iteritems(device_states_run):
            # Failed state runs carry no usable change data.
            if state_result.get('result') is False:
                continue
            state_changes = state_result.get('changes', {})
            peers_change = state_changes.get('peers', {})
            servers_change = state_changes.get('servers', {})
            # Record every non-empty change list under
            # _ntp_diff[<action>][<entity>][<device>].
            for action, entity, entries in (
                    ('add', 'peers', peers_change.get('added', [])),
                    ('remove', 'peers', peers_change.get('removed', [])),
                    ('add', 'servers', servers_change.get('added', [])),
                    ('remove', 'servers', servers_change.get('removed', []))):
                if entries:
                    _ntp_diff[action].setdefault(entity, {})[device] = entries
    return _ntp_diff
def safe_accept(target, expr_form="glob"):
    """
    Accept a minion's public key after checking the fingerprint over salt-ssh

    CLI Example:

    .. code-block:: bash

        salt-run manage.safe_accept my_minion
        salt-run manage.safe_accept minion1,minion2 expr_form=list
    """
    salt_key = salt.key.Key(__opts__)
    ssh_client = salt.client.ssh.client.SSHClient()
    ret = ssh_client.cmd(target, "key.finger", expr_form=expr_form)

    failures = {}
    # BUGFIX: iterate over a snapshot of the items. Entries are deleted
    # from ``ret`` inside the loop, and mutating a dict while iterating
    # it raises RuntimeError on Python 3.
    for minion, finger in list(six.iteritems(ret)):
        if not FINGERPRINT_REGEX.match(finger):
            failures[minion] = finger
        else:
            fingerprints = salt_key.finger(minion)
            accepted = fingerprints.get("minions", {})
            pending = fingerprints.get("minions_pre", {})
            if minion in accepted:
                # Already accepted; nothing to do for this minion.
                del ret[minion]
                continue
            elif minion not in pending:
                failures[minion] = "Minion key {0} not found by salt-key".format(minion)
            elif pending[minion] != finger:
                failures[minion] = "Minion key {0} does not match the key in " "salt-key: {1}".format(
                    finger, pending[minion]
                )
            else:
                subprocess.call(["salt-key", "-qya", minion])
        if minion in failures:
            del ret[minion]

    if failures:
        print("safe_accept failed on the following minions:")
        for minion, message in six.iteritems(failures):
            print(minion)
            print("-" * len(minion))
            print(message)
            print("")

    __jid_event__.fire_event({"message": "Accepted {0:d} keys".format(len(ret))}, "progress")
    return ret, failures
def gen_ini(self):
    # Emit this section in INI form: a blank line plus the [header],
    # then comment lines and option lines in insertion order, then any
    # nested sections.
    yield "{0}[{1}]{0}".format(os.linesep, self.name)
    subsections = OrderedDict()
    for opt_name, opt_value in six.iteritems(self):
        if com_regx.match(opt_name):
            # Comment entries: the stored value is the raw comment line.
            yield "{0}{1}".format(opt_value, os.linesep)
        elif isinstance(opt_value, _Section):
            # Defer nested sections until all plain options are written.
            subsections[opt_name] = opt_value
        else:
            separator = (" {0} ".format(self.sep)
                         if self.sep != " " else self.sep)
            yield "{0}{1}{2}{3}".format(opt_name, separator, opt_value,
                                        os.linesep)
    for subsection in six.itervalues(subsections):
        for line in subsection.gen_ini():
            yield line
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               bucket,
               key,
               keyid,
               verify_ssl,
               multiple_env=False,
               environment='base',
               service_url=None):
    '''
    Compile pillar data from files stored in an S3 bucket.

    The bucket contents are mirrored into a local cache directory which
    is then handed to the regular Pillar compiler. The original
    docstring ("Execute a command and read the output as YAML") was a
    copy-paste error and did not describe this function.

    minion_id       the minion whose pillar is being compiled
    pillar          current pillar (unused; required by the ext_pillar API)
    multiple_env    if True, each top-level bucket directory is a saltenv
    '''
    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl)

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
                                               bucket))
    # If the cache dir is already the sole pillar root for this env we
    # would recurse into ourselves -- bail out.
    if __opts__['pillar_roots'].get(environment, []) == [pillar_dir]:
        return {}

    metadata = _init(s3_creds, multiple_env, environment)

    # _s3_sync_on_update is a module-level flag -- presumably set from
    # master config; confirm where it is defined.
    if _s3_sync_on_update:
        # sync the buckets to the local cache
        log.info('Syncing local pillar cache from S3...')
        for saltenv, env_meta in six.iteritems(metadata):
            for bucket, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(bucket, saltenv,
                                                             file_path)
                    log.info('{0} - {1} : {2}'.format(bucket, saltenv,
                                                      file_path))
                    # load the file from S3 if not in the cache or too old
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)
        log.info('Sync local pillar cache from S3 completed.')

    opts = deepcopy(__opts__)
    opts['pillar_roots'][environment] = [pillar_dir]

    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar()
    return compiled_pillar
def _filter_running(runnings):
    '''
    Filter out the result: True + no changes data
    '''
    ret = {}
    for tag, value in six.iteritems(runnings):
        # Keep entries that failed, or that succeeded with changes.
        if not value['result'] or value['changes']:
            ret[tag] = value
    return ret
def list_pkgs(versions_as_list=False, **kwargs):
    """
    List the packages currently installed in a dict::

        {'<package_name>': '<version>'}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
        salt '*' pkg.list_pkgs versions_as_list=True
    """
    versions_as_list = salt.utils.is_true(versions_as_list)
    # not yet implemented or not applicable
    if any(salt.utils.is_true(kwargs.get(x))
           for x in ("removed", "purge_desired")):
        return {}

    ret = {}
    name_map = _get_name_map()
    with salt.utils.winapi.Com():
        for reg_name, version_str in six.iteritems(_get_reg_software()):
            # Translate registry display names to salt package names.
            pkg_name = name_map.get(reg_name, reg_name)
            __salt__["pkg_resource.add_pkg"](ret, pkg_name, version_str)

    __salt__["pkg_resource.sort_pkglist"](ret)
    if not versions_as_list:
        __salt__["pkg_resource.stringify"](ret)
    return ret
def _subnets(proto='inet', interfaces_=None):
    '''
    Returns a list of subnets to which the host belongs
    '''
    if interfaces_ is None:
        ifaces = interfaces()
    elif isinstance(interfaces_, list):
        # Restrict to the requested interface names
        ifaces = dict((name, data)
                      for name, data in six.iteritems(interfaces())
                      if name in interfaces_)
    else:
        ifaces = {interfaces_: interfaces().get(interfaces_, {})}

    if proto == 'inet':
        subnet_key = 'netmask'
    elif proto == 'inet6':
        subnet_key = 'prefixlen'
    else:
        log.error('Invalid proto {0} calling subnets()'.format(proto))
        # NOTE: returns None here (not []), preserving historical behavior.
        return

    ret = set()
    for ip_info in six.itervalues(ifaces):
        addrs = ip_info.get(proto, [])
        addrs.extend(addr for addr in ip_info.get('secondary', [])
                     if addr.get('type') == proto)
        for intf in addrs:
            network = ipaddress.ip_interface(
                '{0}/{1}'.format(intf['address'], intf[subnet_key]))
            if not network.is_loopback:
                ret.add(network.network)
    return [str(net) for net in sorted(ret)]
def _find_value(ret_dict, key, path=None):
    """
    PRIVATE METHOD
    Traverses a dictionary of dictionaries/lists to find key and return
    the value stored.
    TODO:// this method doesn't really work very well, and it's not
    really very useful in its current state. The purpose for this method
    is to simplify parsing the JSON output so you can just pass the key
    you want to find and have it return the value.

    ret : dict<str,obj>
        The dictionary to search through. Typically this will be a dict
        returned from solr.
    key : str
        The key (str) to find in the dictionary

    Return: list<dict<str,obj>>::

        [{path:path, value:value}]
    """
    # Extend the colon-separated path with the key being searched for.
    path = key if path is None else "{0}:{1}".format(path, key)
    found = []
    for current_key, val in six.iteritems(ret_dict):
        if current_key == key:
            found.append({path: val})
        if isinstance(val, list):
            # Recurse into any dicts nested inside the list.
            for element in val:
                if isinstance(element, dict):
                    found = found + _find_value(element, key, path)
        if isinstance(val, dict):
            found = found + _find_value(val, key, path)
    return found
def __get_properties_assignment_string(datasource_properties, ds_resource_description):
    # Build a comma-separated list of attribute assignments for the
    # datasource, validating each one against the resource description.
    ds_attributes = ds_resource_description['attributes']
    return ','.join(
        __get_single_assignment_string(key, val, ds_attributes)
        for key, val in six.iteritems(datasource_properties)
    )
def version(*names, **kwargs):
    """
    Returns a version if the package is installed, else returns an empty
    string

    For a single name the version value itself is returned; for multiple
    names a dict of ``{name: version-or-''}`` is returned.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.version <package name>
    """
    ret = {}
    if len(names) == 1:
        val = __salt__["pkg_resource.version"](*names, **kwargs)
        if val:
            return val
        return ""
    if len(names) > 1:
        nums = __salt__["pkg_resource.version"](*names, **kwargs)
        if nums:
            # CLEANUP: the original routed every entry through a
            # ``reverse_dict`` lookup that was never populated, so the
            # try/except always fell through to ``ret[num] = val``.
            # ``win_names`` was likewise unused dead code.
            for pkg_name, pkg_version in six.iteritems(nums):
                if pkg_version:
                    ret[pkg_name] = pkg_version
            return ret
        # None of the requested packages are installed
        return dict([(x, "") for x in names])
    # No names given: empty dict
    return ret
def _parse_rules(sg, rules):
    # Convert boto rule objects for security group ``sg`` into a list of
    # ordered dicts containing only their non-empty attributes.
    grant_attr_map = {'name': 'source_group_name',
                      'owner_id': 'source_group_owner_id',
                      'group_id': 'source_group_group_id',
                      'cidr_ip': 'cidr_ip'}
    parsed_rules = []
    for rule in rules:
        log.debug('examining rule {0} for group {1}'.format(rule, sg.id))
        _rule = odict.OrderedDict()
        for attr in ('ip_protocol', 'from_port', 'to_port', 'grants'):
            val = getattr(rule, attr)
            if not val:
                continue
            if attr == 'grants':
                _grants = []
                for grant in val:
                    log.debug('examining grant {0} for'.format(grant))
                    _grant = odict.OrderedDict()
                    for g_attr, mapped_name in six.iteritems(grant_attr_map):
                        g_val = getattr(grant, g_attr)
                        if g_val:
                            _grant[mapped_name] = g_val
                    _grants.append(_grant)
                _rule['grants'] = _grants
            elif attr in ('from_port', 'to_port'):
                # Ports arrive as strings from boto; normalize to int.
                _rule[attr] = int(val)
            else:
                _rule[attr] = val
        parsed_rules.append(_rule)
    return parsed_rules
def item(*args, **kwargs):
    '''
    Return one or more grains

    CLI Example:

    .. code-block:: bash

        salt '*' grains.item os
        salt '*' grains.item os osrelease oscodename

    Sanitized CLI Example:

    .. code-block:: bash

        salt '*' grains.item host sanitize=True
    '''
    ret = {}
    # Unknown grain names are silently skipped.
    for arg in args:
        if arg in __grains__:
            ret[arg] = __grains__[arg]
    if salt.utils.is_true(kwargs.get('sanitize')):
        # Scrub sensitive values (e.g. host identifiers) before returning.
        for arg, func in six.iteritems(_SANITIZERS):
            if arg in ret:
                ret[arg] = func(ret[arg])
    return ret
def _ip_addrs(interface=None, include_loopback=False, interface_data=None,
              proto='inet'):
    '''
    Return the full list of IP addresses matching the criteria

    proto = inet|inet6
    '''
    ifaces = (interface_data
              if isinstance(interface_data, dict)
              else interfaces())
    if interface is not None:
        # Narrow to the single requested interface.
        ifaces = dict((name, data) for name, data in six.iteritems(ifaces)
                      if name == interface)
        if not ifaces:
            log.error('Interface {0} not found.'.format(interface))

    ret = set()
    for ip_info in six.itervalues(ifaces):
        addrs = ip_info.get(proto, [])
        addrs.extend(addr for addr in ip_info.get('secondary', [])
                     if addr.get('type') == proto)
        for addr in addrs:
            ip = ipaddress.ip_address(addr.get('address'))
            if include_loopback or not ip.is_loopback:
                ret.add(ip)
    return [str(addr) for addr in sorted(ret)]
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
    # Run saltutil.sync_<modules_kind> on every target minion and block
    # until each reports back (or ``timeout`` expires). Returns True when
    # all minions synced, False when any minion returned an error string;
    # raises SystemExit if waiting on the job times out.
    if not timeout:
        timeout = 120
    # Let's sync all connected minions
    print(
        ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
        '(saltutil.sync_{1})'.format(
            ', '.join(targets), modules_kind, **self.colors
        )
    )
    syncing = set(targets)
    jid_info = self.client.run_job(
        list(targets), 'saltutil.sync_{0}'.format(modules_kind),
        tgt_type='list',
        # Polling below handles completion; the job itself never times out.
        timeout=999999999999999,
    )

    if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
        print(
            ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
            'Tests requiring these {0} WILL fail'.format(
                modules_kind, **self.colors)
        )
        raise SystemExit()

    # Poll for returns until every target has been accounted for.
    while syncing:
        rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
        if rdata:
            for name, output in six.iteritems(rdata):
                if not output['ret']:
                    # Already synced!?
                    syncing.remove(name)
                    continue

                if isinstance(output['ret'], six.string_types):
                    # An error has occurred
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                        '{1}'.format(
                            name, output['ret'], modules_kind,
                            **self.colors)
                    )
                    return False

                print(
                    ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                    '{1}'.format(
                        name, ', '.join(output['ret']), modules_kind, **self.colors
                    )
                )
                # Synced!
                try:
                    syncing.remove(name)
                except KeyError:
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                        '{1}'.format(name, output, **self.colors)
                    )
    return True
def installed(name,
              pkgs=None,
              dir=None,
              user=None,
              force_reinstall=False,
              registry=None,
              env=None):
    '''
    Verify that the given package is installed and is at the correct version
    (if specified).

    .. code-block:: yaml

        coffee-script:
          npm.installed:
            - user: someuser

        [email protected]:
          npm.installed: []

    name
        The package to install

        .. versionchanged:: 2014.7.2
            This parameter is no longer lowercased by salt so that
            case-sensitive NPM package names will work.

    pkgs
        A list of packages to install with a single npm invocation; specifying
        this argument will ignore the ``name`` argument

        .. versionadded:: 2014.7.0

    dir
        The target directory in which to install the package, or None for
        global installation

    user
        The user to run NPM with

        .. versionadded:: 0.17.0

    registry
        The NPM registry from which to install the package

        .. versionadded:: 2014.7.0

    env
        A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`.
        state function.

        .. versionadded:: 2014.7.0

    force_reinstall
        Install the package even if it is already installed
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Normalize to a list of 'name[@version]' requirement strings
    if pkgs is not None:
        pkg_list = pkgs
    else:
        pkg_list = [name]

    try:
        installed_pkgs = __salt__['npm.list'](dir=dir, runas=user, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        return ret
    else:
        # Shallow copy so the membership tests below operate on a plain dict
        # (the previous identity rebuild via a generator did the same thing)
        installed_pkgs = dict(installed_pkgs)

    pkgs_satisfied = []
    pkgs_to_install = []

    def _pkg_is_installed(pkg, installed_pkgs):
        '''
        Helper function to determine if a package is installed

        This performs more complex comparison than just checking
        keys, such as examining source repos to see if the package
        was installed by a different name from the same repo

        :pkg str: The package to compare
        :installed_pkgs: A dictionary produced by npm list --json
        '''
        # BUGFIX: derive the bare package name from the ``pkg`` parameter.
        # The previous implementation ignored its parameter and relied on the
        # ``pkg_name`` variable leaking in from the enclosing loop's scope.
        pkg_name = pkg.partition('@')[0].strip()
        if (pkg_name in installed_pkgs and
                'version' in installed_pkgs[pkg_name]):
            return True
        # Check to see if we are trying to install from a URI
        elif '://' in pkg_name:  # TODO Better way?
            for pkg_details in installed_pkgs.values():
                try:
                    pkg_from = pkg_details.get('from', '').split('://')[1]
                    if pkg_name.split('://')[1] == pkg_from:
                        return True
                except IndexError:
                    pass
        return False

    for pkg in pkg_list:
        # 'name@version' -> (name, version); version may be empty
        pkg_name, _, pkg_ver = pkg.partition('@')
        pkg_name = pkg_name.strip()

        if force_reinstall is True:
            pkgs_to_install.append(pkg)
            continue
        if not _pkg_is_installed(pkg, installed_pkgs):
            pkgs_to_install.append(pkg)
            continue

        installed_name_ver = '{0}@{1}'.format(
            pkg_name, installed_pkgs[pkg_name]['version'])

        # If given an explicit version check the installed version matches.
        if pkg_ver:
            if installed_pkgs[pkg_name].get('version') != pkg_ver:
                pkgs_to_install.append(pkg)
            else:
                pkgs_satisfied.append(installed_name_ver)
            continue
        else:
            pkgs_satisfied.append(installed_name_ver)
            continue

    if __opts__['test']:
        # Dry run: report what would change without calling npm.install
        ret['result'] = None
        comment_msg = []
        if pkgs_to_install:
            comment_msg.append('NPM package(s) {0!r} are set to be installed'
                               .format(', '.join(pkgs_to_install)))
            ret['changes'] = {'old': [], 'new': pkgs_to_install}
        if pkgs_satisfied:
            comment_msg.append('Package(s) {0!r} satisfied by {1}'
                               .format(', '.join(pkg_list),
                                       ', '.join(pkgs_satisfied)))
        ret['comment'] = '. '.join(comment_msg)
        return ret

    if not pkgs_to_install:
        ret['result'] = True
        ret['comment'] = ('Package(s) {0!r} satisfied by {1}'
                          .format(', '.join(pkg_list),
                                  ', '.join(pkgs_satisfied)))
        return ret

    try:
        cmd_args = {
            'dir': dir,
            'runas': user,
            'registry': registry,
            'env': env,
        }
        if pkgs is not None:
            cmd_args['pkgs'] = pkgs
        else:
            cmd_args['pkg'] = pkg_name
        call = __salt__['npm.install'](**cmd_args)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error installing {0!r}: {1}'.format(
            ', '.join(pkg_list), err)
        return ret

    # npm.install returns the parsed JSON output (list/dict) on success
    if call and isinstance(call, (list, dict)):
        ret['result'] = True
        ret['changes'] = {'old': [], 'new': pkgs_to_install}
        ret['comment'] = 'Package(s) {0!r} successfully installed'.format(
            ', '.join(pkgs_to_install))
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install package(s) {0!r}'.format(
            ', '.join(pkg_list))

    return ret
def ordered_dict_presenter(dumper, data):
    '''
    YAML representer for an OrderedDict: emit it as a plain mapping while
    preserving the insertion order of the keys by handing the dumper an
    items iterator instead of the mapping itself.
    '''
    ordered_pairs = six.iteritems(data)
    return dumper.represent_dict(ordered_pairs)
def _format_error(self, minion_error): for minion, error_doc in six.iteritems(minion_error): error = 'Minion [{0}] encountered exception \'{1}\''.format( minion, error_doc['message']) return error
def run(self):
    '''
    Execute the batch run.

    Generator: yields each minion's return as it arrives, batching the
    targeted minions so at most ``bnum`` run concurrently.  Tracks one
    cmd_iter_no_block iterator per dispatched batch and keeps bookkeeping in
    ``minion_tracker`` so unresponsive minions cannot stall the main loop.

    NOTE(review): ``minion_id.keys()[0]`` and ``ping_ret.iterkeys()`` are
    Python-2-only constructs (dict views are not indexable on py3) --
    confirm this file still targets py2.
    '''
    # positional args template for cmd_iter_no_block; slot 0 is filled with
    # each batch's minion list below
    args = [
        [],
        self.opts['fun'],
        self.opts['arg'],
        self.opts['timeout'],
        'list',
    ]
    bnum = self.get_bnum()
    to_run = copy.deepcopy(self.minions)
    active = []
    ret = {}
    iters = []
    # wait the specified time before decide a job is actually done
    bwait = self.opts.get('batch_wait', 0)
    wait = []
    if self.options:
        show_jid = self.options.show_jid
    else:
        show_jid = False
    # the minion tracker keeps track of responses and iterators
    # - it removes finished iterators from iters[]
    # - if a previously detected minion does not respond, its
    #   added with an empty answer to ret{} once the timeout is reached
    # - unresponsive minions are removed from active[] to make
    #   sure that the main while loop finishes even with unresp minions
    minion_tracker = {}
    # Iterate while we still have things to execute
    while len(ret) < len(self.minions):
        next_ = []
        if bwait and wait:
            # expire elapsed batch_wait timestamps to free capacity
            self.__update_wait(wait)
        if len(to_run) <= bnum - len(wait) and not active:
            # last bit of them, add them all to next iterator
            while to_run:
                next_.append(to_run.pop())
        else:
            # fill the batch up to bnum, accounting for in-flight and waiting
            for i in range(bnum - len(active) - len(wait)):
                if to_run:
                    minion_id = to_run.pop()
                    if isinstance(minion_id, dict):
                        # NOTE(review): py2-only -- keys() is not indexable on py3
                        next_.append(minion_id.keys()[0])
                    else:
                        next_.append(minion_id)
        active += next_
        args[0] = next_
        if next_:
            if not self.quiet:
                print_cli('\nExecuting run on {0}\n'.format(next_))
            # create a new iterator for this batch of minions
            new_iter = self.local.cmd_iter_no_block(
                *args,
                raw=self.opts.get('raw', False),
                ret=self.opts.get('return', ''),
                show_jid=show_jid,
                **self.eauth)
            # add it to our iterators and to the minion_tracker
            iters.append(new_iter)
            minion_tracker[new_iter] = {}
            # every iterator added is 'active' and has its set of minions
            minion_tracker[new_iter]['minions'] = next_
            minion_tracker[new_iter]['active'] = True
        else:
            # nothing dispatched this round -- back off briefly
            time.sleep(0.02)
        parts = {}
        # see if we found more minions
        for ping_ret in self.ping_gen:
            if ping_ret is None:
                break
            # NOTE(review): py2-only iterkeys(); confirm target interpreter
            m = next(ping_ret.iterkeys())
            if m not in self.minions:
                self.minions.append(m)
                to_run.append(m)
        # drain every in-flight batch iterator, collecting returns in parts{}
        for queue in iters:
            try:
                # Gather returns until we get to the bottom
                ncnt = 0
                while True:
                    part = next(queue)
                    if part is None:
                        # no data yet; retry a few times before moving on
                        time.sleep(0.01)
                        ncnt += 1
                        if ncnt > 5:
                            break
                        continue
                    if self.opts.get('raw'):
                        parts.update({part['id']: part})
                        if part['id'] in minion_tracker[queue]['minions']:
                            minion_tracker[queue]['minions'].remove(
                                part['id'])
                        else:
                            print_cli(
                                'minion {0} was already deleted from tracker, probably a duplicate key'
                                .format(part['id']))
                    else:
                        parts.update(part)
                        # NOTE(review): 'id' shadows the builtin
                        for id in part.keys():
                            if id in minion_tracker[queue]['minions']:
                                minion_tracker[queue]['minions'].remove(id)
                            else:
                                print_cli(
                                    'minion {0} was already deleted from tracker, probably a duplicate key'
                                    .format(id))
            except StopIteration:
                # if a iterator is done:
                # - set it to inactive
                # - add minions that have not responded to parts{}

                # check if the tracker contains the iterator
                if queue in minion_tracker:
                    minion_tracker[queue]['active'] = False

                    # add all minions that belong to this iterator and
                    # that have not responded to parts{} with an empty response
                    for minion in minion_tracker[queue]['minions']:
                        if minion not in parts:
                            parts[minion] = {}
                            parts[minion]['ret'] = {}
        # surface every collected return to the caller
        for minion, data in six.iteritems(parts):
            if minion in active:
                active.remove(minion)
                if bwait:
                    # hold a slot open for batch_wait seconds
                    wait.append(datetime.now() + timedelta(seconds=bwait))
            if self.opts.get('raw'):
                yield data
            else:
                ret[minion] = data['ret']
                yield {minion: data['ret']}
            if not self.quiet:
                ret[minion] = data['ret']
                # re-key the payload for display_output
                data[minion] = data.pop('ret')
                if 'out' in data:
                    out = data.pop('out')
                else:
                    out = None
                salt.output.display_output(data, out, self.opts)
        # remove inactive iterators from the iters list
        for queue in minion_tracker:
            # only remove inactive queues
            if not minion_tracker[queue]['active'] and queue in iters:
                iters.remove(queue)
                # also remove the iterator's minions from the active list
                for minion in minion_tracker[queue]['minions']:
                    if minion in active:
                        active.remove(minion)
                        if bwait:
                            wait.append(datetime.now() +
                                        timedelta(seconds=bwait))
def test_basic(self):
    '''
    Every function exposed by the raw 'test' module must be namespaced
    under the 'test' virtual module name.
    '''
    loaded_funcs = salt.loader.raw_mod(self.opts, 'test', None)
    for fun_name in loaded_funcs:
        self.assertEqual(fun_name.split('.')[0], 'test')
def modify(name, beacon_data, **kwargs):
    '''
    Modify an existing beacon

    :param name: Name of the beacon to configure
    :param beacon_data: Dictionary or list containing updated configuration for beacon.
    :return: Boolean and status message on success or failure of modify.

    CLI Example:

    .. code-block:: bash

        salt '*' beacons.modify ps "[{'salt-master': 'stopped'}, {'apache2': 'stopped'}]"
    '''
    ret = {'comment': '', 'result': True}

    current_beacons = list_(return_yaml=False, **kwargs)
    if name not in current_beacons:
        # nothing to modify
        ret['comment'] = 'Beacon {0} is not configured.'.format(name)
        return ret

    if 'test' in kwargs and kwargs['test']:
        ret['result'] = True
        # NOTE(review): message says 'added' in a modify() test run --
        # looks copy/pasted from add(); confirm intended wording
        ret['comment'] = 'Beacon: {0} would be added.'.format(name)
    else:
        try:
            # Attempt to load the beacon module so we have access to the validate function
            eventer = salt.utils.event.get_event('minion', opts=__opts__, listen=True)
            res = __salt__['event.fire']({
                'name': name,
                'beacon_data': beacon_data,
                'func': 'validate_beacon'
            }, 'manage_beacons')
            if res:
                event_ret = eventer.get_event(
                    tag='/salt/minion/minion_beacon_validation_complete',
                    wait=kwargs.get('timeout', default_event_wait))
                # NOTE(review): 'valid'/'vcomment' are only bound when res is
                # truthy and an event arrives; otherwise the later references
                # raise UnboundLocalError/TypeError -- confirm
                valid = event_ret['valid']
                vcomment = event_ret['vcomment']
                if not valid:
                    ret['result'] = False
                    ret['comment'] = ('Beacon {0} configuration invalid, '
                                      'not adding.\n{1}'.format(name, vcomment))
                    return ret
        except KeyError:
            # Effectively a no-op, since we can't really return without an event system
            ret['comment'] = 'Event module not available. Beacon modify failed.'

        if not valid:
            ret['result'] = False
            ret['comment'] = ('Beacon {0} configuration invalid, '
                              'not modifying.\n{1}'.format(name, vcomment))
            return ret

        _current = current_beacons[name]
        _new = beacon_data
        if _new == _current:
            # configuration unchanged -- nothing to send
            ret['comment'] = 'Job {0} in correct state'.format(name)
            return ret

        # Build a line-oriented unified diff of the old vs. new configuration
        _current_lines = []
        for _item in _current:
            _current_lines.extend(['{0}:{1}\n'.format(key, value)
                                   for (key, value) in six.iteritems(_item)])
        _new_lines = []
        for _item in _new:
            _new_lines.extend(['{0}:{1}\n'.format(key, value)
                               for (key, value) in six.iteritems(_item)])
        _diff = difflib.unified_diff(_current_lines, _new_lines)
        ret['changes'] = {}
        ret['changes']['diff'] = ''.join(_diff)

        try:
            # fire the actual modify request and wait for the minion's answer
            eventer = salt.utils.event.get_event('minion', opts=__opts__, listen=True)
            res = __salt__['event.fire']({
                'name': name,
                'beacon_data': beacon_data,
                'func': 'modify'
            }, 'manage_beacons')
            if res:
                event_ret = eventer.get_event(
                    tag='/salt/minion/minion_beacon_modify_complete',
                    wait=kwargs.get('timeout', default_event_wait))
                if event_ret and event_ret['complete']:
                    beacons = event_ret['beacons']
                    if name in beacons and beacons[name] == beacon_data:
                        ret['result'] = True
                        ret['comment'] = 'Modified beacon: {0}.'.format(name)
                else:
                    # NOTE(review): event_ret may be None here (timeout),
                    # making event_ret['comment'] raise TypeError -- confirm
                    ret['result'] = False
                    ret['comment'] = event_ret['comment']
                return ret
        except KeyError:
            # Effectively a no-op, since we can't really return without an event system
            # NOTE(review): message says 'add failed' in modify() -- confirm wording
            ret['comment'] = 'Event module not available. Beacon add failed.'
    return ret
def state(name,
          tgt,
          ssh=False,
          tgt_type=None,
          expr_form=None,
          ret='',
          highstate=None,
          sls=None,
          top=None,
          env=None,
          test=False,
          pillar=None,
          expect_minions=False,
          fail_minions=None,
          allow_fail=0,
          concurrent=False,
          timeout=None,
          batch=None):
    '''
    Invoke a state run on a given target

    name
        An arbitrary name used to track the state execution

    tgt
        The target specification for the state run.

    tgt_type | expr_form
        The target type to resolve, defaults to glob

    ret
        Optionally set a single or a list of returners to use

    highstate
        Defaults to None, if set to True the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions

    top
        Should be the name of a top file. If set state.top is called with
        this top file instead of state.sls.

    sls
        A group of sls files to execute. This can be defined as a single
        string containing a single sls file, or a list of sls files

    test
        Pass ``test=true`` through to the state function

    pillar
        Pass the ``pillar`` kwarg through to the state function

    saltenv
        The default salt environment to pull sls files from

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client

    roster
        In the event of using salt-ssh, a roster system can be set

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    allow_fail
        Pass in the number of minions to allow for failure before setting
        the result of the execution to False

    concurrent
        Allow multiple state runs to occur at once.

        WARNING: This flag is potentially dangerous. It is designed
        for use when multiple state runs can safely be run at the same
        Do not use this flag for performance optimization.

    Examples:

    Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on
    target minions:

    .. code-block:: yaml

        webservers:
          salt.state:
            - tgt: 'web*'
            - sls:
              - apache
              - django
              - core
            - saltenv: prod

    Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    mininons.

    .. code-block:: yaml

        databases:
          salt.state:
            - tgt: role:database
            - tgt_type: grain
            - highstate: True
    '''
    # Build cmd_kw BEFORE rebinding 'ret' so the returner string (the 'ret'
    # parameter) is what gets forwarded to saltutil.cmd
    cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}

    try:
        allow_fail = int(allow_fail)
    except ValueError:
        ret['result'] = False
        ret['comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
        return ret

    if env is not None:
        msg = (
            'Passing a salt environment should be done using \'saltenv\' not '
            '\'env\'. This warning will go away in Salt Boron and this '
            'will be the default and expected behavior. Please update your '
            'state files.')
        salt.utils.warn_until('Boron', msg)
        ret.setdefault('warnings', []).append(msg)
        # No need to set __env__ = env since that's done in the state machinery

    # Reconcile the deprecated expr_form with tgt_type
    if expr_form and tgt_type:
        ret.setdefault('warnings', []).append(
            'Please only use \'tgt_type\' or \'expr_form\' not both. '
            'Preferring \'tgt_type\' over \'expr_form\'')
        expr_form = None
    elif expr_form and not tgt_type:
        tgt_type = expr_form
    elif not tgt_type and not expr_form:
        tgt_type = 'glob'

    cmd_kw['expr_form'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions

    # Pick the state entry point: highstate > top > sls
    if highstate:
        fun = 'state.highstate'
    elif top:
        fun = 'state.top'
        cmd_kw['arg'].append(top)
    elif sls:
        fun = 'state.sls'
        if isinstance(sls, list):
            sls = ','.join(sls)
        cmd_kw['arg'].append(sls)
    else:
        ret['comment'] = 'No highstate or sls specified, no execution made'
        ret['result'] = False
        return ret

    if test or __opts__.get('test'):
        cmd_kw['kwarg']['test'] = True
    if pillar:
        cmd_kw['kwarg']['pillar'] = pillar
    cmd_kw['kwarg']['saltenv'] = __env__
    if isinstance(concurrent, bool):
        cmd_kw['kwarg']['concurrent'] = concurrent
    else:
        ret['comment'] = ('Must pass in boolean for value of \'concurrent\'')
        ret['result'] = False
        return ret
    if batch is not None:
        cmd_kw['batch'] = str(batch)

    cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)

    changes = {}
    fail = set()
    failures = {}
    no_change = set()

    # Normalize fail_minions to a sequence of minion ids
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.')
        fail_minions = ()

    for minion, mdata in six.iteritems(cmd_ret):
        if mdata.get('out', '') != 'highstate':
            log.warning("Output from salt state not highstate")
        m_ret = False
        if 'return' in mdata and 'ret' not in mdata:
            mdata['ret'] = mdata.pop('return')
        m_state = True
        if mdata.get('failed', False):
            m_state = False
        else:
            try:
                m_ret = mdata['ret']
            except KeyError:
                m_state = False
            # BUGFIX: validate the returned state data when the return WAS
            # retrieved.  The previous inverted condition
            # (``if not m_state``) only ran check_state_result after a
            # KeyError (on m_ret=False) and never vetted successful returns,
            # so failed states went unreported.
            if m_state:
                m_state = salt.utils.check_state_result(m_ret)
        if not m_state:
            if minion not in fail_minions:
                fail.add(minion)
            failures[minion] = m_ret and m_ret or 'Minion did not respond'
            continue
        # Record per-minion changes; no break without changes means no_change
        for state_item in six.itervalues(m_ret):
            if state_item['changes']:
                changes[minion] = m_ret
                break
        else:
            no_change.add(minion)

    if changes:
        ret['changes'] = {'out': 'highstate', 'ret': changes}
    if len(fail) > allow_fail:
        ret['result'] = False
        ret['comment'] = 'Run failed on minions: {0}'.format(', '.join(fail))
    else:
        ret['comment'] = 'States ran successfully.'
        if changes:
            ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
        if no_change:
            ret['comment'] += ' No changes made to {0}.'.format(
                ', '.join(no_change))
    if failures:
        ret['comment'] += '\nFailures:\n'
        for minion, failure in six.iteritems(failures):
            ret['comment'] += '\n'.join(
                (' ' * 4 + l)
                for l in salt.output.out_format(
                    {minion: failure},
                    'highstate',
                    __opts__,
                ).splitlines())
            ret['comment'] += '\n'
    if test or __opts__.get('test'):
        if ret['changes'] and ret['result'] is True:
            # Test mode with changes is the only case where result should ever be none
            ret['result'] = None
    return ret
def function(name,
             tgt,
             ssh=False,
             tgt_type=None,
             expr_form=None,
             ret='',
             expect_minions=False,
             fail_minions=None,
             fail_function=None,
             arg=None,
             kwarg=None,
             timeout=None,
             batch=None):
    '''
    Execute a single module function on a remote minion via salt or salt-ssh

    name
        The name of the function to run, aka cmd.run or pkg.install

    tgt
        The target specification, aka '*' for all minions

    tgt_type | expr_form
        The target type, defaults to glob

    arg
        The list of arguments to pass into the function

    kwarg
        The list of keyword arguments to pass into the function

    ret
        Optionally set a single or a list of returners to use

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    fail_function
        An optional string that points to a salt module that returns True or
        False based on the returned data dict for individual minions

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client
    '''
    # The state return dict.  BUGFIX: this used to be bound to the name
    # 'ret', shadowing the 'ret' (returner) parameter before cmd_kw was
    # built, which silently passed the result dict to saltutil.cmd instead
    # of the requested returner.
    state_ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}
    if kwarg is None:
        kwarg = {}
    if isinstance(arg, str):
        state_ret['warnings'] = [
            'Please specify \'arg\' as a list, not a string. '
            'Modifying in place, but please update SLS file '
            'to remove this warning.'
        ]
        arg = arg.split()

    # 'ret' here is the untouched returner parameter
    cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}

    # Reconcile the deprecated expr_form with tgt_type
    if expr_form and tgt_type:
        state_ret['warnings'] = [
            'Please only use \'tgt_type\' or \'expr_form\' not both. '
            'Preferring \'tgt_type\' over \'expr_form\''
        ]
        expr_form = None
    elif expr_form and not tgt_type:
        tgt_type = expr_form
    elif not tgt_type and not expr_form:
        tgt_type = 'glob'

    if batch is not None:
        cmd_kw['batch'] = str(batch)
    cmd_kw['expr_form'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    cmd_kw['_cmd_meta'] = True
    fun = name

    if __opts__['test'] is True:
        state_ret['comment'] = (
            'Function {0} will be executed on target {1} as test={2}').format(
                fun, tgt, str(False))
        state_ret['result'] = None
        return state_ret

    try:
        cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
    except Exception as exc:
        state_ret['result'] = False
        state_ret['comment'] = str(exc)
        return state_ret

    changes = {}
    fail = set()
    failures = {}

    # Normalize fail_minions to a sequence of minion ids
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        state_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.')
        fail_minions = ()

    for minion, mdata in six.iteritems(cmd_ret):
        m_ret = False
        if mdata.get('retcode'):
            state_ret['result'] = False
            fail.add(minion)
        if mdata.get('failed', False):
            m_func = False
        else:
            if 'return' in mdata and 'ret' not in mdata:
                mdata['ret'] = mdata.pop('return')
            m_ret = mdata['ret']
            # With no fail_function every return counts as success
            m_func = (not fail_function and True) or __salt__[fail_function](m_ret)

        if not m_func:
            if minion not in fail_minions:
                fail.add(minion)
            failures[minion] = m_ret and m_ret or 'Minion did not respond'
            continue
        changes[minion] = m_ret

    if not cmd_ret:
        state_ret['result'] = False
        # BUGFIX: was mistakenly written to the 'command' key
        state_ret['comment'] = 'No minions responded'
    else:
        if changes:
            state_ret['changes'] = {'out': 'highstate', 'ret': changes}
        if fail:
            state_ret['result'] = False
            state_ret['comment'] = 'Running function {0} failed on minions: {1}'.format(
                name, ', '.join(fail))
        else:
            state_ret['comment'] = 'Function ran successfully.'
        if changes:
            state_ret['comment'] += ' Function {0} ran on {1}.'.format(
                name, ', '.join(changes))
        if failures:
            state_ret['comment'] += '\nFailures:\n'
            for minion, failure in six.iteritems(failures):
                state_ret['comment'] += '\n'.join(
                    (' ' * 4 + l)
                    for l in salt.output.out_format(
                        {minion: failure},
                        'highstate',
                        __opts__,
                    ).splitlines())
                state_ret['comment'] += '\n'
    return state_ret
def _low(self, fun, low, print_event=True, full_return=False):
    '''
    Execute a function from low data

    Low data includes:
        required:
            - fun: the name of the function to run
        optional:
            - arg: a list of args to pass to fun
            - kwarg: kwargs for fun
            - __user__: user who is running the command
            - __jid__: jid to run under
            - __tag__: tag to run under

    Fires 'new' and 'ret' events around the call, stores the job in the
    job cache, and returns either the bare return value or (with
    full_return=True) the whole event data dict.
    '''
    # fire the mminion loading (if not already done) here
    # this is not to clutter the output with the module loading
    # if we have a high debug level.
    self.mminion  # pylint: disable=W0104
    jid = low.get('__jid__', salt.utils.jid.gen_jid())
    tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))

    # Metadata describing this invocation; enriched with the resolved
    # args/kwargs below before events are fired
    data = {
        'fun': '{0}.{1}'.format(self.client, fun),
        'jid': jid,
        'user': low.get('__user__', 'UNKNOWN'),
    }

    event = salt.utils.event.get_event(
        'master',
        self.opts['sock_dir'],
        self.opts['transport'],
        opts=self.opts,
        listen=False)

    if print_event:
        print_func = self.print_async_event \
            if hasattr(self, 'print_async_event') \
            else None
    else:
        # Suppress printing of return event (this keeps us from printing
        # runner/wheel output during orchestration).
        print_func = None

    namespaced_event = salt.utils.event.NamespacedEvent(
        event,
        tag,
        print_func=print_func
    )

    # TODO: document these, and test that they exist
    # TODO: Other things to inject??
    func_globals = {
        '__jid__': jid,
        '__user__': data['user'],
        '__tag__': tag,
        # weak ref to avoid the Exception in interpreter
        # teardown of event
        '__jid_event__': weakref.proxy(namespaced_event),
    }

    try:
        salt.utils.lazy.verify_fun(self.functions, fun)

        # Inject some useful globals to *all* the function's global
        # namespace only once per module-- not per func
        completed_funcs = []
        for mod_name in six.iterkeys(self.functions):
            if '.' not in mod_name:
                continue
            mod, _ = mod_name.split('.', 1)
            if mod in completed_funcs:
                continue
            completed_funcs.append(mod)
            for global_key, value in six.iteritems(func_globals):
                self.functions[mod_name].__globals__[global_key] = value

        # There are some descrepencies of what a "low" structure is in the
        # publisher world it is a dict including stuff such as jid, fun,
        # arg (a list of args, with kwargs packed in). Historically this
        # particular one has had no "arg" and just has had all the kwargs
        # packed into the top level object. The plan is to move away from
        # that since the caller knows what is an arg vs a kwarg, but while
        # we make the transition we will load "kwargs" using format_call if
        # there are no kwargs in the low object passed in
        f_call = None
        if 'arg' not in low:
            f_call = salt.utils.format_call(
                self.functions[fun],
                low,
                expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
            )
            args = f_call.get('args', ())
        else:
            args = low['arg']

        if 'kwarg' not in low:
            if f_call is None:
                f_call = salt.utils.format_call(
                    self.functions[fun],
                    low,
                    expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
                )
            kwargs = f_call.get('kwargs', {})

            # throw a warning for the badly formed low data if we found
            # kwargs using the old mechanism
            if kwargs:
                salt.utils.warn_until(
                    'Carbon',
                    'kwargs must be passed inside the low under "kwargs"'
                )
        else:
            kwargs = low['kwarg']

        # Update the event data with loaded args and kwargs
        data['fun_args'] = args + ([kwargs] if kwargs else [])
        func_globals['__jid_event__'].fire_event(data, 'new')

        # Initialize a context for executing the method.
        with tornado.stack_context.StackContext(
                self.functions.context_dict.clone):
            data['return'] = self.functions[fun](*args, **kwargs)
            data['success'] = True
    except (Exception, SystemExit) as ex:
        if isinstance(ex, salt.exceptions.NotImplemented):
            data['return'] = str(ex)
        else:
            data['return'] = 'Exception occurred in {0} {1}: {2}'.format(
                self.client,
                fun,
                traceback.format_exc(),
            )
        data['success'] = False

    namespaced_event.fire_event(data, 'ret')

    # Best-effort job cache write; a cache error must not fail the run
    try:
        salt.utils.job.store_job(
            self.opts,
            {
                'id': self.opts['id'],
                'tgt': self.opts['id'],
                'jid': data['jid'],
                'return': data,
            },
            event=None,
            mminion=self.mminion,
        )
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job cache info. '
                  'Job details for this run may be unavailable.')

    # if we fired an event, make sure to delete the event object.
    # This will ensure that we call destroy, which will do the 0MQ linger
    log.info('Runner completed: {0}'.format(data['jid']))
    del event
    del namespaced_event
    return data if full_return else data['return']
def render_jinja_tmpl(tmplstr, context, tmplpath=None):
    '''
    Render a Jinja template string with the given context.

    tmplstr -- template source; byte strings are decoded using SLS_ENCODING
    context -- render context; must provide 'opts' and 'saltenv'
    tmplpath -- on-disk path of the template, used to build a filesystem
        loader when rendering outside the state tree (no saltenv)

    Raises SaltRenderError, with the offending line and template buffer
    attached, for Jinja syntax errors, undefined variables, and any other
    exception raised during rendering.
    '''
    opts = context['opts']
    saltenv = context['saltenv']
    loader = None
    newline = False

    if tmplstr and not isinstance(tmplstr, six.text_type):
        # http://jinja.pocoo.org/docs/api/#unicode
        tmplstr = tmplstr.decode(SLS_ENCODING)

    # Remember whether the source ended with a newline: Jinja strips it
    # and we restore it after rendering (see workaround at the bottom)
    if tmplstr.endswith('\n'):
        newline = True

    if not saltenv:
        if tmplpath:
            # i.e., the template is from a file outside the state tree
            #
            # BUGFIX: FileSystemLoader's signature is
            # (searchpath, encoding='utf-8').  The render context was
            # erroneously passed as the searchpath, pushing the directory
            # into the encoding parameter.
            loader = jinja2.FileSystemLoader(os.path.dirname(tmplpath))
    else:
        loader = JinjaSaltCacheLoader(opts, saltenv,
                                      pillar_rend=context.get('_pillar_rend', False))

    env_args = {'extensions': [], 'loader': loader}

    # Only enable extensions the installed Jinja version provides
    if hasattr(jinja2.ext, 'with_'):
        env_args['extensions'].append('jinja2.ext.with_')
    if hasattr(jinja2.ext, 'do'):
        env_args['extensions'].append('jinja2.ext.do')
    if hasattr(jinja2.ext, 'loopcontrols'):
        env_args['extensions'].append('jinja2.ext.loopcontrols')
    env_args['extensions'].append(JinjaSerializerExtension)

    # Pass through trim_blocks and lstrip_blocks Jinja parameters
    # trim_blocks removes newlines around Jinja blocks
    # lstrip_blocks strips tabs and spaces from the beginning of
    # line to the start of a block.
    if opts.get('jinja_trim_blocks', False):
        log.debug('Jinja2 trim_blocks is enabled')
        env_args['trim_blocks'] = True
    if opts.get('jinja_lstrip_blocks', False):
        log.debug('Jinja2 lstrip_blocks is enabled')
        env_args['lstrip_blocks'] = True

    if opts.get('allow_undefined', False):
        jinja_env = jinja2.Environment(**env_args)
    else:
        jinja_env = jinja2.Environment(undefined=jinja2.StrictUndefined,
                                       **env_args)

    jinja_env.filters['strftime'] = salt.utils.date_format
    jinja_env.filters['sequence'] = ensure_sequence_filter
    jinja_env.filters['yaml_dquote'] = salt.utils.yaml_dquote
    jinja_env.filters['yaml_squote'] = salt.utils.yaml_squote
    jinja_env.filters['yaml_encode'] = salt.utils.yaml_encode
    jinja_env.globals['odict'] = OrderedDict
    jinja_env.globals['show_full_context'] = show_full_context

    # Ensure every string value in the context is unicode before rendering
    unicode_context = {}
    for key, value in six.iteritems(context):
        if not isinstance(value, string_types):
            unicode_context[key] = value
            continue

        # Let's try UTF-8 and fail if this still fails, that's why this is not
        # wrapped in a try/except
        if isinstance(value, six.text_type):
            unicode_context[key] = value
        else:
            unicode_context[key] = six.text_type(value, 'utf-8')

    try:
        template = jinja_env.from_string(tmplstr)
        template.globals.update(unicode_context)
        output = template.render(**unicode_context)
    except jinja2.exceptions.TemplateSyntaxError as exc:
        trace = traceback.extract_tb(sys.exc_info()[2])
        line, out = _get_jinja_error(trace, context=unicode_context)
        if not line:
            tmplstr = ''
        raise SaltRenderError('Jinja syntax error: {0}{1}'.format(exc, out),
                              line,
                              tmplstr)
    except jinja2.exceptions.UndefinedError as exc:
        trace = traceback.extract_tb(sys.exc_info()[2])
        out = _get_jinja_error(trace, context=unicode_context)[1]
        tmplstr = ''
        # Don't include the line number, since it is misreported
        # https://github.com/mitsuhiko/jinja2/issues/276
        raise SaltRenderError('Jinja variable {0}{1}'.format(exc, out),
                              buf=tmplstr)
    except (SaltInvocationError, CommandExecutionError) as exc:
        trace = traceback.extract_tb(sys.exc_info()[2])
        line, out = _get_jinja_error(trace, context=unicode_context)
        if not line:
            tmplstr = ''
        raise SaltRenderError(
            'Problem running salt function in Jinja template: {0}{1}'.format(
                exc, out),
            line,
            tmplstr)
    except Exception as exc:
        tracestr = traceback.format_exc()
        trace = traceback.extract_tb(sys.exc_info()[2])
        line, out = _get_jinja_error(trace, context=unicode_context)
        if not line:
            tmplstr = ''
        else:
            tmplstr += '\n{0}'.format(tracestr)
        raise SaltRenderError('Jinja error: {0}{1}'.format(exc, out),
                              line,
                              tmplstr,
                              trace=tracestr)

    # Workaround a bug in Jinja that removes the final newline
    # (https://github.com/mitsuhiko/jinja2/issues/75)
    if newline:
        output += '\n'

    return output
def get_invalid_docs():
    '''
    Outputs the functions which do not have valid CLI example, or are missing a
    docstring.

    Returns a dict with two sorted lists:
        missing_docstring   -- loaded functions whose docstring is not a string
        missing_cli_example -- functions whose docstring lacks an Example block
    '''
    # Functions explicitly exempted from the docstring/example requirement
    allow_failure = (
        'cmd.win_runas',
        'cp.recv',
        'cp.recv_chunked',
        'glance.warn_until',
        'ipset.long_range',
        'libcloud_compute.get_driver',
        'libcloud_dns.get_driver',
        'libcloud_loadbalancer.get_driver',
        'libcloud_storage.get_driver',
        'log.critical',
        'log.debug',
        'log.error',
        'log.exception',
        'log.info',
        'log.warning',
        'lowpkg.bin_pkg_info',
        'lxc.run_cmd',
        'mantest.install',
        'mantest.search',
        'nspawn.restart',
        'nspawn.stop',
        'pkg.expand_repo_def',
        'pip.iteritems',
        'pip.parse_version',
        'peeringdb.clean_kwargs',
        'runtests_decorators.depends',
        'runtests_decorators.depends_will_fallback',
        'runtests_decorators.missing_depends',
        'runtests_decorators.missing_depends_will_fallback',
        'state.apply',
        'status.list2cmdline',
        'swift.head',
        'test.rand_str',
        'travisci.parse_qs',
        'vsphere.clean_kwargs',
        'vsphere.disconnect',
        'vsphere.get_service_instance_via_proxy',
        'vsphere.gets_service_instance_via_proxy',
        'vsphere.supports_proxies',
        'vsphere.test_vcenter_connection',
        'vsphere.wraps',
    )
    # Glob patterns of exempted modules
    allow_failure_glob = (
        'runtests_decorators.*',
        'runtests_helpers.*',
        'vsphere.*',
    )
    nodoc = set()
    noexample = set()
    for fun, docstring in six.iteritems(__salt__['sys.doc']()):
        if fun in allow_failure:
            continue
        if any(fnmatch.fnmatch(fun, pat) for pat in allow_failure_glob):
            continue
        if not isinstance(docstring, six.string_types):
            # No usable docstring at all
            nodoc.add(fun)
        elif not re.search(r'([E|e]xample(?:s)?)+(?:.*):?', docstring):
            # BUGFIX: the original guarded this branch with
            # isinstance(docstring, dict), which can never be true once the
            # string check above has passed, so missing CLI examples were
            # never reported (and a dict would have crashed re.search anyway).
            noexample.add(fun)

    return {'missing_docstring': sorted(nodoc),
            'missing_cli_example': sorted(noexample)}
def state(name,
          tgt,
          ssh=False,
          tgt_type='glob',
          expr_form=None,
          ret='',
          ret_config=None,
          ret_kwargs=None,
          highstate=None,
          sls=None,
          top=None,
          saltenv=None,
          test=False,
          pillar=None,
          pillarenv=None,
          expect_minions=True,
          fail_minions=None,
          allow_fail=0,
          concurrent=False,
          timeout=None,
          batch=None,
          queue=False,
          subset=None,
          orchestration_jid=None):
    '''
    Invoke a state run on a given target

    name
        An arbitrary name used to track the state execution

    tgt
        The target specification for the state run.

        .. versionadded: 2016.11.0

        Masterless support: When running on a masterless minion, the ``tgt``
        is ignored and will always be the local minion.

    tgt_type
        The target type to resolve, defaults to ``glob``

    expr_form
        .. deprecated:: Nitrogen
            Use tgt_type instead

    ret
        Optionally set a single or a list of returners to use

    ret_config
        Use an alternative returner configuration

    ret_kwargs
        Override individual returner configuration items

    highstate
        Defaults to None, if set to True the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions

    top
        Should be the name of a top file. If set state.top is called with
        this top file instead of state.sls.

    sls
        A group of sls files to execute. This can be defined as a single
        string containing a single sls file, or a list of sls files

    test
        Pass ``test=true`` through to the state function

    pillar
        Pass the ``pillar`` kwarg through to the state function

    pillarenv
        The pillar environment to grab pillars from

        .. versionadded:: Nitrogen

    saltenv
        The default salt environment to pull sls files from

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client

    roster
        In the event of using salt-ssh, a roster system can be set

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    allow_fail
        Pass in the number of minions to allow for failure before setting
        the result of the execution to False

    concurrent
        Allow multiple state runs to occur at once.

        WARNING: This flag is potentially dangerous. It is designed for use
        when multiple state runs can safely be run at the same time. Do not
        use this flag for performance optimization.

    queue
        Pass ``queue=true`` through to the state function

    batch
        Execute the command :ref:`in batches <targeting-batch>`. E.g.:
        ``10%``.

        .. versionadded:: 2016.3.0

    subset
        Number of minions from the targeted set to randomly use

        .. versionadded:: Nitrogen

    Examples:

    Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on
    target minions:

    .. code-block:: yaml

        webservers:
          salt.state:
            - tgt: 'web*'
            - sls:
              - apache
              - django
              - core
            - saltenv: prod

    Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    minions.

    .. code-block:: yaml

        databases:
          salt.state:
            - tgt: role:database
            - tgt_type: grain
            - highstate: True
    '''
    # Arguments forwarded to saltutil.cmd (or to the local state function
    # in the masterless case below).
    cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}
    if ret_config:
        cmd_kw['ret_config'] = ret_config
    if ret_kwargs:
        cmd_kw['ret_kwargs'] = ret_kwargs
    state_ret = {'name': name,
                 'changes': {},
                 'comment': '',
                 'result': True}
    # Validate allow_fail early so a bad value fails fast, before any
    # remote execution happens.
    try:
        allow_fail = int(allow_fail)
    except ValueError:
        state_ret['result'] = False
        state_ret['comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
        return state_ret
    # remember to remove the expr_form argument from this function when
    # performing the cleanup on this deprecation.
    if expr_form is not None:
        salt.utils.warn_until(
            'Fluorine',
            'the target type should be passed using the \'tgt_type\' '
            'argument instead of \'expr_form\'. Support for using '
            '\'expr_form\' will be removed in Salt Fluorine.')
        tgt_type = expr_form
    cmd_kw['tgt_type'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    # Exactly one of highstate/top/sls selects the state function to run;
    # precedence is highstate > top > sls.
    if highstate:
        fun = 'state.highstate'
    elif top:
        fun = 'state.top'
        cmd_kw['arg'].append(top)
    elif sls:
        fun = 'state.sls'
        if isinstance(sls, list):
            sls = ','.join(sls)
        cmd_kw['arg'].append(sls)
    else:
        state_ret['comment'] = 'No highstate or sls specified, no execution made'
        state_ret['result'] = False
        return state_ret
    # test mode can come either from the state arg or from the running opts
    if test or __opts__.get('test'):
        cmd_kw['kwarg']['test'] = True
    if pillar:
        cmd_kw['kwarg']['pillar'] = pillar
    # If pillarenv is directly defined, use it
    if pillarenv:
        cmd_kw['kwarg']['pillarenv'] = pillarenv
    # Use pillarenv if it's passed from __opts__ (via state.orchestrate for example)
    elif __opts__.get('pillarenv'):
        cmd_kw['kwarg']['pillarenv'] = __opts__['pillarenv']
    cmd_kw['kwarg']['saltenv'] = saltenv
    cmd_kw['kwarg']['queue'] = queue
    if isinstance(concurrent, bool):
        cmd_kw['kwarg']['concurrent'] = concurrent
    else:
        state_ret['comment'] = ('Must pass in boolean for value of \'concurrent\'')
        state_ret['result'] = False
        return state_ret
    if batch is not None:
        cmd_kw['batch'] = str(batch)
    if subset is not None:
        cmd_kw['subset'] = subset
    # Masterless minion: run the state function locally instead of
    # publishing through saltutil.cmd (tgt is effectively ignored).
    masterless = __opts__['__role'] == 'minion' and \
                 __opts__['file_client'] == 'local'
    if not masterless:
        _fire_args({'type': 'state', 'tgt': tgt, 'name': name, 'args': cmd_kw})
        cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
    else:
        # Translate the positional args into the kwargs the local state
        # functions expect, then fake a single-minion cmd_ret shape.
        if top:
            cmd_kw['topfn'] = ''.join(cmd_kw.pop('arg'))
        elif sls:
            cmd_kw['mods'] = cmd_kw.pop('arg')
        cmd_kw.update(cmd_kw.pop('kwarg'))
        tmp_ret = __salt__[fun](**cmd_kw)
        cmd_ret = {
            __opts__['id']: {
                'ret': tmp_ret,
                'out': tmp_ret.get('out', 'highstate')
                       if isinstance(tmp_ret, dict) else 'highstate'
            }
        }
    # Surface the jid of the remote run (if any minion returned one)
    try:
        state_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
    except (StopIteration, KeyError):
        pass
    changes = {}
    fail = set()
    failures = {}
    no_change = set()
    # Normalize fail_minions into a list of minion ids
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        state_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.')
        fail_minions = ()
    if not cmd_ret and expect_minions:
        state_ret['result'] = False
        state_ret['comment'] = 'No minions returned'
        return state_ret
    # Inspect each minion's return: classify into fail / changes / no_change
    for minion, mdata in six.iteritems(cmd_ret):
        if mdata.get('out', '') != 'highstate':
            log.warning('Output from salt state not highstate')
        m_ret = False
        # Some transports use 'return' instead of 'ret'; normalize
        if 'return' in mdata and 'ret' not in mdata:
            mdata['ret'] = mdata.pop('return')
        m_state = True
        if mdata.get('failed', False):
            m_state = False
        else:
            try:
                m_ret = mdata['ret']
            except KeyError:
                m_state = False
            if m_state:
                m_state = salt.utils.check_state_result(m_ret, recurse=True)
        if not m_state:
            # Minions listed in fail_minions are allowed to fail silently
            if minion not in fail_minions:
                fail.add(minion)
            failures[minion] = m_ret or 'Minion did not respond'
            continue
        try:
            # for/else: only reached when no state item reported changes
            for state_item in six.itervalues(m_ret):
                if isinstance(state_item, dict):
                    if 'changes' in state_item and state_item['changes']:
                        changes[minion] = m_ret
                        break
            else:
                no_change.add(minion)
        except AttributeError:
            log.error("m_ret did not have changes %s %s", type(m_ret), m_ret)
            no_change.add(minion)
    if changes:
        state_ret['changes'] = {'out': 'highstate', 'ret': changes}
    if len(fail) > allow_fail:
        state_ret['result'] = False
        state_ret['comment'] = 'Run failed on minions: {0}'.format(', '.join(fail))
    else:
        state_ret['comment'] = 'States ran successfully.'
        if changes:
            state_ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
        if no_change:
            state_ret['comment'] += ' No changes made to {0}.'.format(', '.join(no_change))
    if failures:
        # Render each failing minion's highstate output, indented 4 spaces
        state_ret['comment'] += '\nFailures:\n'
        for minion, failure in six.iteritems(failures):
            state_ret['comment'] += '\n'.join(
                (' ' * 4 + l)
                for l in salt.output.out_format(
                    {minion: failure},
                    'highstate',
                    __opts__,
                ).splitlines()
            )
            state_ret['comment'] += '\n'
    if test or __opts__.get('test'):
        if state_ret['changes'] and state_ret['result'] is True:
            # Test mode with changes is the only case where result should ever be none
            state_ret['result'] = None
    return state_ret
def move(name, target, **kwargs):
    """
    Move scheduled job to another minion or minions.

    name
        The name of the scheduled job to move.

    target
        The minion target to publish ``schedule.add`` to.

    Accepts ``test=True`` in kwargs to do a dry run.

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.move jobname target
    """
    ret = {"comment": [], "result": True}

    if not name:
        ret["comment"] = "Job name is required."
        ret["result"] = False
        # BUG FIX: previously fell through and attempted to move an
        # unnamed job; bail out immediately instead.
        return ret

    if "test" in kwargs and kwargs["test"]:
        ret["comment"] = "Job: {0} would be moved from schedule.".format(name)
    else:
        # Look the job up in the in-memory (opts) schedule first, then pillar
        opts_schedule = list_(show_all=True, where="opts", return_yaml=False)
        pillar_schedule = list_(show_all=True, where="pillar", return_yaml=False)

        if name in opts_schedule:
            schedule_data = opts_schedule[name]
            where = None
        elif name in pillar_schedule:
            schedule_data = pillar_schedule[name]
            where = "pillar"
        else:
            ret["comment"] = "Job {0} does not exist.".format(name)
            ret["result"] = False
            return ret

        # Serialize the job definition as key=value strings for publish
        schedule_opts = []
        for key, value in six.iteritems(schedule_data):
            temp = "{0}={1}".format(key, value)
            schedule_opts.append(temp)
        response = __salt__["publish.publish"](target, "schedule.add", schedule_opts)

        # Get errors and list of affected minions
        errors = []
        minions = []
        for minion in response:
            minions.append(minion)
            if not response[minion]:
                errors.append(minion)

        # parse response
        if not response:
            ret["comment"] = "no servers answered the published schedule.add command"
            return ret
        elif errors:
            ret["comment"] = "the following minions return False"
            ret["minions"] = errors
            return ret
        else:
            # Only delete the local copy once every target accepted the job
            delete(name, where=where)
            ret["result"] = True
            ret["comment"] = "Moved Job {0} from schedule.".format(name)
            ret["minions"] = minions
            return ret
    return ret
def init():
    '''
    Return the list of svn remotes and their configuration information.

    Each entry in the returned list is a per-remote config dict containing
    at least 'repo', 'url', 'hash', 'cachedir' and 'lockfile'.
    '''
    bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
    new_remote = False
    repos = []

    # Global defaults, overridable per remote
    per_remote_defaults = {}
    for param in PER_REMOTE_OVERRIDES:
        per_remote_defaults[param] = \
            six.text_type(__opts__['svnfs_{0}'.format(param)])

    for remote in __opts__['svnfs_remotes']:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            repo_url = next(iter(remote))
            per_remote_conf = dict([
                (key, six.text_type(val)) for key, val in six.iteritems(
                    salt.utils.data.repack_dictlist(remote[repo_url]))
            ])
            if not per_remote_conf:
                log.error(
                    'Invalid per-remote configuration for remote {0}. If no '
                    'per-remote parameters are being specified, there may be '
                    'a trailing colon after the URL, which should be removed. '
                    'Check the master configuration file.'.format(repo_url))
                _failhard()

            # Reject unknown per-remote parameters (log them all, then abort)
            per_remote_errors = False
            for param in (x for x in per_remote_conf
                          if x not in PER_REMOTE_OVERRIDES):
                log.error(
                    'Invalid configuration parameter \'{0}\' for remote {1}. '
                    'Valid parameters are: {2}. See the documentation for '
                    'further information.'.format(
                        param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)))
                per_remote_errors = True
            if per_remote_errors:
                _failhard()

            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, six.string_types):
            log.error(
                'Invalid svnfs remote {0}. Remotes must be strings, you may '
                'need to enclose the URL in quotes'.format(repo_url))
            _failhard()

        try:
            repo_conf['mountpoint'] = salt.utils.url.strip_proto(
                repo_conf['mountpoint'])
        except TypeError:
            # mountpoint not specified
            pass

        # Cache directory is keyed by the hash of the remote URL
        # NOTE(review): on Python 3, hashlib requires bytes; repo_url may
        # need to be encoded here — verify the target interpreter.
        hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
        repo_hash = hash_type(repo_url).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)

        if not os.listdir(rp_):
            # Only attempt a new checkout if the directory is empty.
            try:
                CLIENT.checkout(repo_url, rp_)
                # BUG FIX: previously also did repos.append(rp_), mixing a
                # bare path string into the list of repo config dicts; that
                # broke the remote_map writer below (repo_conf['hash'] on a
                # str) and the documented return value. The flag alone is
                # enough to trigger the map rewrite.
                new_remote = True
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    'Failed to initialize svnfs remote \'{0}\': {1}'.format(
                        repo_url, exc))
                _failhard()
        else:
            # Confirm that there is an svn checkout at the necessary path by
            # running pysvn.Client().status()
            try:
                CLIENT.status(rp_)
            except pysvn._pysvn.ClientError:
                log.error(
                    'Cache path {0} (corresponding remote: {1}) exists but is '
                    'not a valid subversion checkout. You will need to '
                    'manually delete this directory on the master to continue '
                    'to use this svnfs remote.'.format(rp_, repo_url))
                _failhard()

        repo_conf.update({
            'repo': rp_,
            'url': repo_url,
            'hash': repo_hash,
            'cachedir': rp_,
            'lockfile': os.path.join(rp_, 'update.lk')
        })
        repos.append(repo_conf)

    if new_remote:
        # Persist a human-readable map of cache-hash -> remote URL
        remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
        try:
            with salt.utils.files.fopen(remote_map, 'w+') as fp_:
                timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
                fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
                for repo_conf in repos:
                    fp_.write('{0} = {1}\n'.format(repo_conf['hash'],
                                                   repo_conf['url']))
        except OSError:
            pass
        else:
            log.info('Wrote new svnfs_remote map to {0}'.format(remote_map))

    return repos
def lvcreate(lvname,
             vgname,
             size=None,
             extents=None,
             snapshot=None,
             pv=None,
             thinvolume=False,
             thinpool=False,
             force=False,
             **kwargs):
    '''
    Create a new logical volume, with option for which physical volume to be
    used.

    Exactly one of ``size`` or ``extents`` must be given (thin volumes only
    accept ``size``). Returns the lvdisplay data of the new volume with the
    first line of lvcreate output added, or an ``Error: ...`` string on
    invalid arguments.

    CLI Examples:

    .. code-block:: bash

        salt '*' lvm.lvcreate new_volume_name     vg_name size=10G
        salt '*' lvm.lvcreate new_volume_name     vg_name extents=100 pv=/dev/sdb
        salt '*' lvm.lvcreate new_snapshot        vg_name snapshot=volume_name size=3G

    .. versionadded:: to_complete

    Support for thin pools and thin volumes

    CLI Examples:

    .. code-block:: bash

        salt '*' lvm.lvcreate new_thinpool_name   vg_name               size=20G thinpool=True
        salt '*' lvm.lvcreate new_thinvolume_name vg_name/thinpool_name size=10G thinvolume=True

    '''
    if size and extents:
        return 'Error: Please specify only one of size or extents'
    if thinvolume and thinpool:
        return 'Error: Please set only one of thinvolume or thinpool to True'

    # lvcreate options that take a value
    valid = ('activate', 'chunksize', 'contiguous', 'discards', 'stripes',
             'stripesize', 'minor', 'persistent', 'mirrors', 'noudevsync',
             'monitor', 'ignoremonitoring', 'permission', 'poolmetadatasize',
             'readahead', 'regionsize', 'type', 'virtualsize', 'zero')
    # lvcreate flags that take no value
    no_parameter = (
        'noudevsync',
        'ignoremonitoring',
        'thin',
    )

    extra_arguments = []
    if kwargs:
        for k, v in six.iteritems(kwargs):
            if k in no_parameter:
                extra_arguments.append('--{0}'.format(k))
            elif k in valid:
                extra_arguments.extend(['--{0}'.format(k), '{0}'.format(v)])

    cmd = [salt.utils.path.which('lvcreate')]

    if thinvolume:
        cmd.extend(['--thin', '-n', lvname])
    elif thinpool:
        cmd.extend(['--thinpool', lvname])
    else:
        cmd.extend(['-n', lvname])

    if snapshot:
        cmd.extend(['-s', '{0}/{1}'.format(vgname, snapshot)])
    else:
        cmd.append(vgname)

    if size and thinvolume:
        cmd.extend(['-V', '{0}'.format(size)])
    elif extents and thinvolume:
        return 'Error: Thin volume size cannot be specified as extents'
    elif size:
        cmd.extend(['-L', '{0}'.format(size)])
    elif extents:
        cmd.extend(['-l', '{0}'.format(extents)])
    else:
        return 'Error: Either size or extents must be specified'

    if pv:
        cmd.append(pv)
    if extra_arguments:
        cmd.extend(extra_arguments)

    if force:
        # BUG FIX: was '-yes', which lvcreate does not recognize (it would
        # be parsed as bundled short options). The non-interactive
        # "answer yes" flag is '--yes' (short form '-y').
        cmd.append('--yes')

    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    lvdev = '/dev/{0}/{1}'.format(vgname, lvname)
    lvdata = lvdisplay(lvdev)
    lvdata['Output from lvcreate'] = out[0].strip()
    return lvdata
def __virtual__():
    '''
    Set up the libcloud functions and check for AWS configs.

    Refuses to load when botocore is available (the boto-based ec2 driver
    takes precedence), when the provider is not configured, or when
    dependencies are missing. Raises SaltCloudException for missing or
    overly-permissive key files.
    '''
    try:
        import botocore
        # Since we have botocore, we won't load the libcloud AWS module
        return False
    except ImportError:
        pass

    if get_configured_provider() is False:
        return False

    if get_dependencies() is False:
        return False

    for provider, details in six.iteritems(__opts__['providers']):
        if 'provider' not in details or details['provider'] != 'aws':
            continue
        if not os.path.exists(details['private_key']):
            raise SaltCloudException(
                'The AWS key file {0!r} used in the {1!r} provider '
                'configuration does not exist\n'.format(
                    details['private_key'], provider))
        # BUG FIX: the previous check compared str(oct(mode)) against
        # ('0400', '0600'), which only matches Python 2's oct() format;
        # Python 3 produces '0o400' so every key file was rejected.
        # Compare the numeric permission bits instead.
        keymode = stat.S_IMODE(os.stat(details['private_key']).st_mode)
        if keymode not in (0o400, 0o600):
            raise SaltCloudException(
                'The AWS key file {0!r} used in the {1!r} provider '
                'configuration needs to be set to mode 0400 or 0600\n'.format(
                    details['private_key'], provider))

    global avail_images, avail_sizes, script, list_nodes
    global avail_locations, list_nodes_full, list_nodes_select, get_image
    global get_size, libcloudfuncs_destroy, show_instance

    # open a connection in a specific region
    conn = get_conn(**{'location': get_location()})

    # Init the libcloud functions
    get_size = namespaced_function(get_size, globals(), (conn, ))
    get_image = namespaced_function(get_image, globals(), (conn, ))
    avail_locations = namespaced_function(avail_locations, globals(), (conn, ))
    avail_images = namespaced_function(avail_images, globals(), (conn, ))
    avail_sizes = namespaced_function(avail_sizes, globals(), (conn, ))
    script = namespaced_function(script, globals(), (conn, ))
    list_nodes = namespaced_function(list_nodes, globals(), (conn, ))
    list_nodes_full = namespaced_function(list_nodes_full, globals(), (conn, ))
    list_nodes_select = namespaced_function(list_nodes_select, globals(), (conn, ))
    libcloudfuncs_destroy = namespaced_function(libcloudfuncs_destroy, globals(), (conn, ))
    show_instance = namespaced_function(show_instance, globals())

    log.warning('This driver has been deprecated and will be removed in the '
                'Boron release of Salt. Please use the ec2 driver instead.')

    return __virtualname__
def function(name,
             tgt,
             ssh=False,
             tgt_type='glob',
             expr_form=None,
             ret='',
             ret_config=None,
             ret_kwargs=None,
             expect_minions=False,
             fail_minions=None,
             fail_function=None,
             arg=None,
             kwarg=None,
             timeout=None,
             batch=None,
             subset=None):
    '''
    Execute a single module function on a remote minion via salt or salt-ssh

    name
        The name of the function to run, aka cmd.run or pkg.install

    tgt
        The target specification, aka '*' for all minions

    tgt_type
        The target type, defaults to ``glob``

    expr_form
        .. deprecated:: Nitrogen
            Use tgt_type instead

    arg
        The list of arguments to pass into the function

    kwarg
        The dict (not a list) of keyword arguments to pass into the function

    ret
        Optionally set a single or a list of returners to use

    ret_config
        Use an alternative returner configuration

    ret_kwargs
        Override individual returner configuration items

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    fail_function
        An optional string that points to a salt module that returns True or
        False based on the returned data dict for individual minions

    ssh
        Set to `True` to use the ssh client instead of the standard salt
        client

    batch
        Execute the command :ref:`in batches <targeting-batch>`. E.g.:
        ``10%``.

    subset
        Number of minions from the targeted set to randomly use

        .. versionadded:: Nitrogen
    '''
    func_ret = {'name': name,
                'changes': {},
                'comment': '',
                'result': True}
    if kwarg is None:
        kwarg = {}
    # CONSISTENCY FIX: use string_types (as fail_minions below does) instead
    # of bare str so Py2 unicode args are also split into a list.
    if isinstance(arg, string_types):
        func_ret['warnings'] = [
            'Please specify \'arg\' as a list, not a string. '
            'Modifying in place, but please update SLS file '
            'to remove this warning.'
        ]
        arg = arg.split()

    cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}

    # remember to remove the expr_form argument from this function when
    # performing the cleanup on this deprecation.
    if expr_form is not None:
        salt.utils.warn_until(
            'Fluorine',
            'the target type should be passed using the \'tgt_type\' '
            'argument instead of \'expr_form\'. Support for using '
            '\'expr_form\' will be removed in Salt Fluorine.')
        tgt_type = expr_form

    if batch is not None:
        cmd_kw['batch'] = str(batch)
    if subset is not None:
        cmd_kw['subset'] = subset

    cmd_kw['tgt_type'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    cmd_kw['_cmd_meta'] = True

    if ret_config:
        cmd_kw['ret_config'] = ret_config

    if ret_kwargs:
        cmd_kw['ret_kwargs'] = ret_kwargs

    fun = name
    if __opts__['test'] is True:
        func_ret['comment'] = (
            'Function {0} will be executed on target {1} as test={2}').format(
                fun, tgt, str(False))
        func_ret['result'] = None
        return func_ret
    try:
        _fire_args({
            'type': 'function',
            'tgt': tgt,
            'name': name,
            'args': cmd_kw
        })
        cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
    except Exception as exc:
        func_ret['result'] = False
        func_ret['comment'] = str(exc)
        return func_ret

    # Surface the jid of the remote run (if any minion returned one)
    try:
        func_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
    except (StopIteration, KeyError):
        pass

    changes = {}
    fail = set()
    failures = {}

    # Normalize fail_minions into a list of minion ids
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        func_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.')
        fail_minions = ()

    for minion, mdata in six.iteritems(cmd_ret):
        m_ret = False
        if mdata.get('retcode'):
            func_ret['result'] = False
            fail.add(minion)
        if mdata.get('failed', False):
            m_func = False
        else:
            if 'return' in mdata and 'ret' not in mdata:
                mdata['ret'] = mdata.pop('return')
            m_ret = mdata['ret']
            # Success unless a fail_function is configured and rejects it
            m_func = (not fail_function and True) or __salt__[fail_function](m_ret)

        if not m_func:
            if minion not in fail_minions:
                fail.add(minion)
            failures[minion] = m_ret or 'Minion did not respond'
            continue
        changes[minion] = m_ret

    if not cmd_ret:
        func_ret['result'] = False
        # BUG FIX: the message was previously stored under the nonexistent
        # 'command' key instead of 'comment', so it never surfaced.
        func_ret['comment'] = 'No minions responded'
    else:
        if changes:
            func_ret['changes'] = {'out': 'highstate', 'ret': changes}
        if fail:
            func_ret['result'] = False
            func_ret['comment'] = 'Running function {0} failed on minions: {1}'.format(
                name, ', '.join(fail))
        else:
            func_ret['comment'] = 'Function ran successfully.'
        if changes:
            func_ret['comment'] += ' Function {0} ran on {1}.'.format(
                name, ', '.join(changes))
        if failures:
            # Render each failing minion's output, indented 4 spaces
            func_ret['comment'] += '\nFailures:\n'
            for minion, failure in six.iteritems(failures):
                func_ret['comment'] += '\n'.join(
                    (' ' * 4 + l)
                    for l in salt.output.out_format(
                        {minion: failure},
                        'highstate',
                        __opts__,
                    ).splitlines()
                )
                func_ret['comment'] += '\n'
    return func_ret
def kill_children(self, *args, **kwargs):
    '''
    Kill all of the children tracked in ``self._process_map``.

    Intended as a SIGTERM/SIGINT handler: ``args`` (if any) carries the
    signal number received, ``kwargs['retry']`` bounds re-entrant retries
    when children ignore SIGKILL.
    '''
    # first lets reset signal handlers to default one to prevent running this twice
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # check that this is the correct process, children inherit this
    # handler, if we are in a child lets just run the original handler
    if os.getpid() != self._pid:
        if callable(self._sigterm_handler):
            return self._sigterm_handler(*args)
        elif self._sigterm_handler is not None:
            return signal.default_int_handler(signal.SIGTERM)(*args)
        else:
            return
    if salt.utils.platform.is_windows():
        if multiprocessing.current_process().name != 'MainProcess':
            # Since the main process will kill subprocesses by tree,
            # no need to do anything in the subprocesses.
            # Sometimes, when both a subprocess and the main process
            # call 'taskkill', it will leave a 'taskkill' zombie process.
            # We want to avoid this.
            return
        with salt.utils.files.fopen(os.devnull, 'wb') as devnull:
            for pid, p_map in six.iteritems(self._process_map):
                # On Windows, we need to explicitly terminate sub-processes
                # because the processes don't have a sigterm handler.
                subprocess.call(
                    ['taskkill', '/F', '/T', '/PID', six.text_type(pid)],
                    stdout=devnull, stderr=devnull)
                p_map['Process'].terminate()
    else:
        # POSIX: iterate over a copy since entries are deleted during the loop
        for pid, p_map in six.iteritems(self._process_map.copy()):
            log.trace('Terminating pid %s: %s', pid, p_map['Process'])
            if args:
                # escalate the signal to the process
                try:
                    os.kill(pid, args[0])
                except OSError:
                    pass
            try:
                p_map['Process'].terminate()
            except OSError as exc:
                # ESRCH: already gone; EACCES: not ours to kill - both benign
                if exc.errno not in (errno.ESRCH, errno.EACCES):
                    raise
            if not p_map['Process'].is_alive():
                try:
                    del self._process_map[pid]
                except KeyError:
                    # Race condition
                    pass

    end_time = time.time() + self.wait_for_kill  # when to die
    log.trace('Waiting to kill process manager children')
    # Grace period: give children up to wait_for_kill seconds to exit
    while self._process_map and time.time() < end_time:
        for pid, p_map in six.iteritems(self._process_map.copy()):
            log.trace('Joining pid %s: %s', pid, p_map['Process'])
            p_map['Process'].join(0)

            if not p_map['Process'].is_alive():
                # The process is no longer alive, remove it from the process map dictionary
                try:
                    del self._process_map[pid]
                except KeyError:
                    # This is a race condition if a signal was passed to all children
                    pass

    # if any managed processes still remain to be handled, let's kill them
    kill_iterations = 2
    while kill_iterations >= 0:
        kill_iterations -= 1
        for pid, p_map in six.iteritems(self._process_map.copy()):
            if not p_map['Process'].is_alive():
                # The process is no longer alive, remove it from the process map dictionary
                try:
                    del self._process_map[pid]
                except KeyError:
                    # This is a race condition if a signal was passed to all children
                    pass
                continue
            log.trace('Killing pid %s: %s', pid, p_map['Process'])
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError as exc:
                log.exception(exc)
                # in case the process has since decided to die, os.kill returns OSError
                if not p_map['Process'].is_alive():
                    # The process is no longer alive, remove it from the process map dictionary
                    try:
                        del self._process_map[pid]
                    except KeyError:
                        # This is a race condition if a signal was passed to all children
                        pass

    if self._process_map:
        # Some processes disrespected the KILL signal!!!!
        available_retries = kwargs.get('retry', 3)
        if available_retries >= 0:
            log.info(
                'Some processes failed to respect the KILL signal: %s',
                '; '.join(
                    'Process: {0} (Pid: {1})'.format(v['Process'], k) for  # pylint: disable=str-format-in-logging
                    (k, v) in self._process_map.items()))
            log.info('kill_children retries left: %s', available_retries)
            kwargs['retry'] = available_retries - 1
            # Recurse with a decremented retry budget
            return self.kill_children(*args, **kwargs)
        else:
            log.warning(
                'Failed to kill the following processes: %s',
                '; '.join(
                    'Process: {0} (Pid: {1})'.format(v['Process'], k) for  # pylint: disable=str-format-in-logging
                    (k, v) in self._process_map.items()))
            log.warning(
                'Salt will either fail to terminate now or leave some '
                'zombie processes behind')
def chassis(name,
            chassis_name=None,
            password=None,
            datacenter=None,
            location=None,
            mode=None,
            idrac_launch=None,
            slot_names=None,
            blade_power_states=None):
    '''
    Manage a Dell Chassis.

    chassis_name
        The name of the chassis.

    datacenter
        The datacenter in which the chassis is located

    location
        The location of the chassis.

    password
        Password for the chassis. Note: If this password is set for the
        chassis, the current implementation of this state will set this
        password both on the chassis and the iDrac passwords on any configured
        blades. If the password for the blades should be distinct, they should
        be set separately with the blade_idrac function.

    mode
        The management mode of the chassis. Viable options are:

        - 0: None
        - 1: Monitor
        - 2: Manage and Monitor

    idrac_launch
        The iDRAC launch method of the chassis. Viable options are:

        - 0: Disabled (launch iDRAC using IP address)
        - 1: Enabled (launch iDRAC using DNS name)

    slot_names
        The names of the slots, provided as a list identified by
        their slot numbers.

    blade_power_states
        The power states of a blade server, provided as a list and identified
        by their server numbers. Viable options are:

        - on: Ensure the blade server is powered on.
        - off: Ensure the blade server is powered off.
        - powercycle: Power cycle the blade server.

    Example:

    .. code-block:: yaml

        my-dell-chassis:
          dellchassis.chassis:
            - chassis_name: my-dell-chassis
            - location: my-location
            - datacenter: london
            - mode: 2
            - idrac_launch: 1
            - slot_names:
              - 1: my-slot-name
              - 2: my-other-slot-name
            - blade_power_states:
              - server-1: on
              - server-2: off
              - server-3: powercycle
    '''
    ret = {
        'name': chassis_name,
        'chassis_name': chassis_name,
        'result': True,
        'changes': {},
        'comment': ''
    }

    chassis_cmd = 'chassis.cmd'
    cfg_tuning = 'cfgRacTuning'
    mode_cmd = 'cfgRacTuneChassisMgmtAtServer'
    launch_cmd = 'cfgRacTuneIdracDNSLaunchEnable'

    inventory = __salt__[chassis_cmd]('inventory')

    if idrac_launch:
        idrac_launch = str(idrac_launch)

    # --- Phase 1: diff current settings against requested ones -----------
    current_name = __salt__[chassis_cmd]('get_chassis_name')
    if chassis_name != current_name:
        ret['changes'].update({'Name': {'Old': current_name,
                                        'New': chassis_name}})

    current_dc = __salt__[chassis_cmd]('get_chassis_datacenter')
    if datacenter and datacenter != current_dc:
        ret['changes'].update({'Datacenter': {'Old': current_dc,
                                              'New': datacenter}})

    if password:
        # Never expose the actual password in the changes dict
        ret['changes'].update({'Password': {'Old': '******', 'New': '******'}})

    if location:
        current_location = __salt__[chassis_cmd]('get_chassis_location')
        if location != current_location:
            ret['changes'].update({'Location': {'Old': current_location,
                                                'New': location}})
    if mode:
        current_mode = __salt__[chassis_cmd]('get_general', cfg_tuning,
                                             mode_cmd)
        if mode != current_mode:
            ret['changes'].update({'Management Mode': {'Old': current_mode,
                                                       'New': mode}})

    if idrac_launch:
        current_launch_method = __salt__[chassis_cmd]('get_general',
                                                      cfg_tuning, launch_cmd)
        if idrac_launch != current_launch_method:
            ret['changes'].update({
                'iDrac Launch Method': {
                    'Old': current_launch_method,
                    'New': idrac_launch
                }
            })

    if slot_names:
        current_slot_names = __salt__[chassis_cmd]('list_slotnames')
        for s in slot_names:
            # BUG FIX: was s.keys()[0], which raises TypeError on Python 3
            # (dict views are not subscriptable)
            key = next(iter(s))
            new_name = s[key]
            if key.startswith('slot-'):
                key = key[5:]
            current_slot_name = current_slot_names.get(key).get('slotname')
            if current_slot_name != new_name:
                old = {key: current_slot_name}
                new = {key: new_name}
                if ret['changes'].get('Slot Names') is None:
                    ret['changes'].update({'Slot Names': {'Old': {},
                                                          'New': {}}})
                ret['changes']['Slot Names']['Old'].update(old)
                ret['changes']['Slot Names']['New'].update(new)

    current_power_states = {}
    target_power_states = {}
    if blade_power_states:
        for b in blade_power_states:
            # BUG FIX: was b.keys()[0] (breaks on Python 3)
            key = next(iter(b))
            status = __salt__[chassis_cmd]('server_powerstatus', module=key)
            current_power_states[key] = status.get('status', -1)
            if b[key] == 'powerdown':
                if current_power_states[key] != -1 and current_power_states[key]:
                    target_power_states[key] = 'powerdown'
            if b[key] == 'powerup':
                if current_power_states[key] != -1 and not current_power_states[key]:
                    target_power_states[key] = 'powerup'
            if b[key] == 'powercycle':
                # A powered-off blade is simply powered up; a running one is cycled
                if current_power_states[key] != -1 and not current_power_states[key]:
                    target_power_states[key] = 'powerup'
                if current_power_states[key] != -1 and current_power_states[key]:
                    target_power_states[key] = 'powercycle'
        for k, v in six.iteritems(target_power_states):
            old = {k: current_power_states[k]}
            new = {k: v}
            if ret['changes'].get('Blade Power States') is None:
                ret['changes'].update({'Blade Power States': {'Old': {},
                                                              'New': {}}})
            ret['changes']['Blade Power States']['Old'].update(old)
            ret['changes']['Blade Power States']['New'].update(new)

    if ret['changes'] == {}:
        ret['comment'] = 'Dell chassis is already in the desired state.'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Dell chassis configuration will change.'
        return ret

    # --- Phase 2: apply the necessary configurations on the chassis ------
    name = __salt__[chassis_cmd]('set_chassis_name', chassis_name)
    if location:
        location = __salt__[chassis_cmd]('set_chassis_location', location)
    pw_result = True
    if password:
        pw_single = True
        if __salt__[chassis_cmd]('change_password', username='root', uid=1,
                                 password=password):
            for blade in inventory['server']:
                pw_single = __salt__[chassis_cmd]('deploy_password',
                                                  username='root',
                                                  password=password,
                                                  module=blade)
                if not pw_single:
                    pw_result = False
        else:
            pw_result = False
    # NOTE(review): pw_result and datacenter_result are computed but not
    # included in the success check below - confirm whether password and
    # datacenter failures should fail the state.
    if datacenter:
        datacenter_result = __salt__[chassis_cmd]('set_chassis_datacenter',
                                                  datacenter)
    if mode:
        mode = __salt__[chassis_cmd]('set_general', cfg_tuning, mode_cmd, mode)
    if idrac_launch:
        idrac_launch = __salt__[chassis_cmd]('set_general', cfg_tuning,
                                             launch_cmd, idrac_launch)
    if ret['changes'].get('Slot Names') is not None:
        slot_rets = []
        for s in slot_names:
            # BUG FIX: was s.keys()[0] (breaks on Python 3)
            key = next(iter(s))
            new_name = s[key]
            if key.startswith('slot-'):
                key = key[5:]
            slot_rets.append(__salt__[chassis_cmd]('set_slotname', key,
                                                   new_name))
        if any(slot_rets) is False:
            slot_names = False
        else:
            slot_names = True

    powerchange_all_ok = True
    for k, v in six.iteritems(target_power_states):
        powerchange_ok = __salt__[chassis_cmd]('server_power', v, module=k)
        if not powerchange_ok:
            powerchange_all_ok = False

    if any([name, location, mode, idrac_launch, slot_names,
            powerchange_all_ok]) is False:
        ret['result'] = False
        ret['comment'] = 'There was an error setting the Dell chassis.'
    else:
        # BUG FIX: the success comment used to be assigned unconditionally,
        # clobbering the error comment set just above.
        ret['comment'] = 'Dell chassis was updated.'
    return ret
def describe(Bucket, region=None, key=None, keyid=None, profile=None):
    """
    Given a bucket name describe its properties.

    Returns a dictionary of interesting properties under the ``bucket`` key,
    ``{'bucket': None}`` when the bucket does not exist, or ``{'error': ...}``
    on other AWS failures.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_s3_bucket.describe mybucket

    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        result = {}
        conn_dict = {
            "ACL": conn.get_bucket_acl,
            "CORS": conn.get_bucket_cors,
            "LifecycleConfiguration": conn.get_bucket_lifecycle_configuration,
            "Location": conn.get_bucket_location,
            "Logging": conn.get_bucket_logging,
            "NotificationConfiguration": conn.get_bucket_notification_configuration,
            "Policy": conn.get_bucket_policy,
            "Replication": conn.get_bucket_replication,
            "RequestPayment": conn.get_bucket_request_payment,
            "Versioning": conn.get_bucket_versioning,
            "Website": conn.get_bucket_website,
        }
        # BUG FIX: the loop variable was named ``key``, shadowing the AWS
        # access-key parameter of this function.
        for attr, query in six.iteritems(conn_dict):
            try:
                data = query(Bucket=Bucket)
            except ClientError as e:
                # These error codes just mean the property isn't configured
                # on the bucket; skip the property rather than fail.
                if e.response.get("Error", {}).get("Code") in (
                    "NoSuchLifecycleConfiguration",
                    "NoSuchCORSConfiguration",
                    "NoSuchBucketPolicy",
                    "NoSuchWebsiteConfiguration",
                    "ReplicationConfigurationNotFoundError",
                    "NoSuchTagSet",
                ):
                    continue
                raise
            if "ResponseMetadata" in data:
                del data["ResponseMetadata"]
            result[attr] = data
        # Flatten the TagSet list of {'Key':..,'Value':..} into a plain dict
        tags = {}
        try:
            data = conn.get_bucket_tagging(Bucket=Bucket)
            for tagdef in data.get("TagSet"):
                tags[tagdef.get("Key")] = tagdef.get("Value")
        except ClientError as e:
            if not e.response.get("Error", {}).get("Code") == "NoSuchTagSet":
                raise
        if tags:
            result["Tagging"] = tags
        return {"bucket": result}
    except ClientError as e:
        # (removed unused local ``err``)
        if e.response.get("Error", {}).get("Code") == "NoSuchBucket":
            return {"bucket": None}
        return {"error": __utils__["boto3.get_error"](e)}
def keys(name, basepath='/etc/pki', **kwargs):
    '''
    Manage libvirt keys.

    name
        The name variable used to track the execution

    basepath
        Defaults to ``/etc/pki``, this is the root location used for libvirt
        keys on the hypervisor

    The following parameters are optional:

        country
            The country that the certificate should use.  Defaults to US.

            .. versionadded:: 2018.3.0

        state
            The state that the certificate should use.  Defaults to Utah.

            .. versionadded:: 2018.3.0

        locality
            The locality that the certificate should use.  Defaults to
            Salt Lake City.

            .. versionadded:: 2018.3.0

        organization
            The organization that the certificate should use.  Defaults to
            Salted.

            .. versionadded:: 2018.3.0

        expiration_days
            The number of days that the certificate should be valid for.
            Defaults to 365 days (1 year)

            .. versionadded:: 2018.3.0
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    # Grab all kwargs to make them available as pillar values
    # rename them to something hopefully unique to avoid
    # overriding anything existing
    pillar_kwargs = {}
    for key, value in six.iteritems(kwargs):
        pillar_kwargs['ext_pillar_virt.{0}'.format(key)] = value

    # Render the libvirt ext_pillar to obtain the expected PEM contents.
    pillar = __salt__['pillar.ext']({'libvirt': '_'}, pillar_kwargs)

    # On-disk destinations for each PEM the pillar may provide.
    paths = {
        'serverkey': os.path.join(basepath, 'libvirt',
                                  'private', 'serverkey.pem'),
        'servercert': os.path.join(basepath, 'libvirt',
                                   'servercert.pem'),
        'clientkey': os.path.join(basepath, 'libvirt',
                                  'private', 'clientkey.pem'),
        'clientcert': os.path.join(basepath, 'libvirt',
                                   'clientcert.pem'),
        'cacert': os.path.join(basepath, 'CA', 'cacert.pem')
    }

    # First pass: compare the on-disk files to the pillar-provided data and
    # record which ones need to be created or updated.
    for key in paths:
        p_key = 'libvirt.{0}.pem'.format(key)
        if p_key not in pillar:
            # Pillar did not supply this file; nothing to manage.
            continue
        if not os.path.exists(os.path.dirname(paths[key])):
            os.makedirs(os.path.dirname(paths[key]))
        if os.path.isfile(paths[key]):
            with salt.utils.files.fopen(paths[key], 'r') as fp_:
                if salt.utils.stringutils.to_unicode(
                        fp_.read()) != pillar[p_key]:
                    ret['changes'][key] = 'update'
        else:
            ret['changes'][key] = 'new'

    if not ret['changes']:
        ret['comment'] = 'All keys are correct'
    elif __opts__['test']:
        # Test mode: report pending updates without touching the filesystem.
        ret['result'] = None
        ret['comment'] = 'Libvirt keys are set to be updated'
        ret['changes'] = {}
    else:
        # Second pass: write out every file flagged as changed above.
        for key in ret['changes']:
            with salt.utils.files.fopen(paths[key], 'w+') as fp_:
                fp_.write(
                    salt.utils.stringutils.to_str(
                        pillar['libvirt.{0}.pem'.format(key)]))

        ret['comment'] = 'Updated libvirt certs and keys'

    return ret
def copy(name, target, **kwargs):
    '''
    Copy scheduled job to another minion or minions.

    name
        The name of the scheduled job to copy.

    target
        The minion(s) to publish the ``schedule.add`` call to.

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.copy jobname target
    '''
    ret = {'comment': [], 'result': True}

    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        # Bail out immediately: without a job name there is nothing to look
        # up, and falling through would publish a bogus schedule.add call
        # (previously this branch did not return, clobbering the comment).
        return ret

    if 'test' in kwargs and kwargs['test']:
        ret['comment'] = 'Job: {0} would be copied from schedule.'.format(name)
    else:
        # The job may live either in the minion opts or in pillar; check both.
        opts_schedule = list_(show_all=True, where='opts', return_yaml=False)
        pillar_schedule = list_(show_all=True, where='pillar',
                                return_yaml=False)

        if name in opts_schedule:
            schedule_data = opts_schedule[name]
        elif name in pillar_schedule:
            schedule_data = pillar_schedule[name]
        else:
            ret['comment'] = 'Job {0} does not exist.'.format(name)
            ret['result'] = False
            return ret

        # Flatten the job definition into key=value strings for publishing.
        schedule_opts = []
        for key, value in six.iteritems(schedule_data):
            temp = '{0}={1}'.format(key, value)
            schedule_opts.append(temp)
        response = __salt__['publish.publish'](target,
                                               'schedule.add',
                                               schedule_opts)

        # Get errors and list of affected minions
        errors = []
        minions = []
        for minion in response:
            minions.append(minion)
            if not response[minion]:
                errors.append(minion)

        # parse response
        if not response:
            ret['comment'] = ('no servers answered the published '
                              'schedule.add command')
            return ret
        elif len(errors) > 0:
            ret['comment'] = 'the following minions return False'
            ret['minions'] = errors
            return ret
        else:
            ret['result'] = True
            ret['comment'] = ('Copied Job {0} from schedule '
                              'to minion(s).'.format(name))
            ret['minions'] = minions
            return ret
    return ret
def present(name, DomainName,
            ElasticsearchClusterConfig=None,
            EBSOptions=None,
            AccessPolicies=None,
            SnapshotOptions=None,
            AdvancedOptions=None,
            Tags=None,
            region=None, key=None, keyid=None, profile=None):
    '''
    Ensure domain exists.

    name
        The name of the state definition

    DomainName
        Name of the domain.

    ElasticsearchClusterConfig
        Configuration options for an Elasticsearch domain. Specifies the
        instance type and number of instances in the domain cluster.

        InstanceType (string) --
        The instance type for an Elasticsearch cluster.

        InstanceCount (integer) --
        The number of instances in the specified domain cluster.

        DedicatedMasterEnabled (boolean) --
        A boolean value to indicate whether a dedicated master node is
        enabled. See About Dedicated Master Nodes for more information.

        ZoneAwarenessEnabled (boolean) --
        A boolean value to indicate whether zone awareness is enabled. See
        About Zone Awareness for more information.

        DedicatedMasterType (string) --
        The instance type for a dedicated master node.

        DedicatedMasterCount (integer) --
        Total number of dedicated master nodes, active and on standby, for
        the cluster.

    EBSOptions
        Options to enable, disable and specify the type and size of EBS
        storage volumes.

        EBSEnabled (boolean) --
        Specifies whether EBS-based storage is enabled.

        VolumeType (string) --
        Specifies the volume type for EBS-based storage.

        VolumeSize (integer) --
        Integer to specify the size of an EBS volume.

        Iops (integer) --
        Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).

    AccessPolicies
        IAM access policy

    SnapshotOptions
        Option to set time, in UTC format, of the daily automated snapshot.
        Default value is 0 hours.

        AutomatedSnapshotStartHour (integer) --
        Specifies the time, in UTC format, when the service takes a daily
        automated snapshot of the specified Elasticsearch domain. Default
        value is 0 hours.

    AdvancedOptions
        Option to allow references to indices in an HTTP request body. Must
        be false when configuring access to individual sub-resources. By
        default, the value is true .

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': DomainName, 'result': True, 'comment': '', 'changes': {}}

    # Fill in AWS defaults for any omitted configuration sections so that
    # the comparison against the described domain is meaningful.
    if ElasticsearchClusterConfig is None:
        ElasticsearchClusterConfig = {
            'DedicatedMasterEnabled': False,
            'InstanceCount': 1,
            'InstanceType': 'm3.medium.elasticsearch',
            'ZoneAwarenessEnabled': False
        }
    if EBSOptions is None:
        EBSOptions = {
            'EBSEnabled': False,
        }
    if SnapshotOptions is None:
        SnapshotOptions = {'AutomatedSnapshotStartHour': 0}
    if AdvancedOptions is None:
        AdvancedOptions = {'rest.action.multi.allow_explicit_index': 'true'}
    if Tags is None:
        Tags = {}
    # Allow the access policy to be supplied as a JSON string.
    if AccessPolicies is not None and isinstance(AccessPolicies,
                                                 six.string_types):
        try:
            AccessPolicies = json.loads(AccessPolicies)
        except ValueError as e:
            ret['result'] = False
            ret['comment'] = 'Failed to create domain: {0}.'.format(e.message)
            return ret
    r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName,
                                                     region=region, key=key,
                                                     keyid=keyid,
                                                     profile=profile)

    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Failed to create domain: {0}.'.format(
            r['error']['message'])
        return ret

    if not r.get('exists'):
        # Domain does not exist yet: create it (or report intent in test mode).
        if __opts__['test']:
            ret['comment'] = 'Domain {0} is set to be created.'.format(
                DomainName)
            ret['result'] = None
            return ret
        r = __salt__['boto_elasticsearch_domain.create'](
            DomainName=DomainName,
            ElasticsearchClusterConfig=ElasticsearchClusterConfig,
            EBSOptions=EBSOptions,
            AccessPolicies=AccessPolicies,
            SnapshotOptions=SnapshotOptions,
            AdvancedOptions=AdvancedOptions,
            region=region, key=key,
            keyid=keyid, profile=profile)
        if not r.get('created'):
            ret['result'] = False
            ret['comment'] = 'Failed to create domain: {0}.'.format(
                r['error']['message'])
            return ret
        _describe = __salt__['boto_elasticsearch_domain.describe'](
            DomainName, region=region, key=key, keyid=keyid, profile=profile)
        ret['changes']['old'] = {'domain': None}
        ret['changes']['new'] = _describe
        ret['comment'] = 'Domain {0} created.'.format(DomainName)
        return ret

    ret['comment'] = os.linesep.join(
        [ret['comment'], 'Domain {0} is present.'.format(DomainName)])
    ret['changes'] = {}
    # domain exists, ensure config matches
    _describe = __salt__['boto_elasticsearch_domain.describe'](
        DomainName=DomainName, region=region, key=key, keyid=keyid,
        profile=profile)['domain']
    _describe['AccessPolicies'] = json.loads(_describe['AccessPolicies'])

    # When EBSEnabled is false, describe returns extra values that can't
    # be set
    if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
        opts = _describe.get('EBSOptions', {})
        opts.pop('VolumeSize', None)
        opts.pop('VolumeType', None)

    comm_args = {}
    need_update = False
    es_opts = {
        'ElasticsearchClusterConfig': ElasticsearchClusterConfig,
        'EBSOptions': EBSOptions,
        'AccessPolicies': AccessPolicies,
        'SnapshotOptions': SnapshotOptions,
        'AdvancedOptions': AdvancedOptions
    }

    # Diff each desired section against the live config; anything that
    # differs is queued for a single update call and recorded in changes.
    for k, v in six.iteritems(es_opts):
        if not _compare_json(v, _describe[k]):
            need_update = True
            comm_args[k] = v
            ret['changes'].setdefault('new', {})[k] = v
            ret['changes'].setdefault('old', {})[k] = _describe[k]

    if need_update:
        if __opts__['test']:
            msg = 'Domain {0} set to be modified.'.format(DomainName)
            ret['comment'] = msg
            ret['result'] = None
            return ret

        ret['comment'] = os.linesep.join(
            [ret['comment'], 'Domain to be modified'])

        r = __salt__['boto_elasticsearch_domain.update'](
            DomainName=DomainName,
            region=region, key=key,
            keyid=keyid, profile=profile,
            **comm_args)
        if not r.get('updated'):
            ret['result'] = False
            ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
            ret['changes'] = {}
            return ret
    return ret
def install(name=None, refresh=False, pkgs=None, sources=None,
            reinstall=False, **kwargs):
    """
    Install the passed package, add refresh=True to update the opkg database.

    name
        The name of the package to be installed. Note that this parameter is
        ignored if either "pkgs" or "sources" is passed. Additionally, please
        note that this option can only be used to install packages from a
        software repository. To install a package file manually, use the
        "sources" option.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.install <package name>

    refresh
        Whether or not to refresh the package database before installing.

    version
        Install a specific version of the package, e.g. 1.2.3~0ubuntu0.
        Ignored if "pkgs" or "sources" is passed.

        .. versionadded:: 2017.7.0

    reinstall : False
        Specifying reinstall=True will use ``opkg install --force-reinstall``
        rather than simply ``opkg install`` for requested packages that are
        already installed.

        If a version is specified with the requested package, then ``opkg
        install --force-reinstall`` will only be used if the installed version
        matches the requested version.

        .. versionadded:: 2017.7.0


    Multiple Package Installation Options:

    pkgs
        A list of packages to install from a software repository. Must be
        passed as a python list.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.install pkgs='["foo", "bar"]'
            salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-0ubuntu0"}]'

    sources
        A list of IPK packages to install. Must be passed as a list of dicts,
        with the keys being package names, and the values being the source URI
        or local path to the package. Dependencies are automatically resolved
        and marked as auto-installed.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'

    install_recommends
        Whether to install the packages marked as recommended. Default is
        True.

    only_upgrade
        Only upgrade the packages (disallow downgrades), if they are already
        installed. Default is False.

        .. versionadded:: 2017.7.0

    Returns a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    """
    refreshdb = salt.utils.data.is_true(refresh)

    try:
        # Resolve name/pkgs/sources into a uniform target dict plus a type
        # tag ('file' or 'repository') that drives the branching below.
        pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](name,
                                                                      pkgs,
                                                                      sources,
                                                                      **kwargs)
    except MinionError as exc:
        raise CommandExecutionError(exc)

    # Snapshot of installed packages before we touch anything, so we can
    # diff afterwards to compute the changes dict.
    old = list_pkgs()

    cmd_prefix = ["opkg", "install"]
    to_install = []
    to_reinstall = []
    to_downgrade = []

    # In test mode this appends opkg's --noaction flag.
    _append_noaction_if_testmode(cmd_prefix, **kwargs)
    if pkg_params is None or len(pkg_params) == 0:
        return {}
    elif pkg_type == "file":
        if reinstall:
            cmd_prefix.append("--force-reinstall")
        if not kwargs.get("only_upgrade", False):
            cmd_prefix.append("--force-downgrade")
        to_install.extend(pkg_params)
    elif pkg_type == "repository":
        if not kwargs.get("install_recommends", True):
            cmd_prefix.append("--no-install-recommends")
        for pkgname, pkgversion in six.iteritems(pkg_params):
            if name and pkgs is None and kwargs.get("version") and len(
                    pkg_params) == 1:
                # Only use the 'version' param if 'name' was not specified as
                # a comma-separated list
                version_num = kwargs["version"]
            else:
                version_num = pkgversion

            if version_num is None:
                # Don't allow downgrades if the version
                # number is not specified.
                if reinstall and pkgname in old:
                    to_reinstall.append(pkgname)
                else:
                    to_install.append(pkgname)
            else:
                pkgstr = "{0}={1}".format(pkgname, version_num)
                cver = old.get(pkgname, "")
                if (reinstall and cver and
                        salt.utils.versions.compare(ver1=version_num,
                                                    oper="==",
                                                    ver2=cver,
                                                    cmp_func=version_cmp)):
                    # Requested version already installed; force reinstall.
                    to_reinstall.append(pkgstr)
                elif not cver or salt.utils.versions.compare(
                        ver1=version_num, oper=">=", ver2=cver,
                        cmp_func=version_cmp):
                    # Not installed, or requested version is an upgrade.
                    to_install.append(pkgstr)
                else:
                    if not kwargs.get("only_upgrade", False):
                        to_downgrade.append(pkgstr)
                    else:
                        # This should cause the command to fail.
                        to_install.append(pkgstr)

    # Build up to three opkg invocations: plain install, forced downgrade,
    # and forced reinstall, each with its own extra flag.
    cmds = []
    if to_install:
        cmd = copy.deepcopy(cmd_prefix)
        cmd.extend(to_install)
        cmds.append(cmd)
    if to_downgrade:
        cmd = copy.deepcopy(cmd_prefix)
        cmd.append("--force-downgrade")
        cmd.extend(to_downgrade)
        cmds.append(cmd)
    if to_reinstall:
        cmd = copy.deepcopy(cmd_prefix)
        cmd.append("--force-reinstall")
        cmd.extend(to_reinstall)
        cmds.append(cmd)
    if not cmds:
        return {}

    if refreshdb:
        refresh_db()

    errors = []
    for cmd in cmds:
        out = __salt__["cmd.run_all"](cmd, output_loglevel="trace",
                                      python_shell=False)
        if out["retcode"] != 0:
            if out["stderr"]:
                errors.append(out["stderr"])
            else:
                errors.append(out["stdout"])

    # Invalidate the cached package list before re-reading it.
    __context__.pop("pkg.list_pkgs", None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if pkg_type == "file" and reinstall:
        # For file-based packages, prepare 'to_reinstall' to have a list
        # of all the package names that may have been reinstalled.
        # This way, we could include reinstalled packages in 'ret'.
        for pkgfile in to_install:
            # Convert from file name to package name.
            cmd = ["opkg", "info", pkgfile]
            out = __salt__["cmd.run_all"](cmd, output_loglevel="trace",
                                          python_shell=False)
            if out["retcode"] == 0:
                # Just need the package name.
                pkginfo_dict = _process_info_installed_output(
                    out["stdout"], [])
                if pkginfo_dict:
                    to_reinstall.append(list(pkginfo_dict.keys())[0])

    # Reinstalled packages don't show up in the old/new diff; add them
    # explicitly so callers see them in the changes dict.
    for pkgname in to_reinstall:
        if pkgname not in ret or pkgname in old:
            ret.update({
                pkgname: {
                    "old": old.get(pkgname, ""),
                    "new": new.get(pkgname, "")
                }
            })

    rs_result = _get_restartcheck_result(errors)

    if errors:
        raise CommandExecutionError(
            "Problem encountered installing package(s)",
            info={
                "errors": errors,
                "changes": ret
            },
        )

    _process_restartcheck_result(rs_result)

    return ret
def mounted(name,
            device,
            fstype,
            mkmnt=False,
            opts='defaults',
            dump=0,
            pass_num=0,
            config='/etc/fstab',
            persist=True,
            mount=True,
            user=None):
    '''
    Verify that a device is mounted

    name
        The path to the location where the device is to be mounted

    device
        The device name, typically the device node, such as ``/dev/sdb1``
        or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314``

    fstype
        The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case
        of classic filesystems, and ``fuse`` in the case of fuse mounts

    mkmnt
        If the mount point is not present then the state will fail, set
        ``mkmnt: True`` to create the mount point if it is otherwise not
        present

    opts
        A list object of options or a comma delimited list

    dump
        The dump value to be passed into the fstab, Default is ``0``

    pass_num
        The pass value to be passed into the fstab, Default is ``0``

    config
        Set an alternative location for the fstab, Default is ``/etc/fstab``

    persist
        Set if the mount should be saved in the fstab, Default is ``True``

    mount
        Set if the mount should be mounted immediately, Default is ``True``

    user
        The user to own the mount; this defaults to the user salt is
        running as on the minion
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Defaults is not a valid option on Mac OS
    if __grains__['os'] in ['MacOS', 'Darwin'] and opts == 'defaults':
        opts = 'noowners'

    # Make sure that opts is correct, it can be a list or a comma delimited
    # string
    if isinstance(opts, string_types):
        opts = opts.split(',')

    # remove possible trailing slash
    if not name == '/':
        name = name.rstrip('/')

    # Get the active data
    active = __salt__['mount.active'](extended=True)
    real_name = os.path.realpath(name)
    if device.startswith('/'):
        if 'bind' in opts and real_name in active:
            _device = device
            if active[real_name]['device'].startswith('/'):
                # Find the device that the bind really points at.
                while True:
                    if _device in active:
                        _real_device = active[_device]['device']
                        opts = list(set(opts + active[_device]['opts']
                                        + active[_device]['superopts']))
                        active[real_name]['opts'].append('bind')
                        break
                    # Walk up the path until we hit an active mount point.
                    _device = os.path.dirname(_device)
                real_device = _real_device
            else:
                # Remote file systems act differently.
                opts = list(set(opts + active[_device]['opts']
                                + active[_device]['superopts']))
                active[real_name]['opts'].append('bind')
                real_device = active[real_name]['device']
        else:
            real_device = os.path.realpath(device)
    elif device.upper().startswith('UUID='):
        real_device = device.split('=')[1].strip('"').lower()
    else:
        real_device = device

    # LVS devices have 2 names under /dev:
    # /dev/mapper/vg--name-lv--name and /dev/vg-name/lv-name
    # No matter what name is used for mounting,
    # mount always displays the device as /dev/mapper/vg--name-lv--name
    # Note the double-dash escaping.
    # So, let's call that the canonical device name
    # We should normalize names of the /dev/vg-name/lv-name type to the
    # canonical name
    lvs_match = re.match(r'^/dev/(?P<vg_name>[^/]+)/(?P<lv_name>[^/]+$)',
                         device)
    if lvs_match:
        double_dash_escaped = dict(
            (k, re.sub(r'-', '--', v))
            for k, v in six.iteritems(lvs_match.groupdict()))
        mapper_device = '/dev/mapper/{vg_name}-{lv_name}'.format(
            **double_dash_escaped)
        if os.path.exists(mapper_device):
            real_device = mapper_device

    # When included in a Salt state file, FUSE
    # devices are prefaced by the filesystem type
    # and a hash, e.g. sshfs#.  In the mount list
    # only the hostname is included.  So if we detect
    # that the device is a FUSE device then we
    # remove the prefaced string so that the device in
    # state matches the device in the mount list.
    fuse_match = re.match(r'^\w+\#(?P<device_name>.+)', device)
    if fuse_match:
        if 'device_name' in fuse_match.groupdict():
            real_device = fuse_match.group('device_name')

    if real_name in active:
        if mount:
            # Build the list of device spellings that count as "already
            # mounted here": active device, its realpath, and any recorded
            # alternate/UUID identities.
            device_list = []
            device_list.append(active[real_name]['device'])
            device_list.append(os.path.realpath(device_list[0]))
            alt_device = (active[real_name]['alt_device']
                          if 'alt_device' in active[real_name] else None)
            uuid_device = (active[real_name]['device_uuid']
                           if 'device_uuid' in active[real_name] else None)
            if alt_device and alt_device not in device_list:
                device_list.append(alt_device)
            if uuid_device and uuid_device not in device_list:
                device_list.append(uuid_device)
            if opts:
                # Options that never show up in the mount output; changes to
                # these must not trigger a remount.
                mount_invisible_options = [
                    '_netdev',
                    'actimeo',
                    'bg',
                    'comment',
                    'defaults',
                    'delay_connect',
                    'intr',
                    'nobootwait',
                    'nofail',
                    'password',
                    'reconnect',
                    'retry',
                    'soft',
                ]
                # options which are provided as key=value (e.g.
                # password=Zohp5ohb)
                mount_invisible_keys = [
                    'actimeo',
                    'comment',
                    'password',
                    'retry',
                ]
                for opt in opts:
                    keyval_option = opt.split('=')[0]
                    if keyval_option in mount_invisible_keys:
                        opt = keyval_option
                    if (opt not in active[real_name]['opts']
                            and opt not in active[real_name]['superopts']
                            and opt not in mount_invisible_options):
                        if __opts__['test']:
                            ret['result'] = None
                            ret['comment'] = ("Remount would be forced "
                                              "because options ({0}) "
                                              "changed".format(opt))
                            return ret
                        else:
                            # nfs requires umounting and mounting if options
                            # change
                            # add others to list that require similiar
                            # functionality
                            if fstype in ['nfs']:
                                ret['changes']['umount'] = \
                                    "Forced unmount and mount because " \
                                    + "options ({0}) changed".format(opt)
                                unmount_result = __salt__['mount.umount'](
                                    real_name)
                                if unmount_result is True:
                                    mount_result = __salt__['mount.mount'](
                                        real_name,
                                        device,
                                        mkmnt=mkmnt,
                                        fstype=fstype,
                                        opts=opts)
                                    ret['result'] = mount_result
                                else:
                                    ret['result'] = False
                                    ret['comment'] = ('Unable to unmount '
                                                      '{0}: {1}.'.format(
                                                          real_name,
                                                          unmount_result))
                                    return ret
                            else:
                                ret['changes']['umount'] = \
                                    "Forced remount because " \
                                    + "options ({0}) changed".format(opt)
                                remount_result = __salt__['mount.remount'](
                                    real_name,
                                    device,
                                    mkmnt=mkmnt,
                                    fstype=fstype,
                                    opts=opts)
                                ret['result'] = remount_result
                                # Cleanup after the remount, so we
                                # don't write remount into fstab
                                if 'remount' in opts:
                                    opts.remove('remount')
            if real_device not in device_list:
                # name matches but device doesn't - need to umount
                if __opts__['test']:
                    ret['result'] = None
                    ret['comment'] = "An umount would have been forced " \
                                     + "because devices do not match.  " \
                                       "Watched: " \
                                     + device
                else:
                    ret['changes']['umount'] = \
                        "Forced unmount because devices " \
                        + "don't match. Wanted: " + device
                    if real_device != device:
                        ret['changes']['umount'] += \
                            " (" + real_device + ")"
                    ret['changes']['umount'] += \
                        ", current: " + ', '.join(device_list)
                    out = __salt__['mount.umount'](real_name, user=user)
                    active = __salt__['mount.active'](extended=True)
                    if real_name in active:
                        ret['comment'] = "Unable to unmount"
                        ret['result'] = None
                        return ret
            else:
                ret['comment'] = 'Target was already mounted'
    # using a duplicate check so I can catch the results of a umount
    if real_name not in active:
        if mount:
            # The mount is not present! Mount it
            if __opts__['test']:
                ret['result'] = None
                if os.path.exists(name):
                    ret['comment'] = '{0} would be mounted'.format(name)
                else:
                    ret['comment'] = '{0} will be created and mounted'.format(
                        name)
                return ret

            if not os.path.exists(name):
                if mkmnt:
                    __salt__['file.mkdir'](name, user=user)
                else:
                    ret['result'] = False
                    ret['comment'] = 'Mount directory is not present'
                    return ret

            out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts,
                                          user=user)
            active = __salt__['mount.active'](extended=True)
            if isinstance(out, string_types):
                # Failed to (re)mount, the state has failed!
                ret['comment'] = out
                ret['result'] = False
                return ret
            elif real_name in active:
                # (Re)mount worked!
                ret['comment'] = 'Target was successfully mounted'
                ret['changes']['mount'] = True
        else:
            ret['comment'] = '{0} not mounted'.format(name)

    if persist:
        # Override default for Mac OS
        if __grains__['os'] in ['MacOS', 'Darwin'] \
                and config == '/etc/fstab':
            config = "/etc/auto_salt"

        if __opts__['test']:
            # Dry-run the fstab/automaster update to report what would change.
            if __grains__['os'] in ['MacOS', 'Darwin']:
                out = __salt__['mount.set_automaster'](name,
                                                       device,
                                                       fstype,
                                                       opts,
                                                       config,
                                                       test=True)
            else:
                out = __salt__['mount.set_fstab'](name,
                                                  device,
                                                  fstype,
                                                  opts,
                                                  dump,
                                                  pass_num,
                                                  config,
                                                  test=True)
            if out != 'present':
                ret['result'] = None
                if out == 'new':
                    if mount:
                        ret['comment'] = ('{0} is mounted, but needs to be '
                                          'written to the fstab in order to '
                                          'be made persistent').format(name)
                    else:
                        ret['comment'] = ('{0} needs to be '
                                          'written to the fstab in order to '
                                          'be made persistent').format(name)
                elif out == 'change':
                    if mount:
                        ret['comment'] = ('{0} is mounted, but its fstab '
                                          'entry must be updated').format(
                                              name)
                    else:
                        ret['comment'] = ('The {0} fstab entry '
                                          'must be updated').format(name)
                else:
                    ret['result'] = False
                    ret['comment'] = ('Unable to detect fstab status for '
                                      'mount point {0} due to unexpected '
                                      'output \'{1}\' from call to '
                                      'mount.set_fstab. This is most likely '
                                      'a bug.').format(name, out)
            return ret

        else:
            if __grains__['os'] in ['MacOS', 'Darwin']:
                out = __salt__['mount.set_automaster'](name,
                                                       device,
                                                       fstype,
                                                       opts,
                                                       config)
            else:
                out = __salt__['mount.set_fstab'](name,
                                                  device,
                                                  fstype,
                                                  opts,
                                                  dump,
                                                  pass_num,
                                                  config)

        if out == 'present':
            ret['comment'] += '. Entry already exists in the fstab.'
            return ret
        if out == 'new':
            ret['changes']['persist'] = 'new'
            ret['comment'] += '. Added new entry to the fstab.'
            return ret
        if out == 'change':
            ret['changes']['persist'] = 'update'
            ret['comment'] += '. Updated the entry in the fstab.'
            return ret
        if out == 'bad config':
            ret['result'] = False
            ret['comment'] += '. However, the fstab was not found.'
            return ret

    return ret
def render_jinja_tmpl(tmplstr, context, tmplpath=None):
    '''
    Render ``tmplstr`` as a Jinja template with ``context`` as the render
    namespace, wrapping Jinja errors in :class:`SaltRenderError`.
    '''
    opts = context['opts']
    saltenv = context['saltenv']
    loader = None
    newline = False

    if tmplstr and not isinstance(tmplstr, six.text_type):
        # http://jinja.pocoo.org/docs/api/#unicode
        tmplstr = tmplstr.decode(SLS_ENCODING)

    # Remember whether the input ended in a newline; Jinja strips the final
    # newline, so we re-add it at the end (see workaround below).
    if tmplstr.endswith('\n'):
        newline = True

    if not saltenv:
        if tmplpath:
            loader = jinja2.FileSystemLoader(os.path.dirname(tmplpath))
    else:
        # With a saltenv, template includes are resolved through the Salt
        # fileserver cache.
        loader = salt.utils.jinja.SaltCacheLoader(
            opts,
            saltenv,
            pillar_rend=context.get('_pillar_rend', False))

    env_args = {'extensions': [], 'loader': loader}

    # Enable optional Jinja extensions only when this Jinja version has them.
    if hasattr(jinja2.ext, 'with_'):
        env_args['extensions'].append('jinja2.ext.with_')
    if hasattr(jinja2.ext, 'do'):
        env_args['extensions'].append('jinja2.ext.do')
    if hasattr(jinja2.ext, 'loopcontrols'):
        env_args['extensions'].append('jinja2.ext.loopcontrols')
    env_args['extensions'].append(salt.utils.jinja.SerializerExtension)

    # Pass through trim_blocks and lstrip_blocks Jinja parameters
    # trim_blocks removes newlines around Jinja blocks
    # lstrip_blocks strips tabs and spaces from the beginning of
    # line to the start of a block.
    if opts.get('jinja_trim_blocks', False):
        log.debug('Jinja2 trim_blocks is enabled')
        env_args['trim_blocks'] = True
    if opts.get('jinja_lstrip_blocks', False):
        log.debug('Jinja2 lstrip_blocks is enabled')
        env_args['lstrip_blocks'] = True

    # Unless explicitly allowed, undefined variables raise via StrictUndefined.
    if opts.get('allow_undefined', False):
        jinja_env = jinja2.Environment(**env_args)
    else:
        jinja_env = jinja2.Environment(undefined=jinja2.StrictUndefined,
                                       **env_args)

    jinja_env.tests.update(JinjaTest.salt_jinja_tests)
    jinja_env.filters.update(JinjaFilter.salt_jinja_filters)

    # globals
    jinja_env.globals['odict'] = OrderedDict
    jinja_env.globals['show_full_context'] = \
        salt.utils.jinja.show_full_context

    jinja_env.tests['list'] = salt.utils.is_list

    # Decode all string context values so the template always sees unicode.
    decoded_context = {}
    for key, value in six.iteritems(context):
        if not isinstance(value, six.string_types):
            decoded_context[key] = value
            continue

        decoded_context[key] = salt.utils.locales.sdecode(value)

    try:
        template = jinja_env.from_string(tmplstr)
        template.globals.update(decoded_context)
        output = template.render(**decoded_context)
    except jinja2.exceptions.TemplateSyntaxError as exc:
        trace = traceback.extract_tb(sys.exc_info()[2])
        line, out = _get_jinja_error(trace, context=decoded_context)
        if not line:
            tmplstr = ''
        raise SaltRenderError('Jinja syntax error: {0}{1}'.format(exc, out),
                              line,
                              tmplstr)
    except jinja2.exceptions.UndefinedError as exc:
        trace = traceback.extract_tb(sys.exc_info()[2])
        out = _get_jinja_error(trace, context=decoded_context)[1]
        tmplstr = ''
        # Don't include the line number, since it is misreported
        # https://github.com/mitsuhiko/jinja2/issues/276
        raise SaltRenderError(
            'Jinja variable {0}{1}'.format(
                exc, out),
            buf=tmplstr)
    except (SaltInvocationError, CommandExecutionError) as exc:
        trace = traceback.extract_tb(sys.exc_info()[2])
        line, out = _get_jinja_error(trace, context=decoded_context)
        if not line:
            tmplstr = ''
        raise SaltRenderError(
            'Problem running salt function in Jinja template: {0}{1}'.format(
                exc, out),
            line,
            tmplstr)
    except Exception as exc:
        # Catch-all: attach the full traceback to the render error so
        # template authors can see where the failure originated.
        tracestr = traceback.format_exc()
        trace = traceback.extract_tb(sys.exc_info()[2])
        line, out = _get_jinja_error(trace, context=decoded_context)
        if not line:
            tmplstr = ''
        else:
            tmplstr += '\n{0}'.format(tracestr)
        log.debug("Jinja Error")
        log.debug("Exception: {0}".format(exc))
        log.debug("Out: {0}".format(out))
        log.debug("Line: {0}".format(line))
        log.debug("TmplStr: {0}".format(tmplstr))
        log.debug("TraceStr: {0}".format(tracestr))

        raise SaltRenderError('Jinja error: {0}{1}'.format(exc, out),
                              line,
                              tmplstr,
                              trace=tracestr)

    # Workaround a bug in Jinja that removes the final newline
    # (https://github.com/mitsuhiko/jinja2/issues/75)
    if newline:
        output += '\n'

    return output
def _netstat_bsd():
    '''
    Return netstat information for BSD flavors
    '''
    ret = []
    if __grains__['kernel'] == 'NetBSD':
        # NetBSD's netstat needs one pass per address family; the state
        # column is only present for TCP sockets.
        for addr_family in ('inet', 'inet6'):
            cmd = 'netstat -f {0} -an | tail -n+3'.format(addr_family)
            out = __salt__['cmd.run'](cmd, python_shell=True)
            for line in out.splitlines():
                comps = line.split()
                entry = {
                    'proto': comps[0],
                    'recv-q': comps[1],
                    'send-q': comps[2],
                    'local-address': comps[3],
                    'remote-address': comps[4]
                }
                if entry['proto'].startswith('tcp'):
                    entry['state'] = comps[5]
                ret.append(entry)
    else:
        # Lookup TCP connections
        cmd = 'netstat -p tcp -an | tail -n+3'
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'proto': comps[0],
                'recv-q': comps[1],
                'send-q': comps[2],
                'local-address': comps[3],
                'remote-address': comps[4],
                'state': comps[5]})

        # Lookup UDP connections
        cmd = 'netstat -p udp -an | tail -n+3'
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'proto': comps[0],
                'recv-q': comps[1],
                'send-q': comps[2],
                'local-address': comps[3],
                'remote-address': comps[4]})

    # Add in user and program info
    ppid = _ppid()
    if __grains__['kernel'] == 'OpenBSD':
        netinfo = _netinfo_openbsd()
    elif __grains__['kernel'] in ('FreeBSD', 'NetBSD'):
        netinfo = _netinfo_freebsd_netbsd()
    for idx in range(len(ret)):
        local = ret[idx]['local-address']
        remote = ret[idx]['remote-address']
        proto = ret[idx]['proto']
        try:
            # Make a pointer to the info for this connection for easier
            # reference below
            ptr = netinfo[local][remote][proto]
        except KeyError:
            continue
        # Get the pid-to-ppid mappings for this connection
        conn_ppid = dict((x, y) for x, y in six.iteritems(ppid) if x in ptr)
        try:
            # Master pid for this connection will be the pid whose ppid isn't
            # in the subset dict we created above
            master_pid = next(
                iter(x for x, y in six.iteritems(conn_ppid)
                     if y not in ptr))
        except StopIteration:
            continue
        ret[idx]['user'] = ptr[master_pid]['user']
        ret[idx]['program'] = '/'.join((master_pid, ptr[master_pid]['cmd']))
    return ret
def setvals(grains, destructive=False):
    '''
    Set new grains values in the grains config file

    destructive
        If an operation results in a key being removed, delete the key, too.
        Defaults to False.

    CLI Example:

    .. code-block:: bash

        salt '*' grains.setvals "{'key1': 'val1', 'key2': 'val2'}"
    '''
    new_grains = grains
    # NOTE(review): collections.Mapping is removed in Python 3.10+
    # (moved to collections.abc) — confirm the supported Python versions
    if not isinstance(new_grains, collections.Mapping):
        raise SaltException('setvals grains must be a dictionary.')

    # Locate the static grains file. If conf_file is a directory, the
    # grains file lives inside it; otherwise (an existing file, or a path
    # that does not exist) it lives alongside conf_file. Proxy minions
    # keep their grains under proxy.d/<id>/.
    if os.path.isdir(__opts__['conf_file']):
        conf_dir = __opts__['conf_file']
    else:
        conf_dir = os.path.dirname(__opts__['conf_file'])
    if salt.utils.platform.is_proxy():
        gfn = os.path.join(conf_dir, 'proxy.d', __opts__['id'], 'grains')
    else:
        gfn = os.path.join(conf_dir, 'grains')

    # Load the grains already persisted so new values merge with, rather
    # than clobber, the existing file contents
    grains = {}
    if os.path.isfile(gfn):
        with salt.utils.files.fopen(gfn, 'rb') as fp_:
            try:
                grains = salt.utils.yaml.safe_load(fp_)
            except salt.utils.yaml.YAMLError as exc:
                return 'Unable to read existing grains file: {0}'.format(exc)
    if not isinstance(grains, dict):
        grains = {}

    # Apply the new values; a value of None with destructive=True removes
    # the key from both the file and the in-memory grains
    for key, val in six.iteritems(new_grains):
        if val is None and destructive is True:
            if key in grains:
                del grains[key]
            if key in __grains__:
                del __grains__[key]
        else:
            grains[key] = val
            __grains__[key] = val

    try:
        with salt.utils.files.fopen(gfn, 'w+') as fp_:
            salt.utils.yaml.safe_dump(grains, fp_, default_flow_style=False)
    except (IOError, OSError):
        # Lazy %-style args: formatting happens only if the record is emitted
        log.error(
            'Unable to write to grains file at %s. Check permissions.', gfn)

    # Touch the module_refresh cache file so the minion picks up the change
    fn_ = os.path.join(__opts__['cachedir'], 'module_refresh')
    try:
        with salt.utils.files.flopen(fn_, 'w+'):
            pass
    except (IOError, OSError):
        log.error('Unable to write to cache file %s. Check permissions.', fn_)

    if not __opts__.get('local', False):
        # Refresh the grains
        __salt__['saltutil.refresh_grains']()

    # Return the grains we just set to confirm everything was OK
    return new_grains
def config(name, reset=False, **kwargs):
    '''
    Modify configuration options for a given port. Multiple options can be
    specified. To see the available options for a port, use
    :mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`.

    name
        The port name, in ``category/name`` format

    reset : False
        If ``True``, runs a ``make rmconfig`` for the port, clearing its
        configuration before setting the desired options

    CLI Examples:

    .. code-block:: bash

        salt '*' ports.config security/nmap IPV6=off
    '''
    # Called only for validation (raises on a malformed port name); the
    # returned path itself is not needed here.
    _check_portname(name)

    if reset:
        rmconfig(name)
    configuration = showconfig(name, dict_return=True)
    if not configuration:
        raise CommandExecutionError(
            'Unable to get port configuration for {0!r}'.format(name))

    # Get top-level key for later reference
    pkg = next(iter(configuration))
    conf_ptr = configuration[pkg]

    # Drop Salt-injected private kwargs (leading underscore) and normalize
    # the values for comparison against the port's option set
    opts = dict(
        (str(x), _normalize(kwargs[x]))
        for x in kwargs
        if not x.startswith('_')
    )

    # Reject options the port does not define
    bad_opts = [x for x in opts if x not in conf_ptr]
    if bad_opts:
        raise SaltInvocationError(
            'The following opts are not valid for port {0}: {1}'.format(
                name, ', '.join(bad_opts)))

    # Only on/off are valid option values after normalization
    bad_vals = [
        '{0}={1}'.format(x, y) for x, y in six.iteritems(opts)
        if y not in ('on', 'off')
    ]
    if bad_vals:
        raise SaltInvocationError(
            'The following key/value pairs are invalid: {0}'.format(
                ', '.join(bad_vals)))

    conf_ptr.update(opts)
    _write_options(name, configuration)

    # Re-read the config and confirm every desired option took effect
    new_config = showconfig(name, dict_return=True)
    try:
        new_config = new_config[next(iter(new_config))]
    except (StopIteration, TypeError):
        return False
    return all(conf_ptr[x] == new_config.get(x) for x in conf_ptr)