def install(pkgs=None,  # pylint: disable=R0912,R0913,R0914
            requirements=None,
            bin_env=None,
            use_wheel=False,
            no_use_wheel=False,
            log=None,
            proxy=None,
            timeout=None,
            editable=None,
            find_links=None,
            index_url=None,
            extra_index_url=None,
            no_index=False,
            mirrors=None,
            build=None,
            target=None,
            download=None,
            download_cache=None,
            source=None,
            upgrade=False,
            force_reinstall=False,
            ignore_installed=False,
            exists_action=None,
            no_deps=False,
            no_install=False,
            no_download=False,
            global_options=None,
            install_options=None,
            user=None,
            cwd=None,
            pre_releases=False,
            cert=None,
            allow_all_external=False,
            allow_external=None,
            allow_unverified=None,
            process_dependency_links=False,
            saltenv='base',
            env_vars=None,
            use_vt=False,
            trusted_host=None,
            no_cache_dir=False,
            cache_dir=None,
            no_binary=None,
            **kwargs):
    '''
    Install packages with pip

    Install packages individually or from a pip requirements file. Install
    packages globally or to a virtualenv.

    pkgs
        Comma separated list of packages to install

    requirements
        Path to requirements file

    bin_env
        Path to pip (or to a virtualenv). Use this to pick a specific pip
        when several Python releases are installed (e.g. ``/usr/bin/pip-2.7``).
        A directory path is assumed to be a virtualenv.

        .. note::
            On Windows, when upgrading pip itself, ``bin_env`` must be the
            path to the virtualenv or python binary to use — pip cannot
            upgrade itself there.

    use_wheel
        Prefer wheel archives (requires pip>=1.4,<=9.0.3)

    no_use_wheel
        Force to not use wheel archives (requires pip>=1.4,<10.0.0)

    no_binary
        Force to not use binary packages (requires pip >= 7.0.0). Accepts
        ``:all:``, ``:none:``, or a comma/list of package names.

    log
        Log file where a complete (maximum verbosity) record will be kept

    proxy
        Proxy in the form ``user:[email protected]:port`` (the
        ``user:password@`` part is optional)

    timeout
        Socket timeout in whole seconds (default 15); floats are rejected

    editable
        Install something editable
        (e.g. ``git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed``)

    find_links
        URL(s) to search for packages

    index_url / extra_index_url / no_index
        Base index URL, extra index URLs, or ignore the index entirely.
        ``no_index`` is mutually exclusive with the other two.

    mirrors
        Mirror URL(s) to query (adds ``--use-mirrors``).

        .. warning::
            Deprecated and removed in pip 7.0.0; use ``index_url`` and/or
            ``extra_index_url`` instead.

    build / target / download
        Unpack into ``build`` dir / install into ``target`` dir / download
        into ``download`` dir instead of installing

    download_cache | cache_dir
        Cache downloaded packages in this directory

    source
        Check out ``editable`` packages into ``source`` dir

    upgrade / force_reinstall / ignore_installed
        Upgrade packages / reinstall even when up-to-date / reinstall
        ignoring installed state

    exists_action
        Action when a path already exists: (s)witch, (i)gnore, (w)ipe,
        (b)ackup

    no_deps / no_install / no_download
        Ignore dependencies / only download+unpack / only install already
        downloaded packages

    install_options / global_options
        Extra options for ``setup.py install`` / global options for the
        ``setup.py`` call before the install command

    user
        The user under which to run pip

    cwd
        Directory from which to run pip

    pre_releases
        Include pre-releases in the available versions (pip >= 1.4)

    cert
        Path to an alternate CA bundle

    allow_all_external / allow_external / allow_unverified
        Allow externally hosted and/or unverified files (comma separated
        lists for the latter two)

    process_dependency_links
        Enable the processing of dependency links

    env_vars
        Environment variables some builds depend on, as a dict/mapping.
        Example:

        .. code-block:: bash

            salt '*' pip.install django_app env_vars="{'CUSTOM_PATH': '/opt/django_app'}"

    trusted_host
        Mark this host as trusted even without (valid) HTTPS

    use_vt
        Use VT terminal emulation (see output while installing)

    no_cache_dir
        Disable the cache

    CLI Example:

    .. code-block:: bash

        salt '*' pip.install <package name>,<package2 name>
        salt '*' pip.install requirements=/path/to/requirements.txt
        salt '*' pip.install <package name> bin_env=/path/to/virtualenv
        salt '*' pip.install <package name> bin_env=/path/to/pip_bin

    Complicated CLI example::

        salt '*' pip.install markdown,django \
                editable=git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed upgrade=True no_deps=True
    '''
    # Deprecated kwarg: accepted but ignored, with a deprecation warning.
    if 'no_chown' in kwargs:
        salt.utils.versions.warn_until(
            'Fluorine',
            'The no_chown argument has been deprecated and is no longer used. '
            'Its functionality was removed in Boron.')
        kwargs.pop('no_chown')
    cmd = _get_pip_bin(bin_env)
    cmd.append('install')

    # Resolve requirements files (may fetch them from the fileserver and
    # create temp dirs that must be cleaned up in the finally block below).
    cleanup_requirements, error = _process_requirements(
        requirements=requirements, cmd=cmd, cwd=cwd, saltenv=saltenv,
        user=user)

    if error:
        return error

    cur_version = version(bin_env)

    # --use-wheel / --no-use-wheel only exist in a bounded pip version range;
    # outside the range the option is logged and silently dropped.
    if use_wheel:
        min_version = '1.4'
        max_version = '9.0.3'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
        if too_low or too_high:
            logger.error(
                'The --use-wheel option is only supported in pip between %s and '
                '%s. The version of pip detected is %s. This option '
                'will be ignored.', min_version, max_version, cur_version
            )
        else:
            cmd.append('--use-wheel')

    if no_use_wheel:
        min_version = '1.4'
        max_version = '9.0.3'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
        if too_low or too_high:
            logger.error(
                'The --no-use-wheel option is only supported in pip between %s and '
                '%s. The version of pip detected is %s. This option '
                'will be ignored.', min_version, max_version, cur_version
            )
        else:
            cmd.append('--no-use-wheel')

    if no_binary:
        min_version = '7.0.0'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        if too_low:
            logger.error(
                'The --no-binary option is only supported in pip %s and '
                'newer. The version of pip detected is %s. This option '
                'will be ignored.', min_version, cur_version
            )
        else:
            if isinstance(no_binary, list):
                no_binary = ','.join(no_binary)
            cmd.extend(['--no-binary', no_binary])

    if log:
        if os.path.isdir(log):
            raise IOError(
                '\'{0}\' is a directory. Use --log path_to_file'.format(log))
        elif not os.access(log, os.W_OK):
            raise IOError('\'{0}\' is not writeable'.format(log))
        cmd.extend(['--log', log])

    if proxy:
        cmd.extend(['--proxy', proxy])

    if timeout:
        try:
            if isinstance(timeout, float):
                # Catch floating point input, exception will be caught in
                # exception class below.
                raise ValueError('Timeout cannot be a float')
            int(timeout)
        except ValueError:
            raise ValueError(
                '\'{0}\' is not a valid timeout, must be an integer'.format(
                    timeout))
        cmd.extend(['--timeout', timeout])

    if find_links:
        if isinstance(find_links, six.string_types):
            find_links = [l.strip() for l in find_links.split(',')]
        for link in find_links:
            if not (salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link)):
                raise CommandExecutionError(
                    '\'{0}\' is not a valid URL or path'.format(link))
            cmd.extend(['--find-links', link])

    if no_index and (index_url or extra_index_url):
        raise CommandExecutionError(
            '\'no_index\' and (\'index_url\' or \'extra_index_url\') are '
            'mutually exclusive.')

    if index_url:
        if not salt.utils.url.validate(index_url, VALID_PROTOS):
            raise CommandExecutionError(
                '\'{0}\' is not a valid URL'.format(index_url))
        cmd.extend(['--index-url', index_url])

    if extra_index_url:
        if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
            raise CommandExecutionError(
                '\'{0}\' is not a valid URL'.format(extra_index_url))
        cmd.extend(['--extra-index-url', extra_index_url])

    if no_index:
        cmd.append('--no-index')

    if mirrors:
        # https://github.com/pypa/pip/pull/2641/files#diff-3ef137fb9ffdd400f117a565cd94c188L216
        if salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2='7.0.0'):
            raise CommandExecutionError(
                'pip >= 7.0.0 does not support mirror argument:'
                ' use index_url and/or extra_index_url instead')

        if isinstance(mirrors, six.string_types):
            mirrors = [m.strip() for m in mirrors.split(',')]

        cmd.append('--use-mirrors')
        for mirror in mirrors:
            if not mirror.startswith('http://'):
                raise CommandExecutionError(
                    '\'{0}\' is not a valid URL'.format(mirror))
            cmd.extend(['--mirrors', mirror])

    if build:
        cmd.extend(['--build', build])

    if target:
        cmd.extend(['--target', target])

    if download:
        cmd.extend(['--download', download])

    if download_cache or cache_dir:
        # pip >= 6.0 renamed --download-cache to --cache-dir
        cmd.extend(['--cache-dir' if salt.utils.versions.compare(
            ver1=cur_version, oper='>=', ver2='6.0'
        ) else '--download-cache', download_cache or cache_dir])

    if source:
        cmd.extend(['--source', source])

    if upgrade:
        cmd.append('--upgrade')

    if force_reinstall:
        cmd.append('--force-reinstall')

    if ignore_installed:
        cmd.append('--ignore-installed')

    if exists_action:
        if exists_action.lower() not in ('s', 'i', 'w', 'b'):
            raise CommandExecutionError(
                'The exists_action pip option only supports the values '
                's, i, w, and b. \'{0}\' is not valid.'.format(exists_action))
        cmd.extend(['--exists-action', exists_action])

    if no_deps:
        cmd.append('--no-deps')

    if no_install:
        cmd.append('--no-install')

    if no_download:
        cmd.append('--no-download')

    if no_cache_dir:
        cmd.append('--no-cache-dir')

    if pre_releases:
        # Check the locally installed pip version
        pip_version = cur_version

        # From pip v1.4 the --pre flag is available
        if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='1.4'):
            cmd.append('--pre')

    if cert:
        cmd.extend(['--cert', cert])

    if global_options:
        if isinstance(global_options, six.string_types):
            global_options = [go.strip() for go in global_options.split(',')]

        for opt in global_options:
            cmd.extend(['--global-option', opt])

    if install_options:
        if isinstance(install_options, six.string_types):
            install_options = [io.strip() for io in install_options.split(',')]

        for opt in install_options:
            cmd.extend(['--install-option', opt])

    if pkgs:
        if not isinstance(pkgs, list):
            try:
                pkgs = [p.strip() for p in pkgs.split(',')]
            except AttributeError:
                pkgs = [p.strip() for p in six.text_type(pkgs).split(',')]
        pkgs = salt.utils.data.stringify(salt.utils.data.decode_list(pkgs))

        # It's possible we replaced version-range commas with semicolons so
        # they would survive the previous line (in the pip.installed state).
        # Put the commas back in while making sure the names are contained in
        # quotes, this allows for proper version spec passing salt>=0.17.0
        cmd.extend([p.replace(';', ',') for p in pkgs])
    elif not any([requirements, editable]):
        # Starting with pip 10.0.0, if no packages are specified in the
        # command, it returns a retcode 1.  So instead of running the command,
        # just return the output without running pip.
        return {'retcode': 0, 'stdout': 'No packages to install.'}

    if editable:
        egg_match = re.compile(r'(?:#|#.*?&)egg=([^&]*)')
        if isinstance(editable, six.string_types):
            editable = [e.strip() for e in editable.split(',')]

        for entry in editable:
            # Is the editable local?
            if not (entry == '.' or entry.startswith(('file://', '/'))):
                match = egg_match.search(entry)

                if not match or not match.group(1):
                    # Missing #egg=theEggName
                    raise CommandExecutionError(
                        'You must specify an egg for this editable')
            cmd.extend(['--editable', entry])

    if allow_all_external:
        cmd.append('--allow-all-external')

    if allow_external:
        if isinstance(allow_external, six.string_types):
            allow_external = [p.strip() for p in allow_external.split(',')]

        for pkg in allow_external:
            cmd.extend(['--allow-external', pkg])

    if allow_unverified:
        if isinstance(allow_unverified, six.string_types):
            allow_unverified = \
                [p.strip() for p in allow_unverified.split(',')]

        for pkg in allow_unverified:
            cmd.extend(['--allow-unverified', pkg])

    if process_dependency_links:
        cmd.append('--process-dependency-links')

    if trusted_host:
        cmd.extend(['--trusted-host', trusted_host])

    cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)

    if kwargs:
        cmd_kwargs.update(kwargs)

    if env_vars:
        cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))

    try:
        if cwd:
            cmd_kwargs['cwd'] = cwd

        if bin_env and os.path.isdir(bin_env):
            # Make the virtualenv's python/pip resolve correctly.
            cmd_kwargs.setdefault('env', {})['VIRTUAL_ENV'] = bin_env

        logger.debug(
            'TRY BLOCK: end of pip.install -- cmd: %s, cmd_kwargs: %s',
            cmd, cmd_kwargs)

        return __salt__['cmd.run_all'](cmd, python_shell=False, **cmd_kwargs)
    finally:
        # Always drop cached pip state and remove any temp dirs created for
        # fetched requirements files, even when the command raised.
        _clear_context(bin_env)
        for tempdir in [cr for cr in cleanup_requirements if cr is not None]:
            if os.path.isdir(tempdir):
                shutil.rmtree(tempdir)
def get_hwclock():
    '''
    Get current hardware clock setting (UTC or localtime)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    cmd = ''
    if salt.utils.which('timedatectl'):
        # systemd hosts: parse the 'RTC in local TZ' line of timedatectl.
        output = _timedatectl()
        for raw_line in output['stdout'].splitlines():
            stripped = raw_line.strip()
            if 'rtc in local tz' not in stripped.lower():
                continue
            try:
                flag = stripped.split(':')[-1].strip().lower()
            except IndexError:
                continue
            return 'localtime' if flag == 'yes' else 'UTC'
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(output['stdout'])
        raise CommandExecutionError(msg)

    # Non-systemd hosts: dispatch on the OS family.
    os_family = __grains__['os_family']
    for family in ('RedHat', 'Suse'):
        if family in os_family:
            cmd = ['tail', '-n', '1', '/etc/adjtime']
            return __salt__['cmd.run'](cmd, python_shell=False)

    if 'Debian' in __grains__['os_family']:
        # Original way to look up hwclock on Debian-based systems
        try:
            with salt.utils.fopen('/etc/default/rcS', 'r') as rcs_file:
                for line in rcs_file:
                    if re.match(r'^\s*#', line):
                        continue
                    if 'UTC=' in line:
                        is_utc = line.rstrip('\n').split('=')[-1].lower()
                        return 'UTC' if is_utc == 'yes' else 'localtime'
        except IOError:
            pass
        # Since Wheezy
        cmd = ['tail', '-n', '1', '/etc/adjtime']
        return __salt__['cmd.run'](cmd, python_shell=False)

    if 'Gentoo' in __grains__['os_family']:
        offset_file = '/etc/conf.d/hwclock'
        try:
            with salt.utils.fopen(offset_file, 'r') as conf_file:
                for line in conf_file:
                    if line.startswith('clock='):
                        return line.rstrip('\n').split('=')[-1].strip('\'"')
            raise CommandExecutionError(
                'Offset information not found in {0}'.format(
                    offset_file))
        except IOError as exc:
            raise CommandExecutionError(
                'Problem reading offset file {0}: {1}'.format(
                    offset_file, exc.strerror))

    if 'Solaris' in __grains__['os_family']:
        offset_file = '/etc/rtc_config'
        try:
            with salt.utils.fopen(offset_file, 'r') as rtc_file:
                for line in rtc_file:
                    if line.startswith('zone_info=GMT'):
                        return 'UTC'
            return 'localtime'
        except IOError as exc:
            if exc.errno == errno.ENOENT:
                # offset file does not exist
                return 'UTC'
            raise CommandExecutionError(
                'Problem reading offset file {0}: {1}'.format(
                    offset_file, exc.strerror))
def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=None):
    '''
    Run a query on a Cassandra cluster and return a dictionary.

    :param query:          The query to execute.
    :type  query:          str
    :param contact_points: The Cassandra cluster addresses, can either be a
                           string or a list of IPs.
    :type  contact_points: str | list[str]
    :param cql_user:       The Cassandra user if authentication is turned on.
    :type  cql_user:       str
    :param cql_pass:       The Cassandra user password if authentication is
                           turned on.
    :type  cql_pass:       str
    :param port:           The Cassandra cluster port, defaults to None.
    :type  port:           int
    :return:               A dictionary from the return values of the query
    :rtype:                list[dict]
    '''
    try:
        cluster, session = _connect(contact_points=contact_points, port=port,
                                    cql_user=cql_user, cql_pass=cql_pass)
    except CommandExecutionError:
        log.critical('Could not get Cassandra cluster session.')
        raise
    except BaseException as e:
        log.critical(
            'Unexpected error while getting Cassandra cluster session: {0}'.
            format(str(e)))
        raise

    # Have the driver hand rows back as dicts rather than named tuples.
    session.row_factory = dict_factory
    ret = []

    try:
        results = session.execute(query)
    except BaseException as e:
        log.error('Failed to execute query: {0}\n reason: {1}'.format(
            query, str(e)))
        msg = "ERROR: Cassandra query failed: {0} reason: {1}".format(
            query, str(e))
        raise CommandExecutionError(msg)

    if results:
        for result in results:
            values = {}
            for key, value in six.iteritems(result):
                # Salt won't return dictionaries with odd types like uuid.UUID
                if not isinstance(value, six.text_type):
                    # Must support Cassandra collection types.
                    # Namely, Cassandras set, list, and map collections.
                    if not isinstance(value, (set, list, dict)):
                        # BUGFIX: use six.text_type instead of str(): on
                        # Python 2, str() raises UnicodeEncodeError for
                        # non-ASCII values.  This also matches the other
                        # cql_query implementation in this file.
                        value = six.text_type(value)
                values[key] = value
            ret.append(values)

    return ret
def subvolume_list(
    path,
    parent_id=False,
    absolute=False,
    ogeneration=False,
    generation=False,
    subvolumes=False,
    uuid=False,
    parent_uuid=False,
    sent_subvolume_uuid=False,
    snapshots=False,
    readonly=False,
    deleted=False,
    generation_cmp=None,
    ogeneration_cmp=None,
    sort=None,
):
    """
    List the subvolumes present in the filesystem.

    path
        Mount point for the subvolume

    parent_id
        Print parent ID

    absolute
        Print all the subvolumes in the filesystem and distinguish between
        absolute and relative path with respect to the given <path>

    ogeneration
        Print the ogeneration of the subvolume

    generation
        Print the generation of the subvolume

    subvolumes
        Print only subvolumes below specified <path>

    uuid
        Print the UUID of the subvolume

    parent_uuid
        Print the parent uuid of subvolumes (and snapshots)

    sent_subvolume_uuid
        Print the UUID of the sent subvolume, where the subvolume is the
        result of a receive operation

    snapshots
        Only snapshot subvolumes in the filesystem will be listed

    readonly
        Only readonly subvolumes in the filesystem will be listed

    deleted
        Only deleted subvolumes that are not yet cleaned

    generation_cmp
        List subvolumes in the filesystem that its generation is >=, <= or =
        value. '+' means >= value, '-' means <= value, If there is neither
        '+' nor '-', it means = value

    ogeneration_cmp
        List subvolumes in the filesystem that its ogeneration is >=, <= or
        = value

    sort
        List subvolumes in order by specified items. Possible values:
        * rootid
        * gen
        * ogen
        * path
        You can add '+' or '-' in front of each items, '+' means ascending,
        '-' means descending. The default is ascending. You can combine
        several of them in a list.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_list /var/volumes/tmp
        salt '*' btrfs.subvolume_list /var/volumes/tmp path=True
        salt '*' btrfs.subvolume_list /var/volumes/tmp sort='[-rootid]'

    """
    # IDIOM FIX: isinstance instead of `type(sort) is not list`.
    if sort and not isinstance(sort, list):
        raise CommandExecutionError("Sort parameter must be a list")

    # Every legal sort key is an optional '+'/'-' prefix plus an attribute.
    valid_sorts = [
        "".join((order, attrib))
        for order, attrib in itertools.product(
            ("-", "", "+"), ("rootid", "gen", "ogen", "path")
        )
    ]
    if sort and not all(s in valid_sorts for s in sort):
        raise CommandExecutionError("Value for sort not recognized")

    cmd = ["btrfs", "subvolume", "list"]

    # Map boolean parameters to their btrfs CLI flags.
    params = (
        (parent_id, "-p"),
        (absolute, "-a"),
        (ogeneration, "-c"),
        (generation, "-g"),
        (subvolumes, "-o"),
        (uuid, "-u"),
        (parent_uuid, "-q"),
        (sent_subvolume_uuid, "-R"),
        (snapshots, "-s"),
        (readonly, "-r"),
        (deleted, "-d"),
    )
    cmd.extend(p[1] for p in params if p[0])

    if generation_cmp:
        cmd.extend(["-G", generation_cmp])

    if ogeneration_cmp:
        cmd.extend(["-C", ogeneration_cmp])

    # We already validated the content of the list
    if sort:
        cmd.append("--sort={}".format(",".join(sort)))

    cmd.append(path)

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)

    # Parse the output. ID and gen are always at the beginning, and
    # path is always at the end. There is only one column that
    # contains space (top level), and the path value can also have
    # spaces. The issue is that we do not know how many spaces do we
    # have in the path name, so any classic solution based on split
    # will fail.
    #
    # This list is in order.
    columns = (
        "ID",
        "gen",
        "cgen",
        "parent",
        "top level",
        "otime",
        "parent_uuid",
        "received_uuid",
        "uuid",
        "path",
    )
    result = []
    for line in res["stdout"].splitlines():
        table = {}
        for key in columns:
            value, line = _pop(line, key, key == "path")
            if value:
                table[key.lower()] = value
        # If line is not empty here, we are not able to parse it
        if not line:
            result.append(table)

    return result
def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=None):
    """
    Run a query on a Cassandra cluster and return a dictionary.

    :param query: The query to execute.  May also be a dict keyed by
        Cassandra version strings (see the version-selection logic below).
    :type query: str
    :param contact_points: The Cassandra cluster addresses, can either be a
        string or a list of IPs.
    :type contact_points: str | list[str]
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :param params: The parameters for the query, optional.
    :type params: str
    :return: A dictionary from the return values of the query
    :rtype: list[dict]

    CLI Example:

    .. code-block:: bash

        salt 'cassandra-server' cassandra_cql.cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'"
    """
    try:
        cluster, session = _connect(
            contact_points=contact_points,
            port=port,
            cql_user=cql_user,
            cql_pass=cql_pass,
        )
    except CommandExecutionError:
        log.critical("Could not get Cassandra cluster session.")
        raise
    except BaseException as e:
        log.critical("Unexpected error while getting Cassandra cluster session: %s", e)
        raise

    # Hand rows back as dicts instead of named tuples.
    session.row_factory = dict_factory
    ret = []

    # Cassandra changed their internal schema from v2 to v3
    # If the query contains a dictionary sorted by versions
    # Find the query for the current cluster version.
    # https://issues.apache.org/jira/browse/CASSANDRA-6717
    if isinstance(query, dict):
        cluster_version = version(
            contact_points=contact_points,
            port=port,
            cql_user=cql_user,
            cql_pass=cql_pass,
        )
        match = re.match(r"^(\d+)\.(\d+)(?:\.(\d+))?", cluster_version)
        major, minor, point = match.groups()
        # try to find the specific version in the query dictionary
        # then try the major version
        # otherwise default to the highest version number
        # NOTE(review): max(query) compares the dict's string keys
        # lexicographically, not numerically — e.g. '9' > '10'.  Presumably
        # fine for the single-digit major versions in use; verify if keys
        # ever reach two digits.
        try:
            query = query[cluster_version]
        except KeyError:
            query = query.get(major, max(query))
        log.debug("New query is: %s", query)

    try:
        results = session.execute(query)
    except BaseException as e:
        log.error("Failed to execute query: %s\n reason: %s", query, e)
        msg = "ERROR: Cassandra query failed: {0} reason: {1}".format(query, e)
        raise CommandExecutionError(msg)

    if results:
        for result in results:
            values = {}
            for key, value in six.iteritems(result):
                # Salt won't return dictionaries with odd types like uuid.UUID
                if not isinstance(value, six.text_type):
                    # Must support Cassandra collection types.
                    # Namely, Cassandras set, list, and map collections.
                    if not isinstance(value, (set, list, dict)):
                        value = six.text_type(value)
                values[key] = value
            ret.append(values)
    return ret
def persist(name, value, config=None):
    '''
    Assign and persist a simple sysctl parameter for this minion. If ``config``
    is not specified, a sensible default will be chosen using
    :mod:`sysctl.default_config <salt.modules.linux_sysctl.default_config>`.

    name
        The sysctl parameter name (e.g. ``net.ipv4.ip_forward``)

    value
        The value to assign and persist

    config
        Path to the sysctl configuration file; defaults to the platform's
        standard location

    Returns ``'Updated'`` when a change was made, ``'Already set'`` when the
    config and running value already match; raises
    ``CommandExecutionError`` on read/write failures or unknown sysctls.

    CLI Example:

    .. code-block:: bash

        salt '*' sysctl.persist net.ipv4.ip_forward 1
    '''
    if config is None:
        config = default_config()
    running = show()
    edited = False
    # If the sysctl.conf is not present, add it
    if not os.path.isfile(config):
        try:
            with salt.utils.fopen(config, 'w+') as _fh:
                _fh.write('#\n# Kernel sysctl configuration\n#\n')
        except (IOError, OSError):
            msg = 'Could not write to file: {0}'
            raise CommandExecutionError(msg.format(config))

    # Read the existing sysctl.conf
    nlines = []
    try:
        with salt.utils.fopen(config, 'r') as _fh:
            # Use readlines because this should be a small file
            # and it seems unnecessary to indent the below for
            # loop since it is a fairly large block of code.
            config_data = _fh.readlines()
    except (IOError, OSError):
        msg = 'Could not read from file: {0}'
        raise CommandExecutionError(msg.format(config))

    for line in config_data:
        if line.startswith('#'):
            nlines.append(line)
            continue
        if '=' not in line:
            nlines.append(line)
            continue

        # Strip trailing whitespace and split the k,v
        comps = [i.strip() for i in line.split('=', 1)]

        # BUGFIX: check for a malformed line *before* indexing comps[1].
        # The original performed this check only after accessing comps[1].
        # (split('=', 1) on a line containing '=' always yields two parts,
        # so the guard is purely defensive, but it must come first to be
        # meaningful.)
        if len(comps) < 2:
            nlines.append(line)
            continue

        # On Linux procfs, files such as /proc/sys/net/ipv4/tcp_rmem or any
        # other sysctl with whitespace in it consistently uses 1 tab.  Lets
        # allow our users to put a space or tab between multi-value sysctls
        # and have salt not try to set it every single time.
        if isinstance(comps[1], string_types) and ' ' in comps[1]:
            comps[1] = re.sub(r'\s+', '\t', comps[1])

        # Do the same thing for the value 'just in case'
        if isinstance(value, string_types) and ' ' in value:
            value = re.sub(r'\s+', '\t', value)

        if name == comps[0]:
            # This is the line to edit
            if str(comps[1]) == str(value):
                # It is correct in the config, check if it is correct in /proc
                if name in running:
                    if str(running[name]) != str(value):
                        assign(name, value)
                        return 'Updated'
                    else:
                        return 'Already set'
                # It is missing from the running config. We can not set it.
                else:
                    raise CommandExecutionError(
                        'sysctl {0} does not exist'.format(name))
            nlines.append('{0} = {1}\n'.format(name, value))
            edited = True
            continue
        else:
            nlines.append(line)

    # Parameter was not present in the config file at all: append it.
    if not edited:
        nlines.append('{0} = {1}\n'.format(name, value))

    try:
        with salt.utils.fopen(config, 'w+') as _fh:
            _fh.writelines(nlines)
    except (IOError, OSError):
        msg = 'Could not write to file: {0}'
        raise CommandExecutionError(msg.format(config))

    # Apply the new value to the running kernel as well.
    assign(name, value)
    return 'Updated'
def convert(device, permanent=False, keeplf=False):
    """
    Convert ext2/3/4 to BTRFS. Device should be mounted.

    Filesystem can be converted temporarily so the further processing and
    rollback is possible, or permanently, where previous extended filesystem
    image gets deleted. Please note, permanent conversion takes a while as
    BTRFS filesystem needs to be properly rebalanced afterwards.

    General options:

    * **permanent**: Specify if the migration should be permanent (false by default)
    * **keeplf**: Keep ``lost+found`` of the partition (removed by default,
                but still in the image, if not permanent migration)

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.convert /dev/sda1
        salt '*' btrfs.convert /dev/sda1 permanent=True
    """
    out = __salt__["cmd.run_all"]("blkid -o export")
    salt.utils.fsutils._verify_run(out)
    devices = salt.utils.fsutils._blkid_output(out["stdout"])
    if not devices.get(device):
        # BUGFIX: original message read 'was is not found'.
        raise CommandExecutionError(
            'The device "{}" was not found.'.format(device))

    if not devices[device]["type"] in ["ext2", "ext3", "ext4"]:
        raise CommandExecutionError(
            'The device "{}" is a "{}" file system.'.format(
                device, devices[device]["type"]))

    mountpoint = (salt.utils.fsutils._get_mounts(devices[device]["type"]).get(
        device, [{
            "mount_point": None
        }])[0].get("mount_point"))
    if mountpoint == "/":
        raise CommandExecutionError(
            """One does not simply converts a root filesystem!

Converting an extended root filesystem to BTRFS is a careful
and lengthy process, among other steps including the following
requirements:

  1. Proper verified backup.
  2. System outage.
  3. Offline system access.

For further details, please refer to your OS vendor
documentation regarding this topic.
""")

    salt.utils.fsutils._verify_run(__salt__["cmd.run_all"](
        "umount {}".format(device)))

    # Snapshot the pre-conversion state for the return payload.
    ret = {
        "before": {
            "fsck_status": _fsck_ext(device),
            "mount_point": mountpoint,
            "type": devices[device]["type"],
        }
    }

    salt.utils.fsutils._verify_run(__salt__["cmd.run_all"](
        "btrfs-convert {}".format(device)))
    salt.utils.fsutils._verify_run(__salt__["cmd.run_all"](
        "mount {} {}".format(device, mountpoint)))

    # Refresh devices
    out = __salt__["cmd.run_all"]("blkid -o export")
    salt.utils.fsutils._verify_run(out)
    devices = salt.utils.fsutils._blkid_output(out["stdout"])

    ret["after"] = {
        "fsck_status": "N/A",  # ToDO
        "mount_point": mountpoint,
        "type": devices[device]["type"],
    }

    # Post-migration procedures
    image_path = "{}/ext2_saved".format(mountpoint)
    orig_fstype = ret["before"]["type"]

    if not os.path.exists(image_path):
        raise CommandExecutionError(
            'BTRFS migration went wrong: the image "{}" not found!'.format(
                image_path))

    if not permanent:
        ret["after"]["{}_image".format(orig_fstype)] = image_path
        # BUGFIX: request text output so the return dict holds str, not
        # bytes, as Salt return data should.
        image_info_proc = subprocess.run(
            ["file", "{}/image".format(image_path)],
            check=True,
            stdout=subprocess.PIPE,
            universal_newlines=True)
        ret["after"]["{}_image_info".format(
            orig_fstype)] = image_info_proc.stdout.strip()
    else:
        ret["after"]["{}_image".format(orig_fstype)] = "removed"
        ret["after"]["{}_image_info".format(orig_fstype)] = "N/A"

        # Permanent conversion: drop the saved image and rebalance.
        salt.utils.fsutils._verify_run(__salt__["cmd.run_all"](
            "btrfs subvolume delete {}".format(image_path)))
        out = __salt__["cmd.run_all"](
            "btrfs filesystem balance {}".format(mountpoint))
        salt.utils.fsutils._verify_run(out)
        ret["after"]["balance_log"] = out["stdout"]

    lost_found = "{}/lost+found".format(mountpoint)
    if os.path.exists(lost_found) and not keeplf:
        salt.utils.fsutils._verify_run(__salt__["cmd.run_all"](
            "rm -rf {}".format(lost_found)))

    return ret
def install(name=None, sources=None, saltenv="base", **kwargs):
    """
    Install the passed package. Can install packages from the following
    sources:

    * Locally (package already exists on the minion)
    * HTTP/HTTPS server
    * FTP server
    * Salt master

    Returns a dict containing the new package names and versions:

    .. code-block:: python

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Examples:

    .. code-block:: bash

        # Installing a data stream pkg that already exists on the minion
        salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
        salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'

        # Installing a data stream pkg that exists on the salt master
        salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
        salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'

        # Installing a data stream pkg that exists on a HTTP server
        salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
        salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'

    If working with solaris zones and you want to install a package only in
    the global zone you can pass 'current_zone_only=True' to salt to have the
    package only installed in the global zone. (Behind the scenes this is
    passing '-G' to the pkgadd command.) Solaris default when installing a
    package in the global zone is to install it in all zones. This overrides
    that and installs the package only in the global.

    .. code-block:: bash

        # Installing a data stream package only in the global zone:
        salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True

    By default salt automatically provides an adminfile, to automate package
    installation, with these options set::

        email=
        instance=quit
        partial=nocheck
        runlevel=nocheck
        idepend=nocheck
        rdepend=nocheck
        space=nocheck
        setuid=nocheck
        conflict=nocheck
        action=nocheck
        basedir=default

    You can override any of these options in two ways. First you can
    optionally pass any of the options as a kwarg to the module/state to
    override the default value or you can optionally pass the 'admin_source'
    option providing your own adminfile to the minions.

    Note: You can find all of the possible options to provide to the adminfile
    by reading the admin man page:

    .. code-block:: bash

        man -s 4 admin

    .. code-block:: bash

        # Overriding the 'instance' adminfile option when calling the module directly
        salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"

        # Providing your own adminfile when calling the module directly
        salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'

    SLS Example:

    .. code-block:: yaml

        # Overriding the 'instance' adminfile option when used in a state
        SMClgcc346:
          pkg.installed:
            - sources:
              - SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
            - instance: overwrite

        # Providing your own adminfile when using states
        <pkg name>:
          pkg.installed:
            - sources:
              - <pkg name>: salt://pkgs/<pkg filename>
            - admin_source: salt://pkgs/<adminfile filename>

    .. note::
        The ID declaration is ignored, as the package name is read from the
        ``sources`` parameter.
    """
    if salt.utils.data.is_true(kwargs.get("refresh")):
        log.warning("'refresh' argument not implemented for solarispkg "
                    "module")

    # pkgs is not supported, but must be passed here for API compatibility
    pkgs = kwargs.pop("pkgs", None)
    try:
        pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](name,
                                                                      pkgs,
                                                                      sources,
                                                                      **kwargs)
    except MinionError as exc:
        raise CommandExecutionError(exc)

    if pkg_params is None or len(pkg_params) == 0:
        return {}

    if not sources:
        log.error('"sources" param required for solaris pkg_add installs')
        return {}

    try:
        if "admin_source" in kwargs:
            adminfile = __salt__["cp.cache_file"](kwargs["admin_source"],
                                                  saltenv)
        else:
            adminfile = _write_adminfile(kwargs)

        old = list_pkgs()
        cmd_prefix = ["/usr/sbin/pkgadd", "-n", "-a", adminfile]

        # Only makes sense in a global zone but works fine in non-globals.
        if kwargs.get("current_zone_only") in (True, "True"):
            # BUGFIX: the original did `cmd_prefix += "-G "`, which extends
            # the *list* with the individual characters '-', 'G', ' ' and
            # produces a broken pkgadd invocation.  Append the flag as one
            # argument instead.
            cmd_prefix.append("-G")

        errors = []
        for pkg in pkg_params:
            cmd = cmd_prefix + ["-d", pkg, "all"]
            # Install the package{s}
            out = __salt__["cmd.run_all"](cmd,
                                          output_loglevel="trace",
                                          python_shell=False)

            if out["retcode"] != 0 and out["stderr"]:
                errors.append(out["stderr"])

        __context__.pop("pkg.list_pkgs", None)
        new = list_pkgs()
        ret = salt.utils.data.compare_dicts(old, new)

        if errors:
            raise CommandExecutionError(
                "Problem encountered installing package(s)",
                info={
                    "errors": errors,
                    "changes": ret
                },
            )
    finally:
        # Remove the temp adminfile
        if "admin_source" not in kwargs:
            try:
                os.remove(adminfile)
            except (NameError, OSError):
                pass

    return ret
def remove(name=None, pkgs=None, saltenv="base", **kwargs):
    """
    Remove packages with pkgrm.

    name
        The name of the package to be deleted

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

        .. versionadded:: 0.16.0

    An adminfile is generated automatically to drive pkgrm; individual
    adminfile options may be overridden via kwargs, or a complete adminfile
    supplied with ``admin_source``. See ``man -s 4 admin`` for the available
    options.

    Returns a dict containing the changes.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove SUNWgit
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    """
    try:
        parsed = __salt__["pkg_resource.parse_targets"](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)

    old = list_pkgs()
    # Only act on packages that are actually installed
    targets = [pkg for pkg in parsed if pkg in old]
    if not targets:
        return {}

    try:
        if "admin_source" in kwargs:
            adminfile = __salt__["cp.cache_file"](kwargs["admin_source"], saltenv)
        else:
            # Write a temporary adminfile holding the kwarg overrides
            adminfile = _write_adminfile(kwargs)

        # Remove the package(s)
        out = __salt__["cmd.run_all"](
            ["/usr/sbin/pkgrm", "-n", "-a", adminfile] + targets,
            python_shell=False,
            output_loglevel="trace",
        )

        errors = []
        if out["retcode"] != 0 and out["stderr"]:
            errors.append(out["stderr"])

        __context__.pop("pkg.list_pkgs", None)
        new = list_pkgs()
        ret = salt.utils.data.compare_dicts(old, new)

        if errors:
            raise CommandExecutionError(
                "Problem encountered removing package(s)",
                info={
                    "errors": errors,
                    "changes": ret
                },
            )
    finally:
        # Clean up the generated adminfile; a cached admin_source is kept.
        # NameError covers the case where adminfile was never assigned.
        if "admin_source" not in kwargs:
            try:
                os.remove(adminfile)
            except (NameError, OSError):
                pass

    return ret
def get_milestone(number=None, name=None, repo_name=None, profile='github', output='min'): ''' Return information about a single milestone in a named repository. .. versionadded:: 2016.11.0 number The number of the milestone to retrieve. If provided, this option will be favored over ``name``. name The name of the milestone to retrieve. repo_name The name of the repository for which to list issues. This argument is required, either passed via the CLI, or defined in the configured profile. A ``repo_name`` passed as a CLI argument will override the repo_name defined in the configured profile, if provided. profile The name of the profile configuration to use. Defaults to ``github``. output The amount of data returned by each issue. Defaults to ``min``. Change to ``full`` to see all issue output. CLI Example: .. code-block:: bash salt myminion github.get_milestone 72 salt myminion github.get_milestone name=my_milestone ''' ret = {} if not any([number, name]): raise CommandExecutionError( 'Either a milestone \'name\' or \'number\' must be provided.') org_name = _get_config_value(profile, 'org_name') if repo_name is None: repo_name = _get_config_value(profile, 'repo_name') action = '/'.join(['repos', org_name, repo_name]) if number: command = 'milestones/' + str(number) milestone_data = _query(profile, action=action, command=command) milestone_id = milestone_data.get('id') if output == 'full': ret[milestone_id] = milestone_data else: milestone_data.pop('creator') milestone_data.pop('html_url') milestone_data.pop('labels_url') ret[milestone_id] = milestone_data return ret else: milestones = get_milestones(repo_name=repo_name, profile=profile, output=output) for key, val in six.iteritems(milestones): if val.get('title') == name: ret[key] = val return ret return ret
def create(path,
           venv_bin=None,
           system_site_packages=False,
           distribute=False,
           clear=False,
           python=None,
           extra_search_dir=None,
           never_download=None,
           prompt=None,
           pip=False,
           symlinks=None,
           upgrade=None,
           user=None,
           use_vt=False,
           saltenv='base',
           **kwargs):
    '''
    Create a virtualenv

    path
        The path to the virtualenv to be created

    venv_bin
        The name (and optionally path) of the virtualenv command. This can
        also be set globally in the minion config file as
        ``virtualenv.venv_bin``. Defaults to ``virtualenv``.

    system_site_packages : False
        Passthrough argument given to virtualenv or pyvenv

    distribute : False
        Passthrough argument given to virtualenv

    pip : False
        Install pip after creating a virtual environment. Implies
        ``distribute=True``

    clear : False
        Passthrough argument given to virtualenv or pyvenv

    python : None (default)
        Passthrough argument given to virtualenv

    extra_search_dir : None (default)
        Passthrough argument given to virtualenv

    never_download : None (default)
        Passthrough argument given to virtualenv if True

    prompt : None (default)
        Passthrough argument given to virtualenv if not None

    symlinks : None
        Passthrough argument given to pyvenv if True

    upgrade : None
        Passthrough argument given to pyvenv if True

    user : None
        Set ownership for the virtualenv

        .. note::
            On Windows you must also pass a ``password`` parameter.
            Additionally, the user must have permissions to the location where
            the virtual environment is being created

    runas : None
        Set ownership for the virtualenv

        .. deprecated:: 2014.1.0
            ``user`` should be used instead

    use_vt : False
        Use VT terminal emulation (see output while installing)

        .. versionadded:: 2015.5.0

    saltenv : 'base'
        Specify a different environment. The default environment is ``base``.

        .. versionadded:: 2014.1.0

    .. note::
        The ``runas`` argument is deprecated as of 2014.1.0. ``user`` should
        be used instead.

    CLI Example:

    .. code-block:: bash

        salt '*' virtualenv.create /path/to/new/virtualenv
    '''
    if venv_bin is None:
        # NOTE(review): if neither opts nor pillar define 'venv_bin' this
        # stays None and the membership test below raises TypeError --
        # presumably a default is configured elsewhere; confirm.
        venv_bin = __opts__.get('venv_bin') or __pillar__.get('venv_bin')

    cmd = [venv_bin]

    # The two tools accept different option sets: virtualenv-only options are
    # rejected in the pyvenv branch and vice versa.
    if 'pyvenv' not in venv_bin:
        # ----- Stop the user if pyvenv only options are used --------------->
        # If any of the following values are not None, it means that the user
        # is actually passing a True or False value. Stop Him!
        if upgrade is not None:
            raise CommandExecutionError(
                'The `upgrade`(`--upgrade`) option is not supported '
                'by \'{0}\''.format(venv_bin))
        elif symlinks is not None:
            raise CommandExecutionError(
                'The `symlinks`(`--symlinks`) option is not supported '
                'by \'{0}\''.format(venv_bin))
        # <---- Stop the user if pyvenv only options are used ----------------

        # Virtualenv package. Determine the installed virtualenv version,
        # preferring the importable module; 'rc' is stripped so release
        # candidates such as '1.9rc1' still parse as integers.
        try:
            import virtualenv
            version = getattr(virtualenv, '__version__',
                              virtualenv.virtualenv_version)
            virtualenv_version_info = tuple(
                [int(i) for i in version.split('rc')[0].split('.')])
        except ImportError:
            # Unable to import?? Let's parse the version from the console
            version_cmd = [venv_bin, '--version']
            ret = __salt__['cmd.run_all'](version_cmd,
                                          runas=user,
                                          python_shell=False,
                                          **kwargs)

            if ret['retcode'] > 0 or not ret['stdout'].strip():
                raise CommandExecutionError(
                    'Unable to get the virtualenv version output using \'{0}\'. '
                    'Returned data: {1}'.format(version_cmd, ret))

            virtualenv_version_info = tuple([
                int(i)
                for i in ret['stdout'].strip().split('rc')[0].split('.')
            ])

        if distribute:
            # --distribute is a no-op in virtualenv >= 1.10
            if virtualenv_version_info >= (1, 10):
                log.info(
                    'The virtualenv \'--distribute\' option has been '
                    'deprecated in virtualenv(>=1.10), as such, the '
                    '\'distribute\' option to `virtualenv.create()` has '
                    'also been deprecated and it\'s not necessary anymore.')
            else:
                cmd.append('--distribute')

        if python is not None and python.strip() != '':
            if not salt.utils.path.which(python):
                raise CommandExecutionError(
                    'Cannot find requested python ({0}).'.format(python))
            cmd.append('--python={0}'.format(python))
        if extra_search_dir is not None:
            # Accept either a list or a comma-separated string of dirs
            if isinstance(extra_search_dir, string_types) and \
                    extra_search_dir.strip() != '':
                extra_search_dir = [
                    e.strip() for e in extra_search_dir.split(',')
                ]
            for entry in extra_search_dir:
                cmd.append('--extra-search-dir={0}'.format(entry))
        if never_download is True:
            # --never-download existed before 1.10, was a no-op in
            # [1.10, 14.0), and was reimplemented from 14.0.0 on.
            if virtualenv_version_info >= (1, 10) \
                    and virtualenv_version_info < (14, 0, 0):
                log.info(
                    '--never-download was deprecated in 1.10.0, but reimplemented in 14.0.0. '
                    'If this feature is needed, please install a supported virtualenv version.'
                )
            else:
                cmd.append('--never-download')
        if prompt is not None and prompt.strip() != '':
            cmd.append('--prompt=\'{0}\''.format(prompt))
    else:
        # venv module from the Python >= 3.3 standard library

        # ----- Stop the user if virtualenv only options are being used ----->
        # If any of the following values are not None, it means that the user
        # is actually passing a True or False value. Stop Him!
        if python is not None and python.strip() != '':
            raise CommandExecutionError(
                'The `python`(`--python`) option is not supported '
                'by \'{0}\''.format(venv_bin))
        elif extra_search_dir is not None and extra_search_dir.strip() != '':
            raise CommandExecutionError(
                'The `extra_search_dir`(`--extra-search-dir`) option is not '
                'supported by \'{0}\''.format(venv_bin))
        elif never_download is not None:
            raise CommandExecutionError(
                'The `never_download`(`--never-download`) option is not '
                'supported by \'{0}\''.format(venv_bin))
        elif prompt is not None and prompt.strip() != '':
            raise CommandExecutionError(
                'The `prompt`(`--prompt`) option is not supported '
                'by \'{0}\''.format(venv_bin))
        # <---- Stop the user if virtualenv only options are being used ------

        if upgrade is True:
            cmd.append('--upgrade')
        if symlinks is True:
            cmd.append('--symlinks')

    # Common options to virtualenv and pyvenv
    if clear is True:
        cmd.append('--clear')
    if system_site_packages is True:
        cmd.append('--system-site-packages')

    # Finally the virtualenv path
    cmd.append(path)

    # Let's create the virtualenv
    ret = __salt__['cmd.run_all'](cmd,
                                  runas=user,
                                  python_shell=False,
                                  **kwargs)
    if ret['retcode'] != 0:
        # Something went wrong. Let's bail out now!
        return ret

    # Check if distribute and pip are already installed
    if salt.utils.platform.is_windows():
        venv_python = os.path.join(path, 'Scripts', 'python.exe')
        venv_pip = os.path.join(path, 'Scripts', 'pip.exe')
        venv_setuptools = os.path.join(path, 'Scripts', 'easy_install.exe')
    else:
        venv_python = os.path.join(path, 'bin', 'python')
        venv_pip = os.path.join(path, 'bin', 'pip')
        venv_setuptools = os.path.join(path, 'bin', 'easy_install')

    # Install setuptools
    if (pip or distribute) and not os.path.exists(venv_setuptools):
        _install_script(
            'https://bitbucket.org/pypa/setuptools/raw/default/ez_setup.py',
            path,
            venv_python,
            user,
            saltenv=saltenv,
            use_vt=use_vt)

        # clear up the distribute archive which gets downloaded
        for fpath in glob.glob(os.path.join(path, 'distribute-*.tar.gz*')):
            os.unlink(fpath)

    if ret['retcode'] != 0:
        # Something went wrong. Let's bail out now!
        return ret

    # Install pip
    if pip and not os.path.exists(venv_pip):
        _ret = _install_script('https://bootstrap.pypa.io/get-pip.py',
                               path,
                               venv_python,
                               user,
                               saltenv=saltenv,
                               use_vt=use_vt)
        # Let's update the return dictionary with the details from the pip
        # installation
        ret.update(
            retcode=_ret['retcode'],
            stdout='{0}\n{1}'.format(ret['stdout'], _ret['stdout']).strip(),
            stderr='{0}\n{1}'.format(ret['stderr'], _ret['stderr']).strip(),
        )

    return ret
def _query(profile,
           action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.github.com/',
           per_page=None):
    '''
    Make a web call to the GitHub API and deal with paginated results.

    profile
        The name of the profile configuration to read the API token from.

    action
        Path segment appended to ``url`` (e.g. ``repos/org/repo``).

    command
        Further path segment appended after ``action``.

    args
        Query parameters; anything that is not a dict is replaced with {}.

    method
        HTTP method; DELETE responses are not JSON-decoded.

    Returns either a single dict (when the API answers with one object) or a
    list accumulated across all pages. Raises CommandExecutionError on any
    non-200 response.
    '''
    if not isinstance(args, dict):
        args = {}

    if action:
        url += action

    if command:
        url += '/{0}'.format(command)

    # Lazy %-style args so the string is only built when DEBUG is enabled
    log.debug('GitHub URL: %s', url)

    if 'access_token' not in args:
        args['access_token'] = _get_config_value(profile, 'token')
    if per_page and 'per_page' not in args:
        args['per_page'] = per_page

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        # DELETE responses have no JSON body to decode
        decode = False

    # GitHub paginates all queries when returning many items.
    # Gather all data using multiple queries and handle pagination.
    complete_result = []
    next_page = True
    page_number = ''
    while next_page is True:
        if page_number:
            args['page'] = page_number
        result = salt.utils.http.query(url,
                                       method,
                                       params=args,
                                       data=data,
                                       header_dict=header_dict,
                                       decode=decode,
                                       decode_type='json',
                                       headers=True,
                                       status=True,
                                       text=True,
                                       hide_fields=['access_token'],
                                       opts=__opts__,
                                       )
        log.debug('GitHub Response Status Code: %s', result['status'])

        if result['status'] == 200:
            if isinstance(result['dict'], dict):
                # If only querying for one item, such as a single issue,
                # the GitHub API returns a single dictionary, instead of
                # a list of dictionaries. In that case, we can return.
                return result['dict']
            complete_result = complete_result + result['dict']
        else:
            raise CommandExecutionError('GitHub Response Error: {0}'.format(
                result.get('error')))

        try:
            # First entry of the Link header is the 'next' relation (if any)
            link_info = result.get('headers').get('Link').split(',')[0]
        except AttributeError:
            # Only one page of data was returned; exit the loop.
            next_page = False
            continue

        if 'next' in link_info:
            # Get the 'next' page number from the Link header.
            page_number = link_info.split('>')[0].split('&page=')[1]
        else:
            # Last page already processed; break the loop.
            next_page = False

    return complete_result
def config(name, reset=False, **kwargs): ''' Modify configuration options for a given port. Multiple options can be specified. To see the available options for a port, use :mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`. name The port name, in ``category/name`` format reset : False If ``True``, runs a ``make rmconfig`` for the port, clearing its configuration before setting the desired options CLI Examples: .. code-block:: bash salt '*' ports.config security/nmap IPV6=off ''' portpath = _check_portname(name) if reset: rmconfig(name) configuration = showconfig(name, dict_return=True) if not configuration: raise CommandExecutionError( 'Unable to get port configuration for \'{0}\''.format(name)) # Get top-level key for later reference pkg = next(iter(configuration)) conf_ptr = configuration[pkg] opts = dict((six.text_type(x), _normalize(kwargs[x])) for x in kwargs if not x.startswith('_')) bad_opts = [x for x in opts if x not in conf_ptr] if bad_opts: raise SaltInvocationError( 'The following opts are not valid for port {0}: {1}'.format( name, ', '.join(bad_opts))) bad_vals = [ '{0}={1}'.format(x, y) for x, y in six.iteritems(opts) if y not in ('on', 'off') ] if bad_vals: raise SaltInvocationError( 'The following key/value pairs are invalid: {0}'.format( ', '.join(bad_vals))) conf_ptr.update(opts) _write_options(name, configuration) new_config = showconfig(name, dict_return=True) try: new_config = new_config[next(iter(new_config))] except (StopIteration, TypeError): return False return all(conf_ptr[x] == new_config.get(x) for x in conf_ptr)
def showconfig(name, default=False, dict_return=False): ''' Show the configuration options for a given port. default : False Show the default options for a port (not necessarily the same as the current configuration) dict_return : False Instead of returning the output of ``make showconfig``, return the data in an dictionary CLI Example: .. code-block:: bash salt '*' ports.showconfig security/nmap salt '*' ports.showconfig security/nmap default=True ''' portpath = _check_portname(name) if default and _options_file_exists(name): saved_config = showconfig(name, default=False, dict_return=True) rmconfig(name) if _options_file_exists(name): raise CommandExecutionError('Unable to get default configuration') default_config = showconfig(name, default=False, dict_return=dict_return) _write_options(name, saved_config) return default_config try: result = __salt__['cmd.run_all'](['make', 'showconfig'], cwd=portpath, python_shell=False) output = result['stdout'].splitlines() if result['retcode'] != 0: error = result['stderr'] else: error = '' except TypeError: error = result if error: msg = ('Error running \'make showconfig\' for {0}: {1}'.format( name, error)) log.error(msg) raise SaltInvocationError(msg) if not dict_return: return '\n'.join(output) if (not output) or ('configuration options' not in output[0]): return {} try: pkg = output[0].split()[-1].rstrip(':') except (IndexError, AttributeError, TypeError) as exc: log.error('Unable to get pkg-version string: %s', exc) return {} ret = {pkg: {}} output = output[1:] for line in output: try: opt, val, desc = re.match(r'\s+([^=]+)=(off|on): (.+)', line).groups() except AttributeError: continue ret[pkg][opt] = val if not ret[pkg]: return {} return ret
def set_automaster(name,
                   device,
                   fstype,
                   opts='',
                   config='/etc/auto_salt',
                   test=False,
                   **kwargs):
    '''
    Verify that this mount is represented in the auto_salt, change the mount
    to match the data passed, or add the mount if it is not present.

    Returns one of 'change' (an existing entry was updated), 'present' (a
    matching entry already exists), or 'new' (the entry was appended).

    CLI Example:

    .. code-block:: bash

        salt '*' mount.set_automaster /mnt/foo /dev/sdz1 ext4
    '''
    # Fix the opts type if it is a list
    if isinstance(opts, list):
        opts = ','.join(opts)
    lines = []
    change = False
    present = False
    automaster_file = "/etc/auto_master"

    # Bootstrap: create the map file and register it in auto_master
    if not os.path.isfile(config):
        __salt__['file.touch'](config)
        __salt__['file.append'](automaster_file, "/-\t\t\t{0}".format(config))

    # Entries in the map use the '/..<name>' key format
    name = "/..{0}".format(name)
    device_fmt = "{0}:{1}".format(fstype, device)
    type_opts = "-fstype={0},{1}".format(fstype, opts)

    if fstype == 'smbfs':
        # smbfs devices are written without the fstype prefix
        device_fmt = device_fmt.replace(fstype, "")

    try:
        with salt.utils.fopen(config, 'r') as ifile:
            for line in ifile:
                if line.startswith('#'):
                    # Commented
                    lines.append(line)
                    continue
                if not line.strip():
                    # Blank line
                    lines.append(line)
                    continue
                comps = line.split()
                if len(comps) != 3:
                    # Invalid entry
                    lines.append(line)
                    continue
                if comps[0] == name or comps[2] == device_fmt:
                    # check to see if there are changes
                    # and fix them if there are any
                    # NOTE(review): a matching-but-unchanged line is NOT
                    # re-appended to ``lines`` here; if a *later* line then
                    # sets ``change``, the rewrite below drops this entry.
                    # Harmless for well-formed single-entry maps -- confirm.
                    present = True
                    if comps[0] != name:
                        change = True
                        comps[0] = name
                    if comps[1] != type_opts:
                        change = True
                        comps[1] = type_opts
                    if comps[2] != device_fmt:
                        change = True
                        comps[2] = device_fmt
                    if change:
                        log.debug(
                            'auto_master entry for mount point {0} needs to be '
                            'updated'.format(name))
                        newline = ('{0}\t{1}\t{2}\n'.format(
                            name, type_opts, device_fmt))
                        lines.append(newline)
                else:
                    lines.append(line)
    except (IOError, OSError) as exc:
        msg = 'Couldn\'t read from {0}: {1}'
        raise CommandExecutionError(msg.format(config, str(exc)))

    if change:
        # Rewrite the whole file with the updated entry (unless test mode)
        if not salt.utils.test_mode(test=test, **kwargs):
            try:
                with salt.utils.fopen(config, 'w+') as ofile:
                    # The line was changed, commit it!
                    ofile.writelines(lines)
            except (IOError, OSError):
                msg = 'File not writable {0}'
                raise CommandExecutionError(msg.format(config))

        return 'change'

    if not change:
        if present:
            # The right entry is already here
            return 'present'
        else:
            if not salt.utils.test_mode(test=test, **kwargs):
                # The entry is new, add it to the end of the fstab
                newline = ('{0}\t{1}\t{2}\n'.format(
                    name, type_opts, device_fmt))
                lines.append(newline)
                try:
                    with salt.utils.fopen(config, 'w+') as ofile:
                        # The line was changed, commit it!
                        ofile.writelines(lines)
                except (IOError, OSError):
                    raise CommandExecutionError(
                        'File not writable {0}'.format(config))
    return 'new'
def list_(pkg=None, dir=None, runas=None, env=None, depth=None): ''' List installed NPM packages. If no directory is specified, this will return the list of globally- installed packages. pkg Limit package listing by name dir The directory whose packages will be listed, or None for global installation runas The user to run NPM with .. versionadded:: 2014.7.0 env Environment variables to set when invoking npm. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function. .. versionadded:: 2014.7.0 depth Limit the depth of the packages listed .. versionadded:: 2016.11.6, 2017.7.0 CLI Example: .. code-block:: bash salt '*' npm.list ''' env = env or {} if runas: uid = salt.utils.user.get_uid(runas) if uid: env.update({'SUDO_UID': uid, 'SUDO_USER': ''}) cmd = ['npm', 'list', '--json', '--silent'] if not dir: cmd.append('--global') if depth is not None: if not isinstance(depth, (int, float)): raise salt.exceptions.SaltInvocationError( 'Error: depth {0} must be a number'.format(depth)) cmd.append('--depth={0}'.format(int(depth))) if pkg: # Protect against injection pkg = _cmd_quote(pkg) cmd.append('"{0}"'.format(pkg)) cmd = ' '.join(cmd) result = __salt__['cmd.run_all'](cmd, cwd=dir, runas=runas, env=env, python_shell=True, ignore_retcode=True) # npm will return error code 1 for both no packages found and an actual # error. The only difference between the two cases are if stderr is empty if result['retcode'] != 0 and result['stderr']: raise CommandExecutionError(result['stderr']) return salt.utils.json.loads(result['stdout']).get('dependencies', {})
def get_url(self, url, dest, makedirs=False, saltenv='base', env=None,
            no_cache=False):
    '''
    Get a single file from a URL.

    Supports the ``file``/local, ``salt``, ``s3``, ``ftp``, ``swift`` and
    ``http(s)`` schemes. Returns the local path to the fetched file (or, for
    HTTP with ``no_cache=True``, the response text), or '' when the
    destination directory is missing and ``makedirs`` is False.
    '''
    if env is not None:
        # ``env`` is the deprecated spelling of ``saltenv``
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt '
            'Boron.')
        # Backwards compatibility
        saltenv = env

    url_data = urlparse(url)

    if url_data.scheme in ('file', ''):
        # Local filesystem
        if not os.path.isabs(url_data.path):
            raise CommandExecutionError(
                'Path {0!r} is not absolute'.format(url_data.path))
        return url_data.path

    if url_data.scheme == 'salt':
        # Delegate salt:// URLs to the fileserver client
        return self.get_file(url, dest, makedirs, saltenv)
    if dest:
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            if makedirs:
                os.makedirs(destdir)
            else:
                # Destination dir missing and we may not create it
                return ''
    elif not no_cache:
        # No explicit dest: cache under the minion's extrn_files area
        dest = self._extrn_path(url, saltenv)
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            os.makedirs(destdir)

    if url_data.scheme == 's3':
        try:
            def s3_opt(key, default=None):
                '''Get value of s3.<key> from Minion config or from Pillar'''
                if 's3.' + key in self.opts:
                    return self.opts['s3.' + key]
                try:
                    return self.opts['pillar']['s3'][key]
                except (KeyError, TypeError):
                    return default
            salt.utils.s3.query(method='GET',
                                bucket=url_data.netloc,
                                path=url_data.path[1:],
                                return_bin=False,
                                local_file=dest,
                                action=None,
                                key=s3_opt('key'),
                                keyid=s3_opt('keyid'),
                                service_url=s3_opt('service_url'),
                                verify_ssl=s3_opt('verify_ssl', True),
                                location=s3_opt('location'))
            return dest
        except Exception as exc:
            raise MinionError(
                'Could not fetch from {0}. Exception: {1}'.format(url, exc))
    if url_data.scheme == 'ftp':
        try:
            # Anonymous FTP login only
            ftp = ftplib.FTP(url_data.hostname)
            ftp.login()
            with salt.utils.fopen(dest, 'wb') as fp_:
                ftp.retrbinary('RETR {0}'.format(url_data.path), fp_.write)
            return dest
        except Exception as exc:
            raise MinionError(
                'Could not retrieve {0} from FTP server. Exception: {1}'.
                format(url, exc))

    if url_data.scheme == 'swift':
        try:
            swift_conn = SaltSwift(
                self.opts.get('keystone.user', None),
                self.opts.get('keystone.tenant', None),
                self.opts.get('keystone.auth_url', None),
                self.opts.get('keystone.password', None))
            swift_conn.get_object(url_data.netloc,
                                  url_data.path[1:],
                                  dest)
            return dest
        except Exception:
            raise MinionError('Could not fetch from {0}'.format(url))

    get_kwargs = {}
    if url_data.username is not None \
            and url_data.scheme in ('http', 'https'):
        # Strip inline credentials from the URL and pass them as HTTP auth
        _, netloc = url_data.netloc.split('@', 1)
        fixed_url = urlunparse(
            (url_data.scheme, netloc, url_data.path,
             url_data.params, url_data.query, url_data.fragment))
        get_kwargs['auth'] = (url_data.username, url_data.password)
    else:
        fixed_url = url
    try:
        query = salt.utils.http.query(fixed_url, stream=True, **get_kwargs)
        if 'handle' not in query:
            raise MinionError('Error: {0}'.format(query['error']))
        response = query['handle']
        chunk_size = 32 * 1024
        if not no_cache:
            # Stream the body to disk; requests-style responses expose
            # iter_content, otherwise fall back to read() in chunks.
            with salt.utils.fopen(dest, 'wb') as destfp:
                if hasattr(response, 'iter_content'):
                    for chunk in response.iter_content(
                            chunk_size=chunk_size):
                        destfp.write(chunk)
                else:
                    while True:
                        chunk = response.read(chunk_size)
                        destfp.write(chunk)
                        # A short read means the body is exhausted
                        if len(chunk) < chunk_size:
                            break
            return dest
        else:
            if hasattr(response, 'text'):
                return response.text
            else:
                return response['text']
    except HTTPError as exc:
        # responses[code] is a (shortname, detail) 2-tuple, unpacked as
        # positional args 2 and 3; the format string deliberately uses {3}
        # (the detail text) and skips {2}.
        raise MinionError('HTTP error {0} reading {1}: {3}'.format(
            exc.code,
            url,
            *BaseHTTPServer.BaseHTTPRequestHandler.responses[exc.code]))
    except URLError as exc:
        raise MinionError('Error reading {0}: {1}'.format(url, exc.reason))
def install(pkg=None,
            pkgs=None,
            dir=None,
            runas=None,
            registry=None,
            env=None,
            dry_run=False,
            silent=True):
    '''
    Install an NPM package.

    If no directory is specified, the package will be installed globally. If
    no package is specified, the dependencies (from package.json) of the
    package in the given directory will be installed.

    pkg
        A package name in any format accepted by NPM, including a version
        identifier

    pkgs
        A list of package names in the same format as the ``name`` parameter

        .. versionadded:: 2014.7.0

    dir
        The target directory in which to install the package, or None for
        global installation

    runas
        The user to run NPM with

    registry
        The NPM registry to install the package from.

        .. versionadded:: 2014.7.0

    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.

        .. versionadded:: 2014.7.0

    dry_run
        Whether or not to run NPM install with --dry-run flag.

        .. versionadded:: 2015.8.4

    silent
        Whether or not to run NPM install with --silent flag.

        .. versionadded:: 2015.8.5

    CLI Example:

    .. code-block:: bash

        salt '*' npm.install coffee-script

        salt '*' npm.install [email protected]
    '''
    # Protect against injection
    if pkg:
        pkgs = [_cmd_quote(pkg)]
    elif pkgs:
        pkgs = [_cmd_quote(v) for v in pkgs]
    else:
        pkgs = []
    if registry:
        registry = _cmd_quote(registry)

    cmd = ['npm', 'install', '--json']
    if silent:
        cmd.append('--silent')

    if not dir:
        cmd.append('--global')

    if registry:
        cmd.append('--registry="{0}"'.format(registry))

    if dry_run:
        cmd.append('--dry-run')

    cmd.extend(pkgs)

    # Copy the env dict so the caller's mapping is not mutated when the
    # SUDO_* variables are injected below (the previous ``env = env or {}``
    # updated the caller's dict in place).
    env = dict(env) if env else {}

    if runas:
        uid = salt.utils.user.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': uid, 'SUDO_USER': ''})

    cmd = ' '.join(cmd)
    result = __salt__['cmd.run_all'](cmd,
                                     python_shell=True,
                                     cwd=dir,
                                     runas=runas,
                                     env=env)

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])

    # npm >1.2.21 is putting the output to stderr even though retcode is 0
    npm_output = result['stdout'] or result['stderr']
    try:
        return salt.utils.json.find_json(npm_output)
    except ValueError:
        return npm_output
def mkfs(*devices, **kwargs):
    """
    Create a BTRFS file system on the specified device(s). By default any
    existing filesystem is overwritten (force).

    General options:

    * **allocsize**: Specify the BTRFS offset from the start of the device.
    * **bytecount**: Specify the size of the resultant filesystem.
    * **nodesize**: Node size.
    * **leafsize**: Specify the nodesize, the tree block size in which btrfs
      stores data.
    * **noforce**: Prevent force overwrite when an existing filesystem is
      detected on the device.
    * **sectorsize**: Specify the sectorsize, the minimum data block
      allocation unit.
    * **nodiscard**: Do not perform whole device TRIM operation by default.
    * **uuid**: Pass UUID or pass True to generate one.

    Options:

    * **dto**: (raid0|raid1|raid5|raid6|raid10|single|dup)
      Specify how the data must be spanned across the devices specified.
    * **mto**: (raid0|raid1|raid5|raid6|raid10|single|dup)
      Specify how metadata must be spanned across the devices specified.
    * **fts**: Features (call ``salt <host> btrfs.features`` for full list of
      available features)

    See the ``mkfs.btrfs(8)`` manpage for a more complete description of
    corresponding options description.

    CLI Example:

    .. code-block:: bash

      salt '*' btrfs.mkfs /dev/sda1
      salt '*' btrfs.mkfs /dev/sda1 noforce=True
    """
    if not devices:
        raise CommandExecutionError("No devices specified")

    # Refuse to touch a device that is currently mounted
    mounts = salt.utils.fsutils._get_mounts("btrfs")
    for dev in devices:
        if mounts.get(dev):
            raise CommandExecutionError(
                'Device "{}" should not be mounted'.format(dev))

    cmd = ["mkfs.btrfs"]

    # With a single device, any requested data/metadata profile collapses
    # to "single".
    dto = kwargs.get("dto")
    mto = kwargs.get("mto")
    single_device = len(devices) == 1
    if dto:
        cmd.append("-d single" if single_device else "-d {}".format(dto))
    if mto:
        cmd.append("-m single" if single_device else "-m {}".format(mto))

    flag_map = [
        ("-l", "leafsize"),
        ("-L", "label"),
        ("-O", "fts"),
        ("-A", "allocsize"),
        ("-b", "bytecount"),
        ("-n", "nodesize"),
        ("-s", "sectorsize"),
    ]
    for flag, option in flag_map:
        if option == "label" and option in kwargs:
            # Single-quote the label so it survives the shell invocation
            kwargs["label"] = "'{}'".format(kwargs["label"])
        if kwargs.get(option):
            cmd.append("{} {}".format(flag, kwargs.get(option)))

    if kwargs.get("uuid"):
        # uuid=True means "generate one"; any other truthy value is used
        # verbatim.
        chosen = (uuid.uuid1()
                  if kwargs.get("uuid") is True
                  else kwargs.get("uuid"))
        cmd.append("-U {}".format(chosen))

    if kwargs.get("nodiscard"):
        cmd.append("-K")
    if not kwargs.get("noforce"):
        cmd.append("-f")

    cmd.extend(devices)

    out = __salt__["cmd.run_all"](" ".join(cmd))
    salt.utils.fsutils._verify_run(out)

    ret = {"log": out["stdout"]}
    ret.update(__salt__["btrfs.info"](devices[0]))

    return ret
def enable(name, no_block=False, unmask=False, unmask_runtime=False,
           **kwargs):  # pylint: disable=unused-argument
    '''
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands run by this function from the ``salt-minion``
        daemon's control group. This is done to avoid a race condition in
        cases where the ``salt-minion`` service is restarted while a service
        is being modified. If desired, usage of `systemd-run(1)`_ can be
        suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).

    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html

    Enable the named service to start when the system boots

    no_block : False
        Set to ``True`` to start the service using ``--no-block``.

        .. versionadded:: 2017.7.0

    unmask : False
        Set to ``True`` to remove an indefinite mask before attempting to
        enable the service.

        .. versionadded:: 2017.7.0
            In previous releases, Salt would simply unmask a service before
            enabling. This behavior is no longer the default.

    unmask_runtime : False
        Set to ``True`` to remove a runtime mask before attempting to enable
        the service.

        .. versionadded:: 2017.7.0
            In previous releases, Salt would simply unmask a service before
            enabling. This behavior is no longer the default.

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
    '''
    _check_for_unit_changes(name)
    _check_unmask(name, unmask, unmask_runtime)

    # Legacy sysv init scripts are enabled via update-rc.d/chkconfig rather
    # than systemctl.
    if name in _get_sysv_services():
        sysv_cmd = []
        use_scope = (salt.utils.systemd.has_scope(__context__)
                     and __salt__['config.get']('systemd.scope', True))
        if use_scope:
            sysv_cmd.extend(['systemd-run', '--scope'])
        service_exec = _get_service_exec()
        if service_exec.endswith('/update-rc.d'):
            sysv_cmd.extend([service_exec, '-f', name, 'defaults', '99'])
        elif service_exec.endswith('/chkconfig'):
            sysv_cmd.extend([service_exec, name, 'on'])
        retcode = __salt__['cmd.retcode'](sysv_cmd,
                                          python_shell=False,
                                          ignore_retcode=True)
        return retcode == 0

    ret = __salt__['cmd.run_all'](
        _systemctl_cmd('enable', name, systemd_scope=True, no_block=no_block),
        python_shell=False,
        ignore_retcode=True)

    if ret['retcode'] != 0:
        # Instead of returning a bool, raise an exception so that we can
        # include the error message in the return data. This helps give more
        # information to the user in instances where the service is masked.
        raise CommandExecutionError(_strip_scope(ret['stderr']))

    return True
def _restripe(mountpoint, direction, *devices, **kwargs): """ Restripe BTRFS: add or remove devices from the particular mounted filesystem. """ fs_log = [] if salt.utils.fsutils._is_device(mountpoint): raise CommandExecutionError( 'Mountpount expected, while device "{}" specified'.format( mountpoint)) mounted = False for device, mntpoints in salt.utils.fsutils._get_mounts("btrfs").items(): for mntdata in mntpoints: if mntdata["mount_point"] == mountpoint: mounted = True break if not mounted: raise CommandExecutionError( 'No BTRFS device mounted on "{}" mountpoint'.format(mountpoint)) if not devices: raise CommandExecutionError("No devices specified.") available_devices = __salt__["btrfs.devices"]() for device in devices: if device not in available_devices.keys(): raise CommandExecutionError( 'Device "{}" is not recognized'.format(device)) cmd = ["btrfs device {}".format(direction)] for device in devices: cmd.append(device) if direction == "add": if kwargs.get("nodiscard"): cmd.append("-K") if kwargs.get("force"): cmd.append("-f") cmd.append(mountpoint) out = __salt__["cmd.run_all"](" ".join(cmd)) salt.utils.fsutils._verify_run(out) if out["stdout"]: fs_log.append(out["stdout"]) if direction == "add": out = None data_conversion = kwargs.get("dc") meta_conversion = kwargs.get("mc") if data_conversion and meta_conversion: out = __salt__["cmd.run_all"]( "btrfs balance start -dconvert={} -mconvert={} {}".format( data_conversion, meta_conversion, mountpoint)) else: out = __salt__["cmd.run_all"]( "btrfs filesystem balance {}".format(mountpoint)) salt.utils.fsutils._verify_run(out) if out["stdout"]: fs_log.append(out["stdout"]) # Summarize the result ret = {} if fs_log: ret.update({"log": "\n".join(fs_log)}) ret.update(__salt__["btrfs.info"](mountpoint)) return ret
def install(name=None, refresh=False, fromrepo=None,
            pkgs=None, sources=None, **kwargs):
    '''
    Install the passed package

    name
        The name of the package to be installed.

    refresh
        Whether or not to refresh the package database before installing.

    fromrepo
        Specify a package repository to install from.

    Multiple Package Installation Options:

    pkgs
        A list of packages to install from a software repository. Must be
        passed as a python list.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.install pkgs='["foo","bar"]'

    sources
        A list of packages to install. Must be passed as a list of dicts,
        with the keys being package names, and the values being the source
        URI or local path to the package.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'

    Returns a dict of the changed packages::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.install <package name>
    '''
    try:
        pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
            name, pkgs, sources, **kwargs)
    except MinionError as exc:
        raise CommandExecutionError(exc)

    if not pkg_params:
        return {}

    # Honor the legacy "repo" argument when "fromrepo" was not supplied.
    repo = kwargs.get('repo', '')
    if not fromrepo and repo:
        fromrepo = repo

    env = []
    args = []
    pkgin = _check_pkgin()
    if pkgin:
        # pkgin is available; point it at the requested repository.
        cmd = pkgin
        if fromrepo:
            log.info('Setting PKG_REPOS={0}'.format(fromrepo))
            env.append(('PKG_REPOS', fromrepo))
    else:
        # Fall back to plain pkg_add with PKG_PATH.
        cmd = 'pkg_add'
        if fromrepo:
            log.info('Setting PKG_PATH={0}'.format(fromrepo))
            env.append(('PKG_PATH', fromrepo))

    if pkg_type == 'file':
        # Local package files always go through pkg_add.
        cmd = 'pkg_add'
    elif pkg_type == 'repository' and pkgin:
        if refresh:
            args.append('-f')  # update repo db
        args.extend(('-y', 'in'))  # Assume yes when asked

    args.extend(pkg_params)

    old = list_pkgs()
    __salt__['cmd.run'](
        '{0} {1}'.format(cmd, ' '.join(args)),
        env=env,
        output_loglevel='debug')
    new = list_pkgs()
    rehash()

    return salt.utils.compare_dicts(old, new)
def _connect(
    contact_points=None, port=None, cql_user=None, cql_pass=None, protocol_version=None
):
    """
    Connect to a Cassandra cluster.

    :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
    :type contact_points: str or list of str
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :param protocol_version: Cassandra protocol version to use.
    :type protocol_version: int
    :return: The session and cluster objects.
    :rtype: cluster object, session object
    """
    # Lazy load the Cassandra cluster and session for this module by creating a
    # cluster and session when cql_query is called the first time. Get the
    # Cassandra cluster and session from this module's __context__ after it is
    # loaded the first time cql_query is called.
    #
    # TODO: Call cluster.shutdown() when the module is unloaded on
    # master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown()
    # do nothing to allow loaded modules to gracefully handle resources stored
    # in __context__ (i.e. connection pools). This means that the connection
    # pool is orphaned and Salt relies on Cassandra to reclaim connections.
    # Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"
    # function, or something similar for each loaded module, connection pools
    # and the like can be gracefully reclaimed/shutdown.
    if (
        __context__
        and "cassandra_cql_returner_cluster" in __context__
        and "cassandra_cql_returner_session" in __context__
    ):
        # Reuse the cached connection from an earlier call.
        return (
            __context__["cassandra_cql_returner_cluster"],
            __context__["cassandra_cql_returner_session"],
        )
    else:
        # Resolve each connection parameter; values not given explicitly are
        # read from configuration, with a hard-coded fallback.
        # NOTE(review): the exact argument-vs-config precedence lives in
        # _load_properties — confirm there.
        contact_points = _load_properties(
            property_name=contact_points, config_option="cluster"
        )
        # Accept "host1,host2" strings as well as real lists.
        contact_points = (
            contact_points
            if isinstance(contact_points, list)
            else contact_points.split(",")
        )
        port = _load_properties(
            property_name=port, config_option="port", set_default=True, default=9042
        )
        cql_user = _load_properties(
            property_name=cql_user,
            config_option="username",
            set_default=True,
            default="cassandra",
        )
        cql_pass = _load_properties(
            property_name=cql_pass,
            config_option="password",
            set_default=True,
            default="cassandra",
        )
        protocol_version = _load_properties(
            property_name=protocol_version,
            config_option="protocol_version",
            set_default=True,
            default=4,
        )

        try:
            auth_provider = PlainTextAuthProvider(username=cql_user, password=cql_pass)
            ssl_opts = _get_ssl_opts()
            # Only pass ssl_options when SSL is actually configured.
            if ssl_opts:
                cluster = Cluster(
                    contact_points,
                    port=port,
                    auth_provider=auth_provider,
                    ssl_options=ssl_opts,
                    protocol_version=protocol_version,
                    compression=True,
                )
            else:
                cluster = Cluster(
                    contact_points,
                    port=port,
                    auth_provider=auth_provider,
                    protocol_version=protocol_version,
                    compression=True,
                )
            # Up to three connection attempts; the timeout is re-raised on
            # the last one so the failure is not silently swallowed.
            for recontimes in range(1, 4):
                try:
                    session = cluster.connect()
                    break
                except OperationTimedOut:
                    log.warning(
                        "Cassandra cluster.connect timed out, try %s", recontimes
                    )
                    if recontimes >= 3:
                        raise

            # TODO: Call cluster.shutdown() when the module is unloaded on shutdown.
            # Cache the connection (and the prepared-statement registry) so
            # subsequent calls take the fast path at the top of this function.
            __context__["cassandra_cql_returner_cluster"] = cluster
            __context__["cassandra_cql_returner_session"] = session
            __context__["cassandra_cql_prepared"] = {}

            log.debug(
                "Successfully connected to Cassandra cluster at %s", contact_points
            )
            return cluster, session
        except TypeError:
            # NOTE(review): TypeError is deliberately swallowed here and the
            # function then implicitly returns None — confirm callers
            # tolerate a None return in this case.
            pass
        except (ConnectionException, ConnectionShutdown, NoHostAvailable):
            log.error("Could not connect to Cassandra cluster at %s", contact_points)
            raise CommandExecutionError(
                "ERROR: Could not connect to Cassandra cluster."
            )
def remove(name=None, pkgs=None, **kwargs):
    '''
    Remove the specified package(s) from the minion.

    name
        The name of the package to be deleted.


    Multiple Package Options:

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    .. versionadded:: 0.16.0


    Returns a dict of the removed packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    try:
        pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name, pkgs)
    except MinionError as exc:
        raise CommandExecutionError(exc)

    if not pkg_params:
        return {}

    old = list_pkgs()

    # Build the list of "<name>-<version>" targets, skipping anything
    # that is not currently installed.
    args = []
    for target in pkg_params:
        installed = old.get(target, [])
        if not installed:
            continue
        versions = installed if isinstance(installed, list) else [installed]
        args.extend('{0}-{1}'.format(target, ver) for ver in versions)

    if not args:
        return {}

    for_remove = ' '.join(args)
    pkgin = _check_pkgin()
    if pkgin:
        cmd = '{0} -y remove {1}'.format(pkgin, for_remove)
    else:
        cmd = 'pkg_remove {0}'.format(for_remove)

    __salt__['cmd.run'](cmd, output_loglevel='debug')

    # Report what actually changed on the system.
    new = list_pkgs()
    return salt.utils.compare_dicts(old, new)
def cql_query_with_prepare( query, statement_name, statement_arguments, callback_errors=None, contact_points=None, port=None, cql_user=None, cql_pass=None, **kwargs ): """ Run a query on a Cassandra cluster and return a dictionary. This function should not be used asynchronously for SELECTs -- it will not return anything and we don't currently have a mechanism for handling a future that will return results. :param query: The query to execute. :type query: str :param statement_name: Name to assign the prepared statement in the __context__ dictionary :type statement_name: str :param statement_arguments: Bind parameters for the SQL statement :type statement_arguments: list[str] :param async: Run this query in asynchronous mode :type async: bool :param callback_errors: Function to call after query runs if there is an error :type callback_errors: Function callable :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs. :type contact_points: str | list[str] :param cql_user: The Cassandra user if authentication is turned on. :type cql_user: str :param cql_pass: The Cassandra user password if authentication is turned on. :type cql_pass: str :param port: The Cassandra cluster port, defaults to None. :type port: int :param params: The parameters for the query, optional. :type params: str :return: A dictionary from the return values of the query :rtype: list[dict] CLI Example: .. code-block:: bash # Insert data asynchronously salt this-node cassandra_cql.cql_query_with_prepare "name_insert" "INSERT INTO USERS (first_name, last_name) VALUES (?, ?)" \ statement_arguments=['John','Doe'], asynchronous=True # Select data, should not be asynchronous because there is not currently a facility to return data from a future salt this-node cassandra_cql.cql_query_with_prepare "name_select" "SELECT * FROM USERS WHERE first_name=?" 
\ statement_arguments=['John'] """ # Backward-compatibility with Python 3.7: "async" is a reserved word asynchronous = kwargs.get("async", False) try: cluster, session = _connect( contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass, ) except CommandExecutionError: log.critical("Could not get Cassandra cluster session.") raise except BaseException as e: log.critical("Unexpected error while getting Cassandra cluster session: %s", e) raise if statement_name not in __context__["cassandra_cql_prepared"]: try: bound_statement = session.prepare(query) __context__["cassandra_cql_prepared"][statement_name] = bound_statement except BaseException as e: log.critical("Unexpected error while preparing SQL statement: %s", e) raise else: bound_statement = __context__["cassandra_cql_prepared"][statement_name] session.row_factory = dict_factory ret = [] try: if asynchronous: future_results = session.execute_async( bound_statement.bind(statement_arguments) ) # future_results.add_callbacks(_async_log_errors) else: results = session.execute(bound_statement.bind(statement_arguments)) except BaseException as e: log.error("Failed to execute query: %s\n reason: %s", query, e) msg = "ERROR: Cassandra query failed: {0} reason: {1}".format(query, e) raise CommandExecutionError(msg) if not asynchronous and results: for result in results: values = {} for key, value in six.iteritems(result): # Salt won't return dictionaries with odd types like uuid.UUID if not isinstance(value, six.text_type): # Must support Cassandra collection types. # Namely, Cassandras set, list, and map collections. if not isinstance(value, (set, list, dict)): value = six.text_type(value) values[key] = value ret.append(values) # If this was a synchronous call, then we either have an empty list # because there was no return, or we have a return # If this was an asynchronous call we only return the empty list return ret
def _unhandled_mock_read(filename):
    '''
    Fail loudly on any read that the mocks do not cover; salt.utils.fopen()
    must never be reached for an unexpected file.
    '''
    message = 'Unhandled mock read for {0}'.format(filename)
    raise CommandExecutionError(message)
def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None,
             protocol_version=None):
    '''
    Connect to a Cassandra cluster.

    :param contact_points: The Cassandra cluster addresses, can either be a
        string or a list of IPs.
    :type contact_points: str or list of str
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :param protocol_version: Cassandra protocol version to use. When not
        given, it is resolved from the ``protocol_version`` config option,
        falling back to 4. (The previous hard-coded default of 4 meant the
        config option was never consulted.)
    :type protocol_version: int
    :return: The session and cluster objects.
    :rtype: cluster object, session object
    '''
    # Lazy load the Cassandra cluster and session for this module by creating a
    # cluster and session when cql_query is called the first time. Get the
    # Cassandra cluster and session from this module's __context__ after it is
    # loaded the first time cql_query is called.
    #
    # TODO: Call cluster.shutdown() when the module is unloaded on
    # master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown()
    # do nothing to allow loaded modules to gracefully handle resources stored
    # in __context__ (i.e. connection pools). This means that the connection
    # pool is orphaned and Salt relies on Cassandra to reclaim connections.
    # Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"
    # function, or something similar for each loaded module, connection pools
    # and the like can be gracefully reclaimed/shutdown.
    if (__context__
            and 'cassandra_cql_returner_cluster' in __context__
            and 'cassandra_cql_returner_session' in __context__):
        # Reuse the cached connection from an earlier call.
        return __context__['cassandra_cql_returner_cluster'], __context__[
            'cassandra_cql_returner_session']
    else:
        contact_points = _load_properties(property_name=contact_points,
                                          config_option='cluster')
        # Accept "host1,host2" strings as well as real lists.
        contact_points = contact_points if isinstance(
            contact_points, list) else contact_points.split(',')
        port = _load_properties(property_name=port, config_option='port',
                                set_default=True, default=9042)
        cql_user = _load_properties(property_name=cql_user,
                                    config_option='username',
                                    set_default=True, default="cassandra")
        cql_pass = _load_properties(property_name=cql_pass,
                                    config_option='password',
                                    set_default=True, default="cassandra")
        # Passing None above (instead of the old hard-coded 4) lets this
        # actually fall back to the config option / default.
        protocol_version = _load_properties(property_name=protocol_version,
                                            config_option='protocol_version',
                                            set_default=True, default=4)

        try:
            auth_provider = PlainTextAuthProvider(username=cql_user,
                                                  password=cql_pass)
            ssl_opts = _get_ssl_opts()
            # Only pass ssl_options when SSL is actually configured.
            if ssl_opts:
                cluster = Cluster(contact_points,
                                  port=port,
                                  auth_provider=auth_provider,
                                  ssl_options=ssl_opts,
                                  protocol_version=protocol_version)
            else:
                cluster = Cluster(contact_points, port=port,
                                  auth_provider=auth_provider,
                                  protocol_version=protocol_version)
            session = cluster.connect()

            # TODO: Call cluster.shutdown() when the module is unloaded on shutdown.
            # Cache the connection so subsequent calls take the fast path.
            __context__['cassandra_cql_returner_cluster'] = cluster
            __context__['cassandra_cql_returner_session'] = session

            # Lazy %-formatting: the message is only built when debug
            # logging is enabled.
            log.debug('Successfully connected to Cassandra cluster at %s',
                      contact_points)
            return cluster, session
        except TypeError:
            # NOTE(review): deliberately swallowed in the original code; the
            # function then implicitly returns None — confirm callers
            # tolerate that.
            pass
        except (ConnectionException, ConnectionShutdown, NoHostAvailable):
            log.error('Could not connect to Cassandra cluster at %s',
                      contact_points)
            raise CommandExecutionError(
                'ERROR: Could not connect to Cassandra cluster.')
def set_fstab(
        name,
        device,
        fstype,
        opts='defaults',
        dump=0,
        pass_num=0,
        config='/etc/fstab',
        test=False,
        match_on='auto',
        **kwargs):
    '''
    Verify that this mount is represented in the fstab, change the mount
    to match the data passed, or add the mount if it is not present.

    name
        Mount point.
    device
        Device (or pseudo-device) to mount.
    fstype
        Filesystem type.
    opts
        Mount options, as a string or a list (joined with commas).
    dump
        The dump field of the entry.
    pass_num
        The fsck-order field of the entry.
    config
        Path of the fstab file to edit.
    test
        When in test mode, report the result without writing the file.
    match_on
        Which field(s) identify an existing entry: a key name, a list of
        key names, or ``auto`` to choose ``name`` for special filesystems
        and ``device`` otherwise.

    Returns ``'present'``, ``'new'`` or ``'change'``.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
    '''
    # Fix the opts type if it is a list
    if isinstance(opts, list):
        opts = ','.join(opts)

    # preserve arguments for updating
    entry_args = {
        'name': name,
        'device': device,
        'fstype': fstype,
        'opts': opts,
        'dump': dump,
        'pass_num': pass_num,
    }

    lines = []
    ret = None

    # Transform match_on into list--items will be checked later
    if isinstance(match_on, list):
        pass
    elif not isinstance(match_on, six.string_types):
        msg = 'match_on must be a string or list of strings'
        raise CommandExecutionError(msg)
    elif match_on == 'auto':
        # Try to guess right criteria for auto....
        # NOTE: missing some special fstypes here
        specialFSes = frozenset([
            'none', 'tmpfs', 'sysfs', 'proc', 'fusectl', 'debugfs',
            'securityfs', 'devtmpfs', 'cgroup', 'btrfs'
        ])

        if fstype in specialFSes:
            match_on = ['name']
        else:
            match_on = ['device']
    else:
        match_on = [match_on]

    # generate entry and criteria objects, handle invalid keys in match_on
    entry = _fstab_entry(**entry_args)
    try:
        criteria = entry.pick(match_on)
    except KeyError:
        # Build a real list so the message shows the offending keys instead
        # of a "<filter object ...>" repr on Python 3.
        invalid_keys = [key for key in match_on
                        if key not in _fstab_entry.fstab_keys]
        msg = 'Unrecognized keys in match_on: "{0}"'.format(invalid_keys)
        raise CommandExecutionError(msg)

    # parse file, use ret to cache status
    if not os.path.isfile(config):
        raise CommandExecutionError('Bad config file "{0}"'.format(config))

    try:
        with salt.utils.fopen(config, 'r') as ifile:
            for line in ifile:
                try:
                    if criteria.match(line):
                        # Note: If ret isn't None here,
                        # we've matched multiple lines
                        ret = 'present'
                        if entry.match(line):
                            lines.append(line)
                        else:
                            ret = 'change'
                            lines.append(str(entry))
                    else:
                        lines.append(line)
                except _fstab_entry.ParseError:
                    # Keep unparseable lines (comments, blanks) untouched.
                    lines.append(line)
    except (IOError, OSError) as exc:
        msg = 'Couldn\'t read from {0}: {1}'
        raise CommandExecutionError(msg.format(config, str(exc)))

    # add line if not present or changed
    if ret is None:
        lines.append(str(entry))
        ret = 'new'

    if ret != 'present':  # ret in ['new', 'change']:
        if not salt.utils.test_mode(test=test, **kwargs):
            try:
                with salt.utils.fopen(config, 'w+') as ofile:
                    # The line was changed, commit it!
                    ofile.writelines(lines)
            except (IOError, OSError):
                msg = 'File not writable {0}'
                raise CommandExecutionError(msg.format(config))

    return ret
def __process_tokens_internal(tokens, start_at=0):
    '''
    Recursively fold a flat token list into a (possibly nested) dict.

    tokens
        The complete token list being consumed.
    start_at
        Index of the first token belonging to the current object.

    Returns a ``(result, token_no)`` pair once the current object's
    dict-end token is reached, where ``token_no`` is the index of that
    closing token. If the stream ends without a dict-end token the loop
    falls through and the function implicitly returns None.
    '''
    if __is_dict_start(tokens[start_at]) and start_at == 0:  # the top object
        return __process_tokens_internal(tokens, start_at=1)

    # Lazy %-args, consistent with every other log call in this function
    # (previously eager string concatenation).
    log.debug("__process_tokens, start_at=%s", start_at)
    token_no = start_at
    result = {}
    current_key = None
    while token_no < len(tokens):
        token = tokens[token_no]
        log.debug("PROCESSING TOKEN %d: %s", token_no, token)
        if __is_quoted_string(token):
            log.debug(" TYPE: QUOTED STRING ")
            # A quoted string is a key when no key is pending, otherwise it
            # is the value for the pending key.
            if current_key is None:
                current_key = __get_quoted_string(token)
                log.debug(" KEY: %s", current_key)
            else:
                result[current_key] = __get_quoted_string(token)
                log.debug(" %s -> %s", current_key, result[current_key])
                current_key = None
        elif __is_datatype(token):
            log.debug(" TYPE: DATATYPE: %s ", token)
            result[current_key] = __get_datatype(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_boolean(token):
            log.debug(" TYPE: BOOLEAN ")
            result[current_key] = __get_boolean(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_int(token):
            log.debug(" TYPE: INT ")
            result[current_key] = __get_int(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_long(token):
            log.debug(" TYPE: LONG ")
            result[current_key] = __get_long(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_undefined(token):
            log.debug(" TYPE: UNDEFINED ")
            log.debug(" %s -> undefined (Adding as None to map)", current_key)
            result[current_key] = None
            current_key = None
        elif __is_dict_start(token):
            log.debug(" TYPE: DICT START")
            # Recurse for the nested object and resume past its closing token.
            dict_value, token_no = __process_tokens_internal(tokens, start_at=token_no+1)
            log.debug(" DICT = %s ", dict_value)
            result[current_key] = dict_value
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_dict_end(token):
            log.debug(" TYPE: DICT END")
            return result, token_no
        elif __is_assignment(token):
            # Assignment tokens merely separate a key from its value; nothing
            # to record. (Removed a dead "is_assignment" local that was
            # assigned but never read.)
            log.debug(" TYPE: ASSIGNMENT")
        else:
            raise CommandExecutionError('Unknown token! Token: {0}'.format(token))
        token_no = token_no + 1
def list_all_versions(pkg,
                      bin_env=None,
                      include_alpha=False,
                      include_beta=False,
                      include_rc=False,
                      user=None,
                      cwd=None,
                      index_url=None):
    '''
    .. versionadded:: 2017.7.3

    List all available versions of a pip package

    pkg
        The package to check

    bin_env
        Path to pip (or to a virtualenv). This can be used to specify the path
        to the pip to use when more than one Python release is installed (e.g.
        ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
        specified, it is assumed to be a virtualenv.

    include_alpha
        Include alpha versions in the list

    include_beta
        Include beta versions in the list

    include_rc
        Include release candidates versions in the list

    user
        The user under which to run pip

    cwd
        Directory from which to run pip

    index_url
        Base URL of Python Package Index
        .. versionadded:: Fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' pip.list_all_versions <package name>
    '''
    # The bogus "==versions" spec makes pip fail and print every available
    # version in its error output, which is parsed below.
    cmd = _get_pip_bin(bin_env)
    cmd.extend(['install', '{0}==versions'.format(pkg)])

    if index_url:
        if not salt.utils.url.validate(index_url, VALID_PROTOS):
            raise CommandExecutionError(
                '\'{0}\' is not a valid URL'.format(index_url))
        cmd.extend(['--index-url', index_url])

    cmd_kwargs = {
        'cwd': cwd,
        'runas': user,
        'output_loglevel': 'quiet',
        'redirect_stderr': True,
    }
    if bin_env and os.path.isdir(bin_env):
        cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}

    result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)

    # Collect the pre-release tags the caller does NOT want to see.
    filtered = [tag for tag, wanted in (('a', include_alpha),
                                        ('b', include_beta),
                                        ('rc', include_rc)) if not wanted]
    if filtered:
        # Match only versions that contain none of the unwanted tags.
        excludes = re.compile(r'^((?!{0}).)*$'.format('|'.join(filtered)))
    else:
        excludes = re.compile(r'')

    versions = []
    for line in result['stdout'].splitlines():
        match = re.search(
            r'\s*Could not find a version.* \(from versions: (.*)\)', line)
        if not match:
            continue
        versions = [v for v in match.group(1).split(', ')
                    if v and excludes.match(v)]
        versions.sort(key=pkg_resources.parse_version)
        break

    return versions or None