def bootstrap(
    platform,
    root,
    img_format="dir",
    fs_format="ext2",
    fs_opts=None,
    arch=None,
    flavor=None,
    repo_url=None,
    static_qemu=None,
    img_size=None,
    mount_dir=None,
    pkg_cache=None,
    pkgs=None,
    exclude_pkgs=None,
    epel_url=EPEL_URL,
):
    """
    Create an image for a specific platform.

    Please note that this function *MUST* be run as root, as images that are
    created make files belonging to root.

    platform
        Which platform to use to create the image. Currently supported
        platforms are rpm, deb and pacman.

    root
        Local path to create the root of the image filesystem.

    img_format
        Which format to create the image in. By default, just copies files
        into a directory on the local filesystem (``dir``). Future support
        will exist for ``sparse``.

    fs_format
        When using a non-``dir`` ``img_format``, which filesystem to format
        the image to. By default, ``ext2``.

    fs_opts
        When using a non-``dir`` ``img_format``, a dict of opts may be
        specified.

    arch
        Architecture to install packages for, if supported by the underlying
        bootstrap tool. Currently only used for deb.

    flavor
        Which flavor of operating system to install. This correlates to a
        specific directory on the distribution repositories. For instance,
        ``wheezy`` on Debian.

    repo_url
        Mainly important for Debian-based repos. Base URL for the mirror to
        install from. (e.x.: http://ftp.debian.org/debian/)

    static_qemu
        Local path to the static qemu binary required for this arch. (e.x.:
        /usr/bin/qemu-amd64-static)

    img_size
        If img_format is not ``dir``, then the size of the image must be
        specified.

    mount_dir
        If img_format is not ``dir``, then the image must be mounted
        somewhere. If the ``mount_dir`` is not specified, then it will be
        created at ``/opt/salt-genesis.<random_uuid>``. This directory will be
        unmounted and removed when the process is finished.

    pkg_cache
        This points to a directory containing a cache of package files to be
        copied to the image. It does not need to be specified.

    pkgs
        A list of packages to be installed on this image. For RedHat, this
        will include ``yum``, ``centos-release`` and ``iputils`` by default.

    exclude_pkgs
        A list of packages to be excluded. If you do not want to install the
        defaults, you need to include them in this list.

    epel_url
        The URL to download the EPEL release package from.

    CLI Examples:

    .. code-block:: bash

        salt myminion genesis.bootstrap pacman /root/arch
        salt myminion genesis.bootstrap rpm /root/redhat
        salt myminion genesis.bootstrap deb /root/wheezy arch=amd64 \
            flavor=wheezy static_qemu=/usr/bin/qemu-x86_64-static
    """
    if img_format not in ("dir", "sparse"):
        raise SaltInvocationError('The img_format must be "sparse" or "dir"')

    if img_format == "dir":
        # We can just use the root as the root
        if not __salt__["file.directory_exists"](root):
            try:
                __salt__["file.mkdir"](root)
            except Exception as exc:  # pylint: disable=broad-except
                return {
                    "Error": salt.utils.stringutils.to_unicode(pprint.pformat(exc))
                }
    elif img_format == "sparse":
        if not img_size:
            raise SaltInvocationError(
                "An img_size must be specified for a sparse file")
        if not mount_dir:
            mount_dir = "/opt/salt-genesis.{0}".format(uuid.uuid4())
        __salt__["file.mkdir"](mount_dir, "root", "root", "755")
        # Allocate the sparse image file, then partition/format it in place.
        __salt__["cmd.run"](("fallocate", "-l", img_size, root),
                            python_shell=False)
        _mkpart(root, fs_format, fs_opts, mount_dir)

        # Attach the whole image file to the first free loop device ...
        loop1 = __salt__["cmd.run"]("losetup -f")
        log.debug("First loop device is {0}".format(loop1))
        __salt__["cmd.run"]("losetup {0} {1}".format(loop1, root))
        # ... then attach a second loop device at a fixed offset past the
        # partition table, so the filesystem inside the partition is visible.
        loop2 = __salt__["cmd.run"]("losetup -f")
        log.debug("Second loop device is {0}".format(loop2))
        start = six.text_type(2048 * 2048)
        __salt__["cmd.run"]("losetup -o {0} {1} {2}".format(
            start, loop2, loop1))
        __salt__["mount.mount"](mount_dir, loop2)
        _populate_cache(platform, pkg_cache, mount_dir)

    # For non-dir formats the bootstrap happens inside the mounted image.
    if mount_dir:
        root = mount_dir

    if pkgs is None:
        pkgs = []

    if exclude_pkgs is None:
        exclude_pkgs = []

    # Dispatch to the platform-specific bootstrapper.
    if platform in ("rpm", "yum"):
        _bootstrap_yum(
            root,
            pkgs=pkgs,
            exclude_pkgs=exclude_pkgs,
            epel_url=epel_url,
        )
    elif platform == "deb":
        _bootstrap_deb(
            root,
            arch=arch,
            flavor=flavor,
            repo_url=repo_url,
            static_qemu=static_qemu,
            pkgs=pkgs,
            exclude_pkgs=exclude_pkgs,
        )
    elif platform == "pacman":
        _bootstrap_pacman(
            root,
            img_format=img_format,
            pkgs=pkgs,
            exclude_pkgs=exclude_pkgs,
        )

    if img_format != "dir":
        # Point the image's grub config at the real filesystem UUID, then
        # tear down the mount and both loop devices set up above.
        blkinfo = __salt__["disk.blkid"](loop2)
        __salt__["file.replace"](
            "{0}/boot/grub/grub.cfg".format(mount_dir),
            "ad4103fa-d940-47ca-8506-301d8071d467",  # This seems to be the default
            blkinfo[loop2]["UUID"],
        )
        __salt__["mount.umount"](root)
        __salt__["cmd.run"]("losetup -d {0}".format(loop2))
        __salt__["cmd.run"]("losetup -d {0}".format(loop1))
        __salt__["file.rmdir"](mount_dir)
def run(name,
        cmd,
        container_type=None,
        exec_driver=None,
        output=None,
        no_start=False,
        stdin=None,
        python_shell=True,
        output_loglevel='debug',
        ignore_retcode=False,
        use_vt=False,
        keep_env=None):
    '''
    Common logic for running shell commands in containers

    name
        Name of the container in which to run the command.

    cmd
        Command to run inside the container.

    container_type
        Container technology (e.g. ``docker``); used by the ``nsenter``
        driver to look up the container's PID via ``<container_type>.pid``.

    exec_driver
        How to enter the container: ``lxc-attach``, ``nsenter`` or
        ``docker-exec``.

    output
        ``None`` (default) returns stdout only; otherwise one of ``stdout``,
        ``stderr``, ``retcode`` or ``all`` and a dict is returned.

    keep_env
        ``True`` to keep the host environment, or a list / comma-separated
        string of variable names to preserve inside the container.

    use_vt
        Run the command through a pseudo-terminal instead of ``cmd.run``.

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
    '''
    valid_output = ('stdout', 'stderr', 'retcode', 'all')
    if output is None:
        cmd_func = 'cmd.run'
    elif output not in valid_output:
        raise SaltInvocationError(
            '\'output\' param must be one of the following: {0}'.format(
                ', '.join(valid_output)))
    else:
        cmd_func = 'cmd.run_all'

    if keep_env is None or isinstance(keep_env, bool):
        to_keep = []
    elif not isinstance(keep_env, (list, tuple)):
        try:
            to_keep = keep_env.split(',')
        except AttributeError:
            log.warning('Invalid keep_env value, ignoring')
            to_keep = []
    else:
        to_keep = keep_env

    if exec_driver == 'lxc-attach':
        full_cmd = 'lxc-attach '
        if keep_env is not True:
            full_cmd += '--clear-env '
            if 'PATH' not in to_keep:
                # --clear-env results in a very restrictive PATH
                # (/bin:/usr/bin), use a good fallback.
                full_cmd += '--set-var {0} '.format(PATH)
        full_cmd += ' '.join([
            '--set-var {0}={1}'.format(x, pipes.quote(os.environ[x]))
            for x in to_keep if x in os.environ
        ])
        full_cmd += ' -n {0} -- {1}'.format(pipes.quote(name), cmd)
    elif exec_driver == 'nsenter':
        pid = __salt__['{0}.pid'.format(container_type)](name)
        full_cmd = ('nsenter --target {0} --mount --uts --ipc --net --pid -- '.
                    format(pid))
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join([
            '{0}={1}'.format(x, pipes.quote(os.environ[x]))
            for x in to_keep if x in os.environ
        ])
        full_cmd += ' {0}'.format(cmd)
    elif exec_driver == 'docker-exec':
        # We're using docker exec on the CLI as opposed to via docker-py,
        # since the Docker API doesn't return stdout and stderr separately.
        full_cmd = 'docker exec '
        if stdin:
            full_cmd += '-i '
        full_cmd += '{0} '.format(name)
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join([
            '{0}={1}'.format(x, pipes.quote(os.environ[x]))
            for x in to_keep if x in os.environ
        ])
        full_cmd += ' {0}'.format(cmd)

    if not use_vt:
        ret = __salt__[cmd_func](full_cmd,
                                 stdin=stdin,
                                 python_shell=python_shell,
                                 output_loglevel=output_loglevel,
                                 ignore_retcode=ignore_retcode)
    else:
        stdout, stderr = '', ''
        # BUG FIX: 'proc' must be pre-bound; if vt.Terminal() raises, the
        # finally clause below would otherwise hit a NameError.
        proc = None
        try:
            proc = vt.Terminal(full_cmd,
                               shell=python_shell,
                               log_stdin_level=output_loglevel if
                               output_loglevel == 'quiet' else 'info',
                               log_stdout_level=output_loglevel,
                               log_stderr_level=output_loglevel,
                               log_stdout=True,
                               log_stderr=True,
                               stream_stdout=False,
                               stream_stderr=False)
            # Consume output
            while proc.has_unread_data:
                try:
                    cstdout, cstderr = proc.recv()
                    if cstdout:
                        stdout += cstdout
                    if cstderr:
                        if output is None:
                            # Plain-output mode folds stderr into stdout.
                            stdout += cstderr
                        else:
                            stderr += cstderr
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    break
            ret = stdout if output is None \
                else {'retcode': proc.exitstatus,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        except vt.TerminalException:
            trace = traceback.format_exc()
            log.error(trace)
            ret = stdout if output is None \
                else {'retcode': 127,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        finally:
            if proc is not None:
                proc.terminate()

    return ret
def _get_repo_dists_env(env): ''' Get repo environment overrides dictionary to use in repo distributions process env A dictionary of variables to define the repository distributions Example: .. code-block:: yaml - env: - ORIGIN : 'jessie' - LABEL : 'salt debian' - SUITE : 'main' - VERSION : '8.1' - CODENAME : 'jessie' - ARCHS : 'amd64 i386 source' - COMPONENTS : 'main' - DESCRIPTION : 'SaltStack Debian package repo' .. warning:: The above illustrates a common PyYAML pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. More info on this (and other) PyYAML idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. ''' # env key with tuple of control information for handling input env dictionary # 0 | M - Mandatory, O - Optional, I - Ignore # 1 | 'text string for repo field' # 2 | 'default value' dflts_dict = { 'OPTIONS': ('I', '', 'processed by _get_repo_options_env'), 'ORIGIN': ('O', 'Origin', 'SaltStack'), 'LABEL': ('O', 'Label', 'salt_debian'), 'SUITE': ('O', 'Suite', 'stable'), 'VERSION': ('O', 'Version', '9.0'), 'CODENAME': ('M', 'Codename', 'stretch'), 'ARCHS': ('M', 'Architectures', 'i386 amd64 source'), 'COMPONENTS': ('M', 'Components', 'main'), 'DESCRIPTION': ('O', 'Description', 'SaltStack debian package repo'), } env_dists = '' codename = '' dflts_keys = list(dflts_dict.keys()) if env is None: for key, value in dflts_dict.items(): if dflts_dict[key][0] == 'M': env_dists += '{0}: {1}\n'.format(dflts_dict[key][1], dflts_dict[key][2]) if key == 'CODENAME': codename = dflts_dict[key][2] return (codename, env_dists) if not isinstance(env, dict): raise SaltInvocationError('\'env\' must be a Python dictionary') env_man_seen = [] for key, value in env.items(): if key in dflts_keys: if dflts_dict[key][0] == 'M': env_man_seen.append(key) if key == 'CODENAME': codename = value if dflts_dict[key][0] != 'I': env_dists += '{0}: 
{1}\n'.format(dflts_dict[key][1], value) else: env_dists += '{0}: {1}\n'.format(key, value) # ensure mandatories are included env_keys = list(env.keys()) for key in env_keys: if key in dflts_keys and dflts_dict[key][ 0] == 'M' and key not in env_man_seen: env_dists += '{0}: {1}\n'.format(dflts_dict[key][1], dflts_dict[key][2]) if key == 'CODENAME': codename = value return (codename, env_dists)
def set_community_names(communities):
    """
    Manage the SNMP accepted community names and their permissions.

    .. note::
        Settings managed by Group Policy will always take precedence over
        those set using the SNMP interface. Therefore if this function finds
        Group Policy settings it will raise a CommandExecutionError

    Args:
        communities (dict): A dictionary of SNMP community names and
            permissions. The possible permissions can be found via
            ``win_snmp.get_permission_types``. Falsy permission values are
            treated as ``'None'``. The passed dictionary is not modified.

    Returns:
        bool: True if successful, otherwise False

    Raises:
        CommandExecutionError:
            If SNMP settings are being managed by Group Policy

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.set_community_names communities="{'TestCommunity': 'Read Only'}"
    """
    values = dict()

    if __utils__["reg.key_exists"](_HKEY, _COMMUNITIES_GPO_KEY):
        _LOG.debug("Communities on this system are managed by Group Policy")
        raise CommandExecutionError(
            "Communities on this system are managed by Group Policy"
        )

    current_communities = get_community_names()

    if communities == current_communities:
        _LOG.debug("Communities already contain the provided values.")
        return True

    # BUG FIX: work on a copy so the caller's dict is not mutated in place.
    requested = dict(communities)
    for vname in requested:
        if not requested[vname]:
            requested[vname] = "None"
        try:
            vdata = _PERMISSION_TYPES[requested[vname]]
        except KeyError:
            raise SaltInvocationError(
                "Invalid permission '{}' specified. Valid permissions: {}".format(
                    requested[vname], _PERMISSION_TYPES.keys()
                )
            )
        values[vname] = vdata

    # Check current communities.
    for current_vname in current_communities:
        if current_vname in values:
            # Modify existing communities that have a different permission value.
            if current_communities[current_vname] != values[current_vname]:
                __utils__["reg.set_value"](
                    _HKEY,
                    _COMMUNITIES_KEY,
                    current_vname,
                    values[current_vname],
                    "REG_DWORD",
                )
        else:
            # Remove current communities that weren't provided.
            __utils__["reg.delete_value"](_HKEY, _COMMUNITIES_KEY, current_vname)

    # Create any new communities.
    for vname in values:
        if vname not in current_communities:
            __utils__["reg.set_value"](
                _HKEY, _COMMUNITIES_KEY, vname, values[vname], "REG_DWORD"
            )

    # Get the fields post-change so that we can verify that all values
    # were modified successfully. Track the ones that weren't.
    new_communities = get_community_names()
    failed_communities = dict()

    for new_vname in new_communities:
        if new_vname not in requested:
            failed_communities[new_vname] = None

    for vname in requested:
        # BUG FIX: use .get() so a community that failed to be created is
        # recorded as a failure instead of raising KeyError.
        if requested[vname] != new_communities.get(vname):
            failed_communities[vname] = requested[vname]

    if failed_communities:
        _LOG.error("Unable to configure communities: %s", failed_communities)
        return False

    _LOG.debug("Communities configured successfully: %s", requested.keys())
    return True
def parse_targets(name=None,
                  pkgs=None,
                  sources=None,
                  saltenv='base',
                  normalize=True,
                  **kwargs):
    '''
    Parses the input to pkg.install and returns back the package(s) to be
    installed. Returns a list of packages, as well as a string noting whether
    the packages are to come from a repository or a binary package.

    name
        Comma-separated list of package names.

    pkgs
        List of packages (mutually exclusive with ``sources``).

    sources
        List of package sources (local paths or remote URLs). Ignored on
        MacOS hosts.

    normalize
        Normalize package names via ``pkg.normalize_name``, if the provider
        implements it.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.parse_targets
    '''
    if '__env__' in kwargs:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'__env__\'. This functionality will be removed in Salt '
            'Boron.')
        # Backwards compatibility
        saltenv = kwargs['__env__']

    if __grains__['os'] == 'MacOS' and sources:
        log.warning('Parameter "sources" ignored on MacOS hosts.')

    if pkgs and sources:
        log.error('Only one of "pkgs" and "sources" can be used.')
        return None, None

    elif pkgs:
        pkgs = _repack_pkgs(pkgs, normalize=normalize)
        if not pkgs:
            return None, None
        else:
            return pkgs, 'repository'

    elif sources and __grains__['os'] != 'MacOS':
        sources = pack_sources(sources, normalize=normalize)
        if not sources:
            return None, None

        srcinfo = []
        for pkg_name, pkg_src in six.iteritems(sources):
            if __salt__['config.valid_fileproto'](pkg_src):
                # Cache package from remote source (salt master, HTTP, FTP)
                # and append the cached path.
                srcinfo.append(__salt__['cp.cache_file'](pkg_src, saltenv))
            else:
                # Package file local to the minion, just append the path from
                # the pack_sources() return data.
                if not os.path.isabs(pkg_src):
                    # BUG FIX: report the full path, not its first character
                    # (pkg_src is a string, so pkg_src[0] was a single char).
                    raise SaltInvocationError(
                        'Path {0} for package {1} is either not absolute or '
                        'an invalid protocol'.format(pkg_src, pkg_name))
                srcinfo.append(pkg_src)

        return srcinfo, 'file'

    elif name:
        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            packed = dict([(_normalize_name(x), None) for x in name.split(',')])
        else:
            packed = dict([(x, None) for x in name.split(',')])
        return packed, 'repository'

    else:
        log.error('No package sources provided')
        return None, None
def import_key(user=None, text=None, filename=None, gnupghome=None):
    r'''
    Import a key from text or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as 'salt' will set the GPG home directory to
        /etc/salt/gpgkeys.

    text
        The text containing the key to import.

    filename
        The filename containing the key to import. Takes precedence over
        ``text`` if both are given.

    gnupghome
        Specify the location where GPG related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'

        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {'res': True, 'message': ''}
    gpg = _create_gpg(user, gnupghome)

    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')

    if filename:
        try:
            with salt.utils.flopen(filename, 'rb') as _fp:
                text = ''.join(_fp.readlines())
        except IOError:
            raise SaltInvocationError('filename does not exist.')

    imported_data = gpg.import_keys(text)

    # Older and newer python-gnupg expose the result differently; pull the
    # flags we care about into locals and decide the outcome once.
    if GPG_1_3_1:
        counts = imported_data.counts
        was_imported = counts.get('imported') or counts.get('imported_rsa')
        was_unchanged = counts.get('unchanged')
        was_rejected = counts.get('not_imported')
        total = counts.get('count')
    else:
        was_imported = imported_data.imported or imported_data.imported_rsa
        was_unchanged = imported_data.unchanged
        was_rejected = imported_data.not_imported
        total = imported_data.count

    if was_imported:
        ret['message'] = 'Successfully imported key(s).'
    elif was_unchanged:
        ret['message'] = 'Key(s) already exist in keychain.'
    elif was_rejected or not total:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'

    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as 'salt' will set the GPG home directory to
        /etc/salt/gpgkeys.

    recipients
        The fingerprints for those recipient whom the data is being encrypted
        for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is
        standard out.

    sign
        Whether to sign, in addition to encrypt, the data. True to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from pillar.

    gnupghome
        Specify the location where GPG related files are stored.

    bare
        If True, return the (armored) encrypted block as a string without the
        standard comment/res dict

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there.  How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {'res': True, 'comment': ''}
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.item']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError(
                'gpg_passphrase not available in pillar.')
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        # BUG FIX: pass 'sign' through for text input too; it was documented
        # but silently ignored here.
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase,
                             sign=sign)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream - have to read in the contents and encrypt.
            with salt.utils.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            # BUG FIX: 'sign' was dropped on this code path as well.
            result = gpg.encrypt(_contents, recipients,
                                 passphrase=gpg_passphrase,
                                 output=output,
                                 sign=sign)
        else:
            # This version allows encrypting the file stream
            with salt.utils.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients,
                                              passphrase=gpg_passphrase,
                                              output=output,
                                              sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients,
                                              passphrase=gpg_passphrase,
                                              sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(
                    output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(
                result.status)
        else:
            ret = False
        log.error(result.stderr)

    return ret
def get_resource_records(HostedZoneId=None, Name=None, StartRecordName=None,
                         StartRecordType=None, PrivateZone=None,
                         region=None, key=None, keyid=None, profile=None):
    '''
    Get all resource records from a given zone matching the provided
    StartRecordName (if given) or all records in the zone (if not), optionally
    filtered by a specific StartRecordType. This will return any and all RRs
    matching, regardless of their special AWS flavors (weighted, geolocation,
    alias, etc.) so your code should be prepared for potentially large numbers
    of records back from this function - for example, if you've created a
    complex geolocation mapping with lots of entries all over the world
    providing the same server name to many different regional clients.

    If you want EXACTLY ONE record to operate on, you'll need to implement any
    logic required to pick the specific RR you care about from those returned.

    Note that if you pass in Name without providing a value for PrivateZone
    (either True or False), CommandExecutionError can be raised in the case of
    both public and private zones matching the domain.

    HostedZoneId
        The unique Zone Identifier for the Hosted Zone. Exclusive with Name.

    Name
        The domain name associated with the Hosted Zone. Exclusive with
        HostedZoneId.

    StartRecordName
        If given, only records with exactly this name are returned.

    StartRecordType
        If given, only records of this type are returned.

    PrivateZone
        Set to True/False to disambiguate when both a public and a private
        zone match the given Name.

    region, key, keyid, profile
        Standard AWS connection parameters.

    CLI example::

        salt myminion boto3_route53.get_resource_records test.example.org example.org A
    '''
    if not _exactly_one((HostedZoneId, Name)):
        raise SaltInvocationError(
            'Exactly one of either HostedZoneId or Name must '
            'be provided.')
    if Name:
        # Resolve the domain name to a zone ID first.
        args = {
            'Name': Name,
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile
        }
        args.update({'PrivateZone': PrivateZone
                     }) if PrivateZone is not None else None
        zone = find_hosted_zone(**args)
        if not zone:
            log.error(
                "Couldn't resolve domain name {0} to a hosted zone ID.".format(
                    Name))
            return []
        HostedZoneId = zone[0]['HostedZone']['Id']

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = []
    # Pagination cursors for list_resource_record_sets; seeded with the
    # caller's starting point, then advanced from each response.
    next_rr_name = StartRecordName
    next_rr_type = StartRecordType
    next_rr_id = None
    done = False
    while True:
        if done:
            return ret
        args = {'HostedZoneId': HostedZoneId}
        args.update({'StartRecordName': next_rr_name
                     }) if next_rr_name else None
        # Grrr, can't specify type unless name is set...  We'll do this via
        # filtering later instead
        args.update({'StartRecordType': next_rr_type
                     }) if next_rr_name and next_rr_type else None
        args.update({'StartRecordIdentifier': next_rr_id
                     }) if next_rr_id else None
        try:
            r = conn.list_resource_record_sets(**args)
            rrs = r['ResourceRecordSets']
            next_rr_name = r.get('NextRecordName')
            next_rr_type = r.get('NextRecordType')
            next_rr_id = r.get('NextRecordIdentifier')
            for rr in rrs:
                # Results are name-ordered, so once the name no longer
                # matches, nothing further can match either.
                if StartRecordName and rr['Name'] != StartRecordName:
                    done = True
                    break
                if StartRecordType and rr['Type'] != StartRecordType:
                    if StartRecordName:
                        done = True
                        break
                    else:
                        # We're filtering by type alone, and there might be
                        # more later, so...
                        continue
                ret += [rr]
            if not next_rr_name:
                done = True
        except ClientError as e:
            # Try forever on a simple thing like this...
            if e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                continue
            raise e
def change_resource_record_sets(HostedZoneId=None,
                                Name=None,
                                PrivateZone=None,
                                ChangeBatch=None,
                                region=None,
                                key=None,
                                keyid=None,
                                profile=None):
    '''
    Ugh!!!  Not gonna try to reproduce and validate this mess in here - just
    pass what we get to AWS and let it decide if it's valid or not...

    See the `AWS Route53 API docs`__ as well as the `Boto3 documentation`__
    for all the details...

    .. __: https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html
    .. __: http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.change_resource_record_sets

    The syntax for a ChangeBatch parameter is as follows, but note that the
    permutations of allowed parameters and combinations thereof are quite
    varied, so perusal of the above linked docs is highly recommended for any
    non-trival configurations.

    .. code-block:: json

        ChangeBatch={
            'Comment': 'string',
            'Changes': [
                {
                    'Action': 'CREATE'|'DELETE'|'UPSERT',
                    'ResourceRecordSet': {
                        'Name': 'string',
                        'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                        'SetIdentifier': 'string',
                        'Weight': 123,
                        'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-west-1'|'eu-west-2'|'eu-central-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1'|'cn-north-1'|'ap-south-1',
                        'GeoLocation': {
                            'ContinentCode': 'string',
                            'CountryCode': 'string',
                            'SubdivisionCode': 'string'
                        },
                        'Failover': 'PRIMARY'|'SECONDARY',
                        'TTL': 123,
                        'ResourceRecords': [
                            {
                                'Value': 'string'
                            },
                        ],
                        'AliasTarget': {
                            'HostedZoneId': 'string',
                            'DNSName': 'string',
                            'EvaluateTargetHealth': True|False
                        },
                        'HealthCheckId': 'string',
                        'TrafficPolicyInstanceId': 'string'
                    }
                },
            ]
        }

    CLI Example:

    .. code-block:: bash

        foo='{
               "Name": "my-cname.example.org.",
               "TTL": 600,
               "Type": "CNAME",
               "ResourceRecords": [ { "Value": "my-host.example.org" } ]
             }'
        foo=`echo $foo`  # Remove newlines
        salt myminion boto3_route53.change_resource_record_sets DomainName=example.org. \
                keyid=A1234567890ABCDEF123 key=xblahblahblah \
                ChangeBatch="{'Changes': [{'Action': 'UPSERT', 'ResourceRecordSet': $foo}]}"
    '''
    if not _exactly_one((HostedZoneId, Name)):
        # BUG FIX: error message said 'HostZoneId' while the parameter is
        # actually named 'HostedZoneId'.
        raise SaltInvocationError(
            'Exactly one of either HostedZoneId or Name must be provided.')
    if Name:
        args = {
            'Name': Name,
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile
        }
        if PrivateZone is not None:
            args['PrivateZone'] = PrivateZone
        zone = find_hosted_zone(**args)
        if not zone:
            log.error(
                "Couldn't resolve domain name {0} to a hosted zone ID.".format(
                    Name))
            return []
        HostedZoneId = zone[0]['HostedZone']['Id']

    args = {'HostedZoneId': HostedZoneId, 'ChangeBatch': ChangeBatch}

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    tries = 20  # A bit more headroom
    while tries:
        try:
            r = conn.change_resource_record_sets(**args)
            # And a little extra time here
            return _wait_for_sync(r['ChangeInfo']['Id'], conn, 30)
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                # BUG FIX: this decremented an undefined name 'retries',
                # which raised NameError on the first throttle.
                tries -= 1
                continue
            log.error(
                'Failed to apply requested changes to the hosted zone {0}: {1}'
                .format(Name or HostedZoneId, str(e)))
            return False
    # Retries exhausted without a successful call.
    log.error('Failed to apply requested changes to the hosted zone {0}: '
              'retries exhausted.'.format(Name or HostedZoneId))
    return False
def associate_vpc_with_hosted_zone(HostedZoneId=None,
                                   Name=None,
                                   VPCId=None,
                                   VPCName=None,
                                   VPCRegion=None,
                                   Comment=None,
                                   region=None,
                                   key=None,
                                   keyid=None,
                                   profile=None):
    '''
    Associates an Amazon VPC with a private hosted zone.

    To perform the association, the VPC and the private hosted zone must
    already exist. You can't convert a public hosted zone into a private
    hosted zone. If you want to associate a VPC from one AWS account with a
    zone from a another, the AWS account owning the hosted zone must first
    submit a CreateVPCAssociationAuthorization (using
    create_vpc_association_authorization() or by other means, such as the AWS
    console). With that done, the account owning the VPC can then call
    associate_vpc_with_hosted_zone() to create the association.

    Note that if both sides happen to be within the same account, associate_vpc_with_hosted_zone()
    is enough on its own, and there is no need for the
    CreateVPCAssociationAuthorization step.

    Also note that looking up hosted zones by name (e.g. using the Name
    parameter) only works within a single account - if you're associating a
    VPC to a zone in a different account, as outlined above, you unfortunately
    MUST use the HostedZoneId parameter exclusively.

    HostedZoneId
        The unique Zone Identifier for the Hosted Zone.

    Name
        The domain name associated with the Hosted Zone(s).

    VPCId
        When working with a private hosted zone, either the VPC ID or VPC
        Name to associate with is required. Exclusive with VPCName.

    VPCName
        When working with a private hosted zone, either the VPC ID or VPC
        Name to associate with is required. Exclusive with VPCId.

    VPCRegion
        When working with a private hosted zone, the region of the associated
        VPC is required. If not provided, an effort will be made to determine
        it from VPCId or VPCName, if possible. If this fails, you'll need to
        provide an explicit value for VPCRegion.

    Comment
        Any comments you want to include about the change being made.

    CLI Example::

        salt myminion boto3_route53.associate_vpc_with_hosted_zone \
                    Name=example.org. VPCName=myVPC \
                    VPCRegion=us-east-1 Comment="Whoo-hoo!  I added another VPC."

    '''
    if not _exactly_one((HostedZoneId, Name)):
        raise SaltInvocationError(
            'Exactly one of either HostedZoneId or Name is required.')
    if not _exactly_one((VPCId, VPCName)):
        raise SaltInvocationError(
            'Exactly one of either VPCId or VPCName is required.')
    if Name:
        # {'PrivateZone': True} because you can only associate VPCs with
        # private hosted zones.
        args = {
            'Name': Name,
            'PrivateZone': True,
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile
        }
        zone = find_hosted_zone(**args)
        if not zone:
            # BUG FIX: implicit string concat produced "zoneID." (no space).
            log.error(
                "Couldn't resolve domain name {0} to a private hosted zone "
                'ID.'.format(Name))
            return False
        HostedZoneId = zone[0]['HostedZone']['Id']

    vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId,
                                              name=VPCName,
                                              region=region,
                                              key=key,
                                              keyid=keyid,
                                              profile=profile).get('vpcs', [])
    if VPCRegion and vpcs:
        vpcs = [v for v in vpcs if v['region'] == VPCRegion]
    if not vpcs:
        log.error('No VPC matching the given criteria found.')
        return False
    if len(vpcs) > 1:
        log.error('Multiple VPCs matching the given criteria found: {0}.'
                  ''.format(', '.join([v['id'] for v in vpcs])))
        return False
    vpc = vpcs[0]
    if VPCName:
        VPCId = vpc['id']
    if not VPCRegion:
        VPCRegion = vpc['region']

    args = {
        'HostedZoneId': HostedZoneId,
        'VPC': {
            'VPCId': VPCId,
            'VPCRegion': VPCRegion
        }
    }
    if Comment is not None:
        args['Comment'] = Comment

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    tries = 10
    while tries:
        try:
            r = conn.associate_vpc_with_hosted_zone(**args)
            return _wait_for_sync(r['ChangeInfo']['Id'], conn)
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                # BUG FIX: this decremented an undefined name 'retries',
                # which raised NameError on the first throttle.
                tries -= 1
                continue
            log.error(
                'Failed to associate VPC {0} with hosted zone {1}: {2}'.format(
                    VPCName or VPCId, Name or HostedZoneId, str(e)))
            return False
    # Retries exhausted without a successful call.
    log.error('Failed to associate VPC {0} with hosted zone {1}: retries '
              'exhausted.'.format(VPCName or VPCId, Name or HostedZoneId))
    return False
def diassociate_vpc_from_hosted_zone(HostedZoneId=None,
                                     Name=None,
                                     VPCId=None,
                                     VPCName=None,
                                     VPCRegion=None,
                                     Comment=None,
                                     region=None,
                                     key=None,
                                     keyid=None,
                                     profile=None):
    '''
    Disassociates an Amazon VPC from a private hosted zone.

    You can't disassociate the last VPC from a private hosted zone. You also
    can't convert a private hosted zone into a public hosted zone.

    Note that looking up hosted zones by name (e.g. using the Name parameter)
    only works within a single AWS account - if you're disassociating a VPC
    in one account from a hosted zone in a different account you unfortunately
    MUST use the HostedZoneId parameter exclusively.

    HostedZoneId
        The unique Zone Identifier for the Hosted Zone.

    Name
        The domain name associated with the Hosted Zone(s).

    VPCId
        When working with a private hosted zone, either the VPC ID or VPC
        Name to associate with is required. Exclusive with VPCName.

    VPCName
        When working with a private hosted zone, either the VPC ID or VPC
        Name to associate with is required. Exclusive with VPCId.

    VPCRegion
        When working with a private hosted zone, the region of the associated
        VPC is required. If not provided, an effort will be made to determine
        it from VPCId or VPCName, if possible. If this fails, you'll need to
        provide an explicit value for VPCRegion.

    Comment
        Any comments you want to include about the change being made.

    CLI Example::

        salt myminion boto3_route53.disassociate_vpc_from_hosted_zone \
                    Name=example.org. VPCName=myVPC \
                    VPCRegion=us-east-1 Comment="Whoops!  Don't wanna talk to this-here zone no more."

    '''
    if not _exactly_one((HostedZoneId, Name)):
        raise SaltInvocationError(
            'Exactly one of either HostedZoneId or Name is required.')
    if not _exactly_one((VPCId, VPCName)):
        raise SaltInvocationError(
            'Exactly one of either VPCId or VPCName is required.')
    if Name:
        # {'PrivateZone': True} because you can only associate VPCs with
        # private hosted zones.
        args = {
            'Name': Name,
            'PrivateZone': True,
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile
        }
        zone = find_hosted_zone(**args)
        if not zone:
            # BUG FIX: implicit string concat produced "zoneID." (no space).
            log.error(
                "Couldn't resolve domain name {0} to a private hosted zone "
                'ID.'.format(Name))
            return False
        HostedZoneId = zone[0]['HostedZone']['Id']

    vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId,
                                              name=VPCName,
                                              region=region,
                                              key=key,
                                              keyid=keyid,
                                              profile=profile).get('vpcs', [])
    if VPCRegion and vpcs:
        vpcs = [v for v in vpcs if v['region'] == VPCRegion]
    if not vpcs:
        log.error('No VPC matching the given criteria found.')
        return False
    if len(vpcs) > 1:
        log.error('Multiple VPCs matching the given criteria found: {0}.'
                  ''.format(', '.join([v['id'] for v in vpcs])))
        return False
    vpc = vpcs[0]
    if VPCName:
        VPCId = vpc['id']
    if not VPCRegion:
        VPCRegion = vpc['region']

    args = {
        'HostedZoneId': HostedZoneId,
        'VPC': {
            'VPCId': VPCId,
            'VPCRegion': VPCRegion
        }
    }
    if Comment is not None:
        args['Comment'] = Comment

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    tries = 10
    while tries:
        try:
            r = conn.disassociate_vpc_from_hosted_zone(**args)
            return _wait_for_sync(r['ChangeInfo']['Id'], conn)
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                # BUG FIX: this decremented an undefined name 'retries',
                # which raised NameError on the first throttle.
                tries -= 1
                continue
            # BUG FIX: this log message said "associate" for the
            # disassociation operation.
            log.error(
                'Failed to disassociate VPC {0} from hosted zone {1}: {2}'
                .format(VPCName or VPCId, Name or HostedZoneId, str(e)))
            return False
    # Retries exhausted without a successful call.
    log.error('Failed to disassociate VPC {0} from hosted zone {1}: retries '
              'exhausted.'.format(VPCName or VPCId, Name or HostedZoneId))
    return False
def update_hosted_zone_comment(Id=None, Name=None, Comment=None, PrivateZone=None,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Update the comment on an existing Route 53 hosted zone.

    Id
        The unique Zone Identifier for the Hosted Zone.  Exclusive with Name.

    Name
        The domain name associated with the Hosted Zone(s).  Exclusive with Id.

    Comment
        Any comments you want to include about the hosted zone.

    PrivateZone
        Boolean - Set to True if changing a private hosted zone.

    CLI Example::

        salt myminion boto3_route53.update_hosted_zone_comment Name=example.org. \
                Comment="This is an example comment for an example zone"
    '''
    if not _exactly_one((Id, Name)):
        raise SaltInvocationError(
            'Exactly one of either Id or Name is required.')
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if Name:
        # Resolve the domain name to a zone Id first.
        args = {'Name': Name, 'PrivateZone': PrivateZone, 'region': region,
                'key': key, 'keyid': keyid, 'profile': profile}
        zone = find_hosted_zone(**args)
        if not zone:
            log.error(
                "Couldn't resolve domain name {0} to a hosted zone ID.".format(
                    Name))
            return []
        Id = zone[0]['HostedZone']['Id']
    tries = 10
    while tries:
        try:
            r = conn.update_hosted_zone_comment(Id=Id, Comment=Comment)
            r.pop('ResponseMetadata', None)
            return [r]
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                # BUG FIX: previously decremented the undefined name
                # 'retries', raising NameError on the first throttle.
                tries -= 1
                continue
            log.error(
                'Failed to update comment on hosted zone {0}: {1}'.format(
                    Name or Id, str(e)))
            return []
    # Retries exhausted on throttling.
    return []
def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerReference=None,
                       Comment='', PrivateZone=False, DelegationSetId=None,
                       region=None, key=None, keyid=None, profile=None):
    '''
    Create a new Route53 Hosted Zone.  Returns a Python data structure with
    information about the newly created Hosted Zone.

    Name
        The name of the domain.  This should be a fully-specified domain, and
        should terminate with a period.  This is the name you have registered
        with your DNS registrar.  It is also the name you will delegate from
        your registrar to the Amazon Route 53 delegation servers returned in
        response to this request.

    VPCId
        When creating a private hosted zone, either the VPC ID or VPC Name to
        associate with is required.  Exclusive with VPCName.  Ignored for
        non-private zones.

    VPCName
        When creating a private hosted zone, either the VPC ID or VPC Name to
        associate with is required.  Exclusive with VPCId.  Ignored for
        non-private zones.

    VPCRegion
        When creating a private hosted zone, the region of the associated VPC
        is required.  If not provided, an effort will be made to determine it
        from VPCId or VPCName, if possible.  Ignored for non-private zones.

    CallerReference
        A unique string that identifies the request and that allows
        create_hosted_zone() calls to be retried without the risk of executing
        the operation twice.  Maximum length of 128.

    Comment
        Any comments you want to include about the hosted zone.

    PrivateZone
        Boolean - Set to True if creating a private hosted zone.

    DelegationSetId
        If you want to associate a reusable delegation set with this hosted
        zone, the ID that Amazon Route 53 assigned to the reusable delegation
        set when you created it.

    region
        Region endpoint to connect to.

    key
        AWS key to bind with.

    keyid
        AWS keyid to bind with.

    profile
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.

    CLI Example::

        salt myminion boto3_route53.create_hosted_zone example.org.
    '''
    if not Name.endswith('.'):
        raise SaltInvocationError(
            'Domain must be fully-qualified, complete with trailing period.')
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    deets = find_hosted_zone(Name=Name, PrivateZone=PrivateZone, region=region,
                             key=key, keyid=keyid, profile=profile)
    if deets:
        log.info(
            'Route 53 hosted zone {0} already exists. You may want to pass e.g. '
            "'PrivateZone=True' or similar...".format(Name))
        return None
    args = {
        'Name': Name,
        'CallerReference': CallerReference,
        'HostedZoneConfig': {
            'Comment': Comment,
            'PrivateZone': PrivateZone
        }
    }
    # Plain conditional instead of the old `args.update(...) if ... else None`
    # expression-as-statement anti-idiom.
    if DelegationSetId:
        args['DelegationSetId'] = DelegationSetId
    if PrivateZone:
        if not _exactly_one((VPCName, VPCId)):
            raise SaltInvocationError(
                'Either VPCName or VPCId is required when creating a '
                'private zone.')
        vpcs = __salt__['boto_vpc.describe_vpcs'](
                vpc_id=VPCId, name=VPCName, region=region, key=key,
                keyid=keyid, profile=profile).get('vpcs', [])
        if VPCRegion and vpcs:
            vpcs = [v for v in vpcs if v['region'] == VPCRegion]
        if not vpcs:
            log.error(
                'Private zone requested but no VPC matching given criteria found.')
            return None
        if len(vpcs) > 1:
            log.error(
                'Private zone requested but multiple VPCs matching given criteria found: '
                '{0}.'.format([v['id'] for v in vpcs]))
            return None
        vpc = vpcs[0]
        if VPCName:
            VPCId = vpc['id']
        if not VPCRegion:
            VPCRegion = vpc['region']
        args.update({'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}})
    else:
        if any((VPCId, VPCName, VPCRegion)):
            log.info(
                'Options VPCId, VPCName, and VPCRegion are ignored when creating '
                'non-private zones.')
    tries = 10
    while tries:
        try:
            r = conn.create_hosted_zone(**args)
            r.pop('ResponseMetadata', None)
            if _wait_for_sync(r['ChangeInfo']['Id'], conn):
                return [r]
            return []
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                # BUG FIX: previously decremented the undefined name
                # 'retries', raising NameError on the first throttle.
                tries -= 1
                continue
            log.error('Failed to create hosted zone {0}: {1}'.format(
                Name, str(e)))
            return []
    return []
def find_hosted_zone(Id=None, Name=None, PrivateZone=None,
                     region=None, key=None, keyid=None, profile=None):
    '''
    Find a hosted zone with the given characteristics.

    Id
        The unique Zone Identifier for the Hosted Zone.  Exclusive with Name.

    Name
        The domain name associated with the Hosted Zone.  Exclusive with Id.
        Note this has the potential to match more then one hosted zone (e.g. a
        public and a private if both exist) which will raise an error unless
        PrivateZone has also been passed in order split the different.

    PrivateZone
        Boolean - Set to True if searching for a private hosted zone.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.

    CLI Example:

    .. code-block:: bash

        salt myminion boto3_route53.find_hosted_zone Name=salt.org. \
                profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
    '''
    # Exactly one lookup key, and PrivateZone (when given) must be boolean.
    if not _exactly_one((Id, Name)):
        raise SaltInvocationError(
            'Exactly one of either Id or Name is required.')
    if PrivateZone is not None and not isinstance(PrivateZone, bool):
        raise SaltInvocationError(
            'If set, PrivateZone must be a bool (e.g. True / False).')

    # Fetch candidates by whichever key was supplied.
    if Id:
        matches = get_hosted_zone(Id, region=region, key=key, keyid=keyid,
                                  profile=profile)
    else:
        matches = get_hosted_zones_by_domain(Name, region=region, key=key,
                                             keyid=keyid, profile=profile)

    # Optionally narrow to public-only or private-only zones.
    if PrivateZone is not None:
        matches = [zone for zone in matches
                   if zone['HostedZone']['Config']['PrivateZone'] is PrivateZone]

    # An ambiguous match is treated as no match at all.
    if len(matches) > 1:
        log.error(
            'Request matched more than one Hosted Zone ({0}). Refine your criteria and try '
            'again.'.format([z['HostedZone']['Id'] for z in matches]))
        matches = []
    return matches
def _publish(tgt,
             fun,
             arg=None,
             expr_form='glob',
             returner='',
             timeout=5,
             form='clean',
             wait=False,
             via_master=None):
    '''
    Publish a command from the minion out to other minions, publications need
    to be enabled on the Salt master and the minion needs to have permission
    to publish the command. The Salt master will also prevent a recursive
    publication loop, this means that a minion cannot command another minion
    to command another minion as that would create an infinite command loop.

    The arguments sent to the minion publish function are separated with
    commas. This means that for a minion executing a command with multiple
    args it will look like this::

        salt system.example.com publish.publish '*' user.add 'foo,1020,1020'

    CLI Example:

    .. code-block:: bash

        salt system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
    '''
    # A minion can only publish through a configured master connection.
    if 'master_uri' not in __opts__:
        log.error(
            'Cannot run publish commands without a connection to a salt master. No command sent.'
        )
        return {}
    # Refuse recursive publish calls up front.
    if fun.startswith('publish.'):
        log.info('Cannot publish publish calls. Returning {}')
        return {}

    arg = _parse_args(arg)

    if via_master:
        if 'master_uri_list' not in __opts__:
            raise SaltInvocationError(message='Could not find list of masters \
in minion configuration but `via_master` was specified.')
        else:
            # Find the master in the list of master_uris generated by the minion base class
            matching_master_uris = [
                master for master in __opts__['master_uri_list']
                if '//{0}:'.format(via_master) in master
            ]
            if not matching_master_uris:
                raise SaltInvocationError('Could not find match for {0} in \
list of configured masters {1} when using `via_master` option'.format(
                    via_master, __opts__['master_uri_list']))
            if len(matching_master_uris) > 1:
                # If we have multiple matches, consider this a non-fatal error
                # and continue with whatever we found first.
                log.warning('The `via_master` flag found \
more than one possible match found for {0} when evaluating \
list {1}'.format(via_master, __opts__['master_uri_list']))
            master_uri = matching_master_uris.pop()
    else:
        # If no preference is expressed by the user, just publish to the first master
        # in the list.
        master_uri = __opts__['master_uri']

    log.info('Publishing \'{0}\' to {1}'.format(fun, master_uri))
    # Authenticate with the master and build the publish payload.
    auth = salt.crypt.SAuth(__opts__)
    tok = auth.gen_token('salt')
    load = {
        'cmd': 'minion_pub',
        'fun': fun,
        'arg': arg,
        'tgt': tgt,
        'tgt_type': expr_form,
        'ret': returner,
        'tok': tok,
        'tmo': timeout,
        'form': form,
        'id': __opts__['id']
    }

    channel = salt.transport.Channel.factory(__opts__, master_uri=master_uri)
    try:
        peer_data = channel.send(load)
    except SaltReqTimeoutError:
        return '\'{0}\' publish timed out'.format(fun)
    if not peer_data:
        return {}
    # CLI args are passed as strings, re-cast to keep time.sleep happy
    if wait:
        # Poll the master until every matched minion has returned, or until
        # the timeout elapses.
        loop_interval = 0.3
        matched_minions = set(peer_data['minions'])
        returned_minions = set()
        loop_counter = 0
        while len(returned_minions ^ matched_minions) > 0:
            load = {
                'cmd': 'pub_ret',
                'id': __opts__['id'],
                'tok': tok,
                'jid': peer_data['jid']
            }
            ret = channel.send(load)
            returned_minions = set(ret.keys())

            end_loop = False
            if returned_minions >= matched_minions:
                # All targeted minions have reported back.
                end_loop = True
            elif (loop_interval * loop_counter) > timeout:
                # This may be unnecessary, but I am paranoid
                if len(returned_minions) < 1:
                    return {}
                end_loop = True

            if end_loop:
                if form == 'clean':
                    # 'clean' form strips each return down to its 'ret' value.
                    cret = {}
                    for host in ret:
                        cret[host] = ret[host]['ret']
                    return cret
                else:
                    return ret
            loop_counter = loop_counter + 1
            time.sleep(loop_interval)
    else:
        # Non-waiting mode: sleep for the full timeout, then fetch whatever
        # returns have arrived in a single request.
        time.sleep(float(timeout))
        load = {
            'cmd': 'pub_ret',
            'id': __opts__['id'],
            'tok': tok,
            'jid': peer_data['jid']
        }
        ret = channel.send(load)
        if form == 'clean':
            cret = {}
            for host in ret:
                cret[host] = ret[host]['ret']
            return cret
        else:
            return ret
def set_target(alias, target, type=None, descr=None, detail=None):
    '''
    Set the entry in the aliases file for the given alias, this will overwrite
    any previous entry for the given alias or create a new one if it does not
    exist.

    CLI Example:

    .. code-block:: bash

        salt '*' pfsense_aliases.set_target alias target
    '''
    if alias == '':
        raise SaltInvocationError('alias can not be an empty string')
    if target == '':
        raise SaltInvocationError('target can not be an empty string')

    # Normalize the desired target(s) to a list.
    desired = target if isinstance(target, list) else [target]

    # Nothing to do when every desired target is already present.
    current_targets = get_target(alias)
    if all(entry in current_targets for entry in desired):
        return True

    client = _get_client()
    config = client.config_get()

    rebuilt = []
    found = False
    for existing in config['aliases'].get('alias', []):
        if existing['name'] == alias:
            # Overwrite the existing entry in place.
            found = True
            existing['address'] = ' '.join(desired)
            if descr:
                existing['descr'] = descr
            if detail:
                existing['detail'] = detail
            # Changing an alias's type is not supported.
            if type and type != existing['type']:
                raise CommandExecutionError('You ask for type {0} but already present as {1}'.format(type, existing['type']))
        rebuilt.append(existing)

    if not found:
        # Creating a new entry requires a valid type.
        if type not in ['port', 'network', 'host']:
            raise SaltInvocationError('type is not correct')
        entry = {
            'name': alias,
            'address': ' '.join(desired),
            'type': type
        }
        if descr:
            entry['descr'] = descr
        if detail:
            entry['detail'] = detail
        rebuilt.append(entry)

    if 'alias' not in config['aliases']:
        config['aliases'] = {'alias': rebuilt}
    else:
        config['aliases']['alias'] = rebuilt

    result = client.config_set(config)
    if 'message' not in result:
        raise CommandExecutionError('Problem when updating alias')
    elif result['message'] != 'ok':
        logger.warning(result)
        raise CommandExecutionError('Problem when updating alias')
    return True
def decrypt(user=None, text=None, filename=None, output=None,
            use_passphrase=False, gnupghome=None, bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as 'salt' will set the GPG home directory to
        /etc/salt/gpgkeys.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is
        standard out.

    use_passphrase
        Whether to use a passphrase with the signing key.  Passphrase is
        received from pillar.

    gnupghome
        Specify the location where GPG related files are stored.

    bare
        If True, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_pasphrase=True
    '''
    ret = {'res': True, 'comment': ''}
    gpg = _create_gpg(user, gnupghome)

    # Resolve the passphrase from pillar when requested.
    passphrase = None
    if use_passphrase:
        pillar_data = __salt__['pillar.item']('gpg_passphrase')
        if not pillar_data:
            raise SaltInvocationError(
                'gpg_passphrase not available in pillar.')
        passphrase = pillar_data['gpg_passphrase']

    if text:
        result = gpg.decrypt(text, passphrase=passphrase)
    elif filename:
        with salt.utils.flopen(filename, 'rb') as _fp:
            decrypt_kwargs = {'passphrase': passphrase}
            if output:
                decrypt_kwargs['output'] = output
            result = gpg.decrypt_file(_fp, **decrypt_kwargs)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if bare:
            ret = result.data
        elif output:
            ret['comment'] = 'Decrypted data has been written to {0}'.format(
                output)
        else:
            ret['comment'] = result.data
    else:
        log.error(result.stderr)
        if bare:
            ret = False
        else:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(
                result.status)
    return ret
def subnet_group_present(name, description, subnet_ids=None, subnet_names=None,
                         tags=None, region=None, key=None, keyid=None,
                         profile=None):
    '''
    Ensure DB subnet group exists.

    name
        The name for the DB subnet group.  This value is stored as a lowercase
        string.

    subnet_ids
        A list of the EC2 Subnet IDs for the DB subnet group.  Either
        subnet_ids or subnet_names must be provided.

    subnet_names
        A list of The EC2 Subnet names for the DB subnet group.  Either
        subnet_ids or subnet_names must be provided.

    description
        Subnet group description.

    tags
        A dict of tags.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    if not salt.utils.exactly_one((subnet_ids, subnet_names)):
        raise SaltInvocationError('One (but not both) of subnet_ids or '
                                  'subnet_names must be provided.')

    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    subnet_ids = subnet_ids or []
    # Resolve any subnet names to their IDs before proceeding.
    for subnet_name in subnet_names or []:
        lookup = __salt__['boto_vpc.get_resource_id']('subnet',
                                                      name=subnet_name,
                                                      region=region,
                                                      key=key, keyid=keyid,
                                                      profile=profile)
        if 'error' in lookup:
            ret['result'] = False
            ret['comment'] = 'Error looking up subnet ids: {0}'.format(
                lookup['error']['message'])
            return ret
        if lookup['id'] is None:
            ret['result'] = False
            ret['comment'] = 'Subnet {0} does not exist.'.format(subnet_name)
            return ret
        subnet_ids.append(lookup['id'])

    exists = __salt__['boto_rds.subnet_group_exists'](name=name, tags=tags,
                                                      region=region, key=key,
                                                      keyid=keyid,
                                                      profile=profile)
    if exists.get('exists'):
        # Nothing to do - the group is already present.
        ret['comment'] = 'Subnet {0} present.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Subnet group {0} is set to be created.'.format(name)
        ret['result'] = None
        return ret

    created = __salt__['boto_rds.create_subnet_group'](
        name=name, description=description, subnet_ids=subnet_ids, tags=tags,
        region=region, key=key, keyid=keyid, profile=profile)
    if not created:
        ret['result'] = False
        ret['comment'] = 'Failed to create {0} subnet group.'.format(name)
        return ret

    ret['changes']['old'] = None
    ret['changes']['new'] = name
    ret['comment'] = 'Subnet {0} created.'.format(name)
    return ret
def sign(user=None, keyid=None, text=None, filename=None, output=None,
         use_passphrase=False, gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as 'salt' will set the GPG home directory to
        /etc/salt/gpgkeys.

    keyid
        The keyid of the key to set the trust level for, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is
        standard out.

    use_passphrase
        Whether to use a passphrase with the signing key.  Passphrase is
        received from pillar.

    gnupghome
        Specify the location where GPG related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there.  How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_pasphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.item']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError(
                'gpg_passphrase not available in pillar.')
        # BUG FIX: pillar.item returns {'gpg_passphrase': <value>}; unwrap the
        # actual passphrase (matches the behavior of decrypt()).
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    # python-gnupg renamed the signing-key kwarg in 1.3.1.
    gnupg_version = distutils.version.LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid,
                                   passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid,
                                   passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.flopen(filename, 'rb') as _fp:
            # BUG FIX: the >= 1.3.1 branch previously called
            # gpg.sign(text, ...) - signing the (empty) text argument instead
            # of the file. Use sign_file on the open handle in both branches.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid,
                                            passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid,
                                            passphrase=gpg_passphrase)
        if output:
            with salt.utils.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
                    S3Bucket=None, S3Key=None, S3ObjectVersion=None,
                    Description="", Timeout=3, MemorySize=128, Publish=False,
                    WaitForRole=False, RoleRetries=5,
                    region=None, key=None, keyid=None, profile=None):
    '''
    Given a valid config, create a function.

    Returns {created: true} if the function was created and returns
    {created: False} if the function was not created.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lamba.create_function my_function python2.7 my_role my_file.my_function my_function.zip
    '''
    # Resolve the IAM role name to a full ARN before talking to Lambda.
    role_arn = _get_role_arn(Role, region=region, key=key, keyid=keyid,
                             profile=profile)
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # The function code comes either from a local zip or from S3 - the
        # two sources are mutually exclusive.
        if ZipFile:
            if S3Bucket or S3Key or S3ObjectVersion:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            code = {
                'ZipFile': _filedata(ZipFile),
            }
        else:
            if not S3Bucket or not S3Key:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            code = {
                'S3Bucket': S3Bucket,
                'S3Key': S3Key,
            }
            if S3ObjectVersion:
                code['S3ObjectVersion'] = S3ObjectVersion
        if WaitForRole:
            retrycount = RoleRetries
        else:
            retrycount = 1
        # Freshly created IAM roles can take time to propagate; retry with
        # exponential backoff while AWS reports the role as invalid.
        for retry in range(retrycount, 0, -1):
            try:
                func = conn.create_function(FunctionName=FunctionName,
                                            Runtime=Runtime,
                                            Role=role_arn,
                                            Handler=Handler,
                                            Code=code,
                                            Description=Description,
                                            Timeout=Timeout,
                                            MemorySize=MemorySize,
                                            Publish=Publish)
            except ClientError as e:
                if retry > 1 and e.response.get(
                        'Error',
                        {}).get('Code') == 'InvalidParameterValueException':
                    log.info(
                        'Function not created but IAM role may not have propagated, will retry'
                    )
                    # exponential backoff
                    time.sleep((2**(RoleRetries - retry)) +
                               (random.randint(0, 1000) / 1000))
                    continue
                else:
                    raise
            else:
                # Creation succeeded; stop retrying.
                break
        if func:
            log.info('The newly created function name is {0}'.format(
                func['FunctionName']))
            return {'created': True, 'name': func['FunctionName']}
        else:
            log.warning('Function was not created')
            return {'created': False}
    except ClientError as e:
        return {'created': False, 'error': salt.utils.boto3.get_error(e)}
def set_agent_settings(contact=None, location=None, services=None):
    """
    Manage the SNMP sysContact, sysLocation, and sysServices settings.

    Args:
        contact (str, optional): The SNMP contact.

        location (str, optional): The SNMP location.

        services (list, optional): A list of selected services. The possible
            service names can be found via ``win_snmp.get_agent_service_types``.
            To disable all services pass a list of None, ie: ['None']

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.set_agent_settings contact='Contact Name' location='Place' services="['Physical']"
    """
    if services is not None:
        # De-duplicate and sort for stable comparison against current values.
        services = sorted(set(services))

        # Reject any unknown service names up front.
        for svc in services:
            if svc not in _SERVICE_TYPES:
                message = "Invalid service '{}' specified. Valid services: {}".format(
                    svc, get_agent_service_types()
                )
                raise SaltInvocationError(message)

    desired = {"contact": contact, "location": location, "services": services}

    current = get_agent_settings()
    if desired == current:
        _LOG.debug("Agent settings already contain the provided values.")
        return True

    # Write only the values that were provided and actually differ.
    if contact is not None and contact != current["contact"]:
        __utils__["reg.set_value"](
            _HKEY, _AGENT_KEY, "sysContact", contact, "REG_SZ"
        )

    if location is not None and location != current["location"]:
        __utils__["reg.set_value"](
            _HKEY, _AGENT_KEY, "sysLocation", location, "REG_SZ"
        )

    if services is not None and set(services) != set(current["services"]):
        # Calculate the total value. Produces 0 if an empty list was provided,
        # corresponding to the None _SERVICE_TYPES value.
        vdata = sum(_SERVICE_TYPES[svc] for svc in services)
        _LOG.debug("Setting sysServices vdata to: %s", vdata)
        __utils__["reg.set_value"](
            _HKEY, _AGENT_KEY, "sysServices", vdata, "REG_DWORD"
        )

    # Re-read the settings so we can verify everything stuck; collect any
    # values that did not.
    new_settings = get_agent_settings()
    failed_settings = {
        setting: value
        for setting, value in desired.items()
        if value is not None and value != new_settings[setting]
    }

    if failed_settings:
        _LOG.error("Unable to configure agent settings: %s", failed_settings)
        return False

    _LOG.debug("Agent settings configured successfully: %s", desired.keys())
    return True
def update_function_code(FunctionName, ZipFile=None, S3Bucket=None, S3Key=None,
                         S3ObjectVersion=None, Publish=False,
                         region=None, key=None, keyid=None, profile=None):
    '''
    Upload the given code to the named lambda function.

    Returns {updated: true} if the function was updated and returns
    {updated: False} if the function was not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lamba.update_function_code my_function ZipFile=function.zip
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # Code comes either from a local zip or from S3, never both.
        if ZipFile:
            if S3Bucket or S3Key or S3ObjectVersion:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            response = conn.update_function_code(FunctionName=FunctionName,
                                                 ZipFile=_filedata(ZipFile),
                                                 Publish=Publish)
        else:
            if not S3Bucket or not S3Key:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            s3_args = {
                'S3Bucket': S3Bucket,
                'S3Key': S3Key,
            }
            if S3ObjectVersion:
                s3_args['S3ObjectVersion'] = S3ObjectVersion
            response = conn.update_function_code(FunctionName=FunctionName,
                                                 Publish=Publish, **s3_args)

        if not response:
            log.warning('Function was not updated')
            return {'updated': False}

        # Return a trimmed view of the new function metadata.
        keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
                'CodeSize', 'Description', 'Timeout', 'MemorySize',
                'FunctionArn', 'LastModified')
        return {
            'updated': True,
            'function': dict([(k, response.get(k)) for k in keys])
        }
    except ClientError as e:
        return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None,
                   reboot_if_needed=None, action_after_reboot=None,
                   refresh_mode=None, certificate_id=None,
                   configuration_id=None, allow_module_overwrite=None,
                   debug_mode=False, status_retention_days=None):
    '''
    For detailed descriptions of the parameters see:
    https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig

    Args:

        config_mode (str): How the LCM applies the configuration. Valid values
            are:

            - ApplyOnly
            - ApplyAndMonitor
            - ApplyAndAutoCorrect

        config_mode_freq (int): How often, in minutes, the current
            configuration is checked and applied. Ignored if config_mode is
            set to ApplyOnly. Default is 15.

        refresh_mode (str): How the LCM gets configurations. Valid values are:

            - Disabled
            - Push
            - Pull

        refresh_freq (int): How often, in minutes, the LCM checks for updated
            configurations. (pull mode only) Default is 30.

        reboot_if_needed (bool): Reboot the machine if needed after a
            configuration is applied. Default is False.

        action_after_reboot (str): Action to take after reboot. Valid values
            are:

            - ContinueConfiguration
            - StopConfiguration

        certificate_id (guid): A GUID that specifies a certificate used to
            access the configuration: (pull mode)

        configuration_id (guid): A GUID that identifies the config file to get
            from a pull server. (pull mode)

        allow_module_overwrite (bool): New configs are allowed to overwrite
            old ones on the target node.

        debug_mode (str): Sets the debug level. Valid values are:

            - None
            - ForceModuleImport
            - All

        status_retention_days (int): Number of days to keep status of the
            current config.

    Raises:
        SaltInvocationError: On any invalid parameter value.

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' dsc.set_lcm_config ApplyOnly
    '''
    temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR')))
    # Build a one-line DSC meta-configuration; each property must end in ';'
    # since the whole script is emitted on a single line.
    cmd = 'Configuration SaltConfig {'
    cmd += ' Node localhost {'
    cmd += ' LocalConfigurationManager {'
    if config_mode:
        if config_mode not in ('ApplyOnly', 'ApplyAndMonitor',
                               'ApplyAndAutoCorrect'):
            error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \
                    'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode)
            # BUG FIX: the exception was previously constructed but never
            # raised, and an error string was returned instead.
            raise SaltInvocationError(error)
        cmd += ' ConfigurationMode = "{0}";'.format(config_mode)
    if config_mode_freq:
        if not isinstance(config_mode_freq, int):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError(
                'config_mode_freq must be an integer. Passed {0}'.format(
                    config_mode_freq))
        cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq)
    if refresh_mode:
        if refresh_mode not in ('Disabled', 'Push', 'Pull'):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError('refresh_mode must be one of Disabled, Push, '
                                      'or Pull')
        cmd += ' RefreshMode = "{0}";'.format(refresh_mode)
    if refresh_freq:
        if not isinstance(refresh_freq, int):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError('refresh_freq must be an integer')
        cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq)
    if reboot_if_needed is not None:
        if not isinstance(reboot_if_needed, bool):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError('reboot_if_needed must be a boolean value')
        if reboot_if_needed:
            reboot_if_needed = '$true'
        else:
            reboot_if_needed = '$false'
        cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed)
    if action_after_reboot:
        if action_after_reboot not in ('ContinueConfiguration',
                                       'StopConfiguration'):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError('action_after_reboot must be one of '
                                      'ContinueConfiguration or StopConfiguration')
        # BUG FIX: this property was missing its trailing ';', corrupting the
        # single-line PowerShell script when later properties followed.
        cmd += ' ActionAfterReboot = "{0}";'.format(action_after_reboot)
    if certificate_id is not None:
        # NOTE(review): an empty string is mapped to None, which then renders
        # as the literal "None" in the DSC config - preserved as-is from the
        # original; confirm intended behavior.
        if certificate_id == '':
            certificate_id = None
        cmd += ' CertificateID = "{0}";'.format(certificate_id)
    if configuration_id is not None:
        if configuration_id == '':
            configuration_id = None
        cmd += ' ConfigurationID = "{0}";'.format(configuration_id)
    if allow_module_overwrite is not None:
        if not isinstance(allow_module_overwrite, bool):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError('allow_module_overwrite must be a boolean value')
        if allow_module_overwrite:
            allow_module_overwrite = '$true'
        else:
            allow_module_overwrite = '$false'
        cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite)
    if debug_mode is not False:
        if debug_mode is None:
            debug_mode = 'None'
        if debug_mode not in ('None', 'ForceModuleImport', 'All'):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError('debug_mode must be one of None, '
                                      'ForceModuleImport, ResourceScriptBreakAll, or '
                                      'All')
        cmd += ' DebugMode = "{0}";'.format(debug_mode)
    if status_retention_days:
        if not isinstance(status_retention_days, int):
            # BUG FIX: previously constructed without `raise`.
            raise SaltInvocationError('status_retention_days must be an integer')
        cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days)
    cmd += ' }}};'
    cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir)

    # Execute Config to create the .mof
    _pshell(cmd)

    # Apply the config
    cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \
          r''.format(temp_dir)
    ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
    __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir))
    if not ret['retcode']:
        log.info('LCM config applied successfully')
        return True
    else:
        log.error('Failed to apply LCM config. Error {0}'.format(ret))
        return False
def volume_absent(name, volume_name=None, volume_id=None, instance_name=None,
                  instance_id=None, device=None, region=None, key=None,
                  keyid=None, profile=None):
    '''
    Ensure the EC2 volume is detached and absent.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    volume_name
        Name tag associated with the volume. For safety, if this matches more
        than one volume, the state will refuse to apply.

    volume_id
        Resource ID of the volume.

    instance_name
        Only remove volume if it is attached to instance with this Name tag.
        Exclusive with 'instance_id'. Requires 'device'.

    instance_id
        Only remove volume if it is attached to this instance.
        Exclusive with 'instance_name'. Requires 'device'.

    device
        Match by device rather than ID. Requires one of 'instance_name' or
        'instance_id'.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    # Exactly one selector may be given; attachment-based selectors also
    # need a device to disambiguate which attached volume is meant.
    if not salt.utils.exactly_one((volume_name, volume_id, instance_name,
                                   instance_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', "
                                  "'instance_name', or 'instance_id' must be provided.")
    if (instance_name or instance_id) and not device:
        raise SaltInvocationError("Parameter 'device' is required when either "
                                  "'instance_name' or 'instance_id' is specified.")

    # Translate the chosen selector into EC2 describe-volumes filters.
    filters = {}
    if volume_id:
        filters['volume-id'] = volume_id
    if volume_name:
        filters['tag:Name'] = volume_name
    if instance_name:
        # Resolve the Name tag to an instance ID; a missing instance implies
        # its volumes are gone as well, which satisfies this state.
        instance_id = __salt__['boto_ec2.get_id'](
            name=instance_name, region=region, key=key, keyid=keyid,
            profile=profile, in_states=running_states)
        if not instance_id:
            ret['comment'] = ('Instance with Name {0} not found. Assuming '
                              'associated volumes gone.'.format(instance_name))
            return ret
    if instance_id:
        filters['attachment.instance-id'] = instance_id
    if device:
        filters['attachment.device'] = device

    conn_args = {'region': region, 'key': key, 'keyid': keyid,
                 'profile': profile}
    vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **conn_args)
    if not vols:
        ret['comment'] = 'Volume matching criteria not found, assuming already absent'
        return ret
    if len(vols) > 1:
        # Refuse to guess when the criteria are ambiguous.
        msg = "More than one volume matched criteria, can't continue in state {0}".format(name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret

    vol = vols[0]
    log.info('Matched Volume ID {0}'.format(vol))
    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True,
                                          **conn_args):
        ret['comment'] = 'Volume {0} deleted.'.format(vol)
        ret['changes'] = {'old': {'volume_id': vol},
                          'new': {'volume_id': None}}
    else:
        ret['comment'] = 'Error deleting volume {0}.'.format(vol)
        ret['result'] = False
    return ret
def apply_policies(policies=None, logfile=True, errorfile=True):
    r"""
    Apply a policy that manages Local Group Policy Objects.

    :param policies:
        A policy dictionary, or a list of policy dictionaries. Each policy
        dictionary must be of one of the forms below:
            {
                'policy_type' : 'regpol',
                'key' : '<hive>\path\to\registry\key\value_name',
                'value' : 'value of the registry key',
                'vtype' : 'DWORD' | 'SZ' | 'EXSZ'
            }
        -OR-
            {
                'policy_type' : 'regpol',
                'key' : '<hive>\path\to\registry\key\name',
                'action' : 'DELETE' | 'DELETEALLVALUES' | 'CREATEKEY'
            }
        -OR-
            {
                'policy_type' : 'secedit',
                'name' : 'name of the secedit inf setting',
                'value' : 'value to apply to the setting'
            }
        Policy dictionaries support the same aliases as the individual policy
        parameters. See ``lgpo.set_registry_value`` for the aliases.

    :param logfile:
        Path to the log file receiving the results of applying the policy.
        ``True`` (the default) creates the log in the system temp directory;
        ``False`` disables logging.

    :param errorfile:
        Path to the file receiving errors from applying the policy. ``True``
        (the default) creates it in the system temp directory; ``False``
        disables it.

    CLI Examples:

    .. code-block:: bash

        policies="[{'policy_type':'regpol', \
            'key':'HKLM\Software\Salt\Policies\Foo', \
            'value':'0', \
            'vtype':'DWORD'}]"
        salt '*' lgpo.apply_policies policies="${policies}"
    """
    valid_policies, reason, policy = validate_policies(policies)
    if not valid_policies:
        raise SaltInvocationError('{0}; policy={1}'.format(reason, policy))

    policy_files = _write_policy_files(valid_policies)

    # Build the LGPO.exe command line as a list of parts; /log and /error
    # switches are appended only when the corresponding file is in play.
    cmd_parts = [LGPO_EXE,
                 policy_files.get('regpol', ''),
                 policy_files.get('secedit', '')]

    if logfile is True:
        # Default: a fresh temp file for this run's log output.
        logfile = mkstemp(prefix='lgpo_', suffix='.log')
    if logfile:
        try:
            __salt__['file.makedirs'](path=logfile)
        except Exception as exc:
            raise CommandExecutionError('Error creating directory for logfile '
                                        '"{0}". Exception: {1}'.format(logfile,
                                                                       exc))
        log.info('LGPO log file is "{0}"'.format(logfile))
        cmd_parts.extend(['/log', logfile])

    if errorfile is True:
        # Default: a fresh temp file for this run's error output.
        errorfile = mkstemp(prefix='lgpo_', suffix='.err')
    if errorfile:
        try:
            __salt__['file.makedirs'](path=errorfile)
        except Exception as exc:
            raise CommandExecutionError('Error creating directory for '
                                        'errorfile "{0}". Exception: {1}'
                                        .format(errorfile, exc))
        log.info('LGPO error file is "{0}"'.format(errorfile))
        cmd_parts.extend(['/error', errorfile])

    command = ' '.join(cmd_parts)
    log.info('Applying LGPO policies')
    log.debug('LGPO policy data: {0}'.format(valid_policies))
    try:
        ret = __salt__['cmd.retcode'](command, python_shell=False)
    except Exception as exc:
        raise CommandExecutionError('Error applying LGPO policy template '
                                    '"{0}". Exception: {1}'
                                    .format(valid_policies, exc))

    # A non-empty error file means LGPO.exe hit problems even if it exited 0.
    if errorfile and os.path.getsize(errorfile) > 0:
        raise CommandExecutionError(
            'Encountered errors processing the LGPO policy template. See the '
            'error file for details -- {0}'.format(errorfile))
    if ret:
        raise CommandExecutionError('Non-zero exit [{0}] from {1}. We do not '
                                    'know what this means. Hopefully the '
                                    'error log contains details -- {2}'
                                    .format(ret, LGPO_EXE, errorfile))

    # Remove the temporary regpol/secedit files now that they are applied.
    for policy_file in policy_files.values():
        if os.path.isfile(policy_file):
            os.remove(policy_file)
    return valid_policies
def eni_present(
        name,
        subnet_id=None,
        subnet_name=None,
        private_ip_address=None,
        description=None,
        groups=None,
        source_dest_check=True,
        allocate_eip=None,
        arecords=None,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the EC2 ENI exists.

    .. versionadded:: 2016.3.0

    name
        Name tag associated with the ENI.

    subnet_id
        The VPC subnet ID the ENI will exist within.

    subnet_name
        The VPC subnet name the ENI will exist within.

    private_ip_address
        The private ip address to use for this ENI. If this is not specified
        AWS will automatically assign a private IP address to the ENI. Must
        be specified at creation time; will be ignored afterward.

    description
        Description of the key.

    groups
        A list of security groups to apply to the ENI.

    source_dest_check
        Boolean specifying whether source/destination checking is enabled on
        the ENI.

    allocate_eip
        allocate and associate an EIP to the ENI. Could be 'standard' to
        allocate Elastic IP to EC2 region or 'vpc' to get it for a
        particular VPC

        .. versionchanged:: 2016.11.0

    arecords
        A list of arecord dicts with attributes needed for the DNS add_record
        state. By default the boto_route53.add_record state will be used,
        which requires: name, zone, ttl, and identifier. See the boto_route53
        state for information about these attributes. Other DNS modules can
        be called by specifying the provider keyword. By default, the private
        ENI IP address will be used, set 'public: True' in the arecord dict
        to use the ENI's public IP address

        .. versionadded:: 2016.3.0

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    if not salt.utils.exactly_one((subnet_id, subnet_name)):
        raise SaltInvocationError('One (but not both) of subnet_id or '
                                  'subnet_name must be provided.')
    if not groups:
        raise SaltInvocationError('groups is a required argument.')
    if not isinstance(groups, list):
        raise SaltInvocationError('groups must be a list.')
    if not isinstance(source_dest_check, bool):
        raise SaltInvocationError('source_dest_check must be a bool.')
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # BUGFIX: eip_alloc was previously only bound inside the allocate_eip
    # branch below, so the arecords loop's `'public_ip' in eip_alloc` test
    # raised NameError when an arecord requested the public IP but no EIP
    # was allocated during this run. Initializing it here routes that case
    # to the intended, explicit CommandExecutionError instead.
    eip_alloc = None
    r = __salt__['boto_ec2.get_network_interface'](
        name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Error when attempting to find eni: {0}.'.format(
            r['error']['message']
        )
        return ret
    if not r['result']:
        # ENI does not exist yet - create it (or just report in test mode).
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be created.'
            if allocate_eip:
                ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.'])
            if arecords:
                ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.'])
            ret['result'] = None
            return ret
        result_create = __salt__['boto_ec2.create_network_interface'](
            name, subnet_id=subnet_id, subnet_name=subnet_name,
            private_ip_address=private_ip_address, description=description,
            groups=groups, region=region, key=key, keyid=keyid,
            profile=profile
        )
        if 'error' in result_create:
            ret['result'] = False
            ret['comment'] = 'Failed to create ENI: {0}'.format(
                result_create['error']['message']
            )
            return ret
        r['result'] = result_create['result']
        ret['comment'] = 'Created ENI {0}'.format(name)
        ret['changes']['id'] = r['result']['id']
    else:
        # ENI already exists - reconcile description and security groups.
        _ret = _eni_attribute(
            r['result'], 'description', description, region, key, keyid,
            profile
        )
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = _ret['comment']
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
        _ret = _eni_groups(
            r['result'], groups, region, key, keyid, profile
        )
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
    # Actions that need to occur whether creating or updating
    _ret = _eni_attribute(
        r['result'], 'source_dest_check', source_dest_check, region, key,
        keyid, profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        return ret
    if allocate_eip:
        # Only allocate a fresh EIP when the ENI does not already have one.
        if 'allocationId' not in r['result']:
            if __opts__['test']:
                ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.'])
            else:
                domain = 'vpc' if allocate_eip == 'vpc' else None
                eip_alloc = __salt__['boto_ec2.allocate_eip_address'](
                    domain=domain, region=region, key=key, keyid=keyid,
                    profile=profile)
                if eip_alloc:
                    _ret = __salt__['boto_ec2.associate_eip_address'](
                        instance_id=None,
                        instance_name=None,
                        public_ip=None,
                        allocation_id=eip_alloc['allocation_id'],
                        network_interface_id=r['result']['id'],
                        private_ip_address=None,
                        allow_reassociation=False,
                        region=region,
                        key=key,
                        keyid=keyid,
                        profile=profile)
                    if not _ret:
                        # Association failed - try not to leak the EIP we
                        # just allocated, and report whether release worked.
                        _ret = __salt__['boto_ec2.release_eip_address'](
                            public_ip=None,
                            allocation_id=eip_alloc['allocation_id'],
                            region=region,
                            key=key,
                            keyid=keyid,
                            profile=profile)
                        ret['result'] = False
                        msg = 'Failed to assocaite the allocated EIP address with the ENI.  The EIP {0}'.format(
                            'was successfully released.' if _ret else 'was NOT RELEASED.')
                        ret['comment'] = ' '.join([ret['comment'], msg])
                        return ret
                else:
                    ret['result'] = False
                    ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address'])
                    return ret
        else:
            ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI'])
    if arecords:
        for arecord in arecords:
            if 'name' not in arecord:
                msg = 'The arecord must contain a "name" property.'
                raise SaltInvocationError(msg)
            log.debug('processing arecord {0}'.format(arecord))
            _ret = None
            dns_provider = 'boto_route53'
            arecord['record_type'] = 'A'
            public_ip_arecord = False
            if 'public' in arecord:
                public_ip_arecord = arecord.pop('public')
            if public_ip_arecord:
                # Prefer the address AWS reports on the ENI; fall back to an
                # EIP allocated during this run (if any).
                if 'publicIp' in r['result']:
                    arecord['value'] = r['result']['publicIp']
                elif eip_alloc and 'public_ip' in eip_alloc:
                    arecord['value'] = eip_alloc['public_ip']
                else:
                    msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.'
                    raise CommandExecutionError(msg)
            else:
                arecord['value'] = r['result']['private_ip_address']
            if 'provider' in arecord:
                dns_provider = arecord.pop('provider')
            if dns_provider == 'boto_route53':
                # Propagate this state's credentials unless the arecord
                # overrides them explicitly.
                if 'profile' not in arecord:
                    arecord['profile'] = profile
                if 'key' not in arecord:
                    arecord['key'] = key
                if 'keyid' not in arecord:
                    arecord['keyid'] = keyid
                if 'region' not in arecord:
                    arecord['region'] = region
            _ret = __states__['.'.join([dns_provider, 'present'])](**arecord)
            log.debug('ret from dns_provider.present = {0}'.format(_ret))
            ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
            ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
            if not _ret['result']:
                ret['result'] = _ret['result']
                if ret['result'] is False:
                    return ret
    return ret
def copy_to(name,
            source,
            dest,
            container_type=None,
            exec_driver=None,
            overwrite=False,
            makedirs=False):
    '''
    Common logic for copying files to containers

    CLI Example:

    .. code-block::

        salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
    '''
    # Look up the container-type-specific helpers once, up front.
    state_func = __salt__['{0}.state'.format(container_type)]
    run_all = __salt__['{0}.run_all'.format(container_type)]

    if state_func(name) != 'running':
        raise CommandExecutionError(
            'Container \'{0}\' is not running'.format(name))

    cached_source = cache_file(source)
    src_dir, src_name = os.path.split(cached_source)

    # Validate the (locally cached) source file.
    if not os.path.isabs(cached_source):
        raise SaltInvocationError('Source path must be absolute')
    if not os.path.exists(cached_source):
        raise SaltInvocationError(
            'Source file {0} does not exist'.format(cached_source))
    if not os.path.isfile(cached_source):
        raise SaltInvocationError('Source must be a regular file')

    # Validate the in-container destination.
    if not os.path.isabs(dest):
        raise SaltInvocationError('Destination path must be absolute')

    def _dir_exists(path):
        # True when `path` is a directory inside the container.
        return run_all(name,
                       'test -d {0}'.format(pipes.quote(path)),
                       ignore_retcode=True)['retcode'] == 0

    if _dir_exists(dest):
        # Destination is a directory; the copy lands under it using the
        # source file's basename.
        dest = os.path.join(dest, src_name)
    else:
        # Destination is a file path. Its parent directory must already
        # exist, or be created now when makedirs=True.
        dest_dir, dest_name = os.path.split(dest)
        if not _dir_exists(dest_dir):
            if not makedirs:
                raise SaltInvocationError(
                    'Directory {0} does not exist on {1} container \'{2}\''
                    .format(dest_dir, container_type, name))
            result = run_all(name,
                             'mkdir -p {0}'.format(pipes.quote(dest_dir)))
            if result['retcode'] != 0:
                error = ('Unable to create destination directory {0} in '
                         'container \'{1}\''.format(dest_dir, name))
                if result['stderr']:
                    error += ': {0}'.format(result['stderr'])
                raise CommandExecutionError(error)

    if not overwrite and run_all(name,
                                 'test -e {0}'.format(pipes.quote(dest)),
                                 ignore_retcode=True)['retcode'] == 0:
        raise CommandExecutionError(
            'Destination path {0} already exists. Use overwrite=True to '
            'overwrite it'.format(dest))

    # Short-circuit when the destination already holds an identical file.
    local_md5 = __salt__['file.get_sum'](cached_source, 'md5')
    if local_md5 == _get_md5(name, dest, run_all):
        log.debug('{0} and {1}:{2} are the same file, skipping copy'.format(
            source, name, dest))
        return True

    log.debug('Copying {0} to {1} container \'{2}\' as {3}'.format(
        source, container_type, name, dest))

    # Stream the file with cat instead of reading it into memory and passing
    # it as stdin; this keeps minion memory usage down and is faster.
    if exec_driver == 'lxc-attach':
        xfer_cmd = (
            'cat "{0}" | lxc-attach --clear-env --set-var {1} -n {2} -- '
            'tee "{3}"'.format(cached_source, PATH, name, dest))
    elif exec_driver == 'nsenter':
        pid = __salt__['{0}.pid'.format(container_type)](name)
        xfer_cmd = ('cat "{0}" | {1} env -i {2} tee "{3}"'.format(
            cached_source, _nsenter(pid), PATH, dest))
    elif exec_driver == 'docker-exec':
        xfer_cmd = (
            'cat "{0}" | docker exec -i {1} env -i {2} tee "{3}"'.format(
                cached_source, name, PATH, dest))
    __salt__['cmd.run'](xfer_cmd, python_shell=True, output_loglevel='quiet')
    return local_md5 == _get_md5(name, dest, run_all)
def instance_present(name, instance_name=None, instance_id=None, image_id=None,
                     image_name=None, tags=None, key_name=None,
                     security_groups=None, user_data=None, instance_type=None,
                     placement=None, kernel_id=None, ramdisk_id=None,
                     vpc_id=None, vpc_name=None, monitoring_enabled=None,
                     subnet_id=None, subnet_name=None, private_ip_address=None,
                     block_device_map=None, disable_api_termination=None,
                     instance_initiated_shutdown_behavior=None,
                     placement_group=None, client_token=None,
                     security_group_ids=None, security_group_names=None,
                     additional_info=None, tenancy=None,
                     instance_profile_arn=None, instance_profile_name=None,
                     ebs_optimized=None, network_interfaces=None,
                     network_interface_name=None,
                     network_interface_id=None,
                     attributes=None, target_state=None, public_ip=None,
                     allocation_id=None, allocate_eip=False, region=None,
                     key=None, keyid=None, profile=None):
    ### TODO - implement 'target_state={running, stopped}'
    '''
    Ensure an EC2 instance is running with the given attributes and state.

    name
        (string) - The name of the state definition. Recommended that this
        match the instance_name attribute (generally the FQDN of the instance).
    instance_name
        (string) - The name of the instance, generally its FQDN. Exclusive with
        'instance_id'.
    instance_id
        (string) - The ID of the instance (if known). Exclusive with
        'instance_name'.
    image_id
        (string) - The ID of the AMI image to run.
    image_name
        (string) - The name of the AMI image to run.
    tags
        (dict) - Tags to apply to the instance.
    key_name
        (string) - The name of the key pair with which to launch instances.
    security_groups
        (list of strings) - The names of the EC2 classic security groups with
        which to associate instances.
    user_data
        (string) - The Base64-encoded MIME user data to be made available to
        the instance(s) in this reservation.
    instance_type
        (string) - The EC2 instance size/type. Note that only certain types
        are compatible with HVM based AMIs.
    placement
        (string) - The Availability Zone to launch the instance into.
    kernel_id
        (string) - The ID of the kernel with which to launch the instances.
    ramdisk_id
        (string) - The ID of the RAM disk with which to launch the instances.
    vpc_id
        (string) - The ID of a VPC to attach the instance to.
    vpc_name
        (string) - The name of a VPC to attach the instance to.
    monitoring_enabled
        (bool) - Enable detailed CloudWatch monitoring on the instance.
    subnet_id
        (string) - The ID of the subnet within which to launch the instances
        for VPC.
    subnet_name
        (string) - The name of the subnet within which to launch the instances
        for VPC.
    private_ip_address
        (string) - If you're using VPC, you can optionally use this parameter
        to assign the instance a specific available IP address from the subnet
        (e.g., 10.0.0.25).
    block_device_map
        (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping
        data structure describing the EBS volumes associated with the Image.
    disable_api_termination
        (bool) - If True, the instances will be locked and will not be able to
        be terminated via the API.
    instance_initiated_shutdown_behavior
        (string) - Specifies whether the instance stops or terminates on
        instance-initiated shutdown. Valid values are:

        - 'stop'
        - 'terminate'
    placement_group
        (string) - If specified, this is the name of the placement group in
        which the instance(s) will be launched.
    client_token
        (string) - Unique, case-sensitive identifier you provide to ensure
        idempotency of the request. Maximum 64 ASCII characters.
    security_group_ids
        (list of strings) - The IDs of the VPC security groups with which to
        associate instances.
    security_group_names
        (list of strings) - The names of the VPC security groups with which to
        associate instances.
    additional_info
        (string) - Specifies additional information to make available to the
        instance(s).
    tenancy
        (string) - The tenancy of the instance you want to launch. An instance
        with a tenancy of 'dedicated' runs on single-tenant hardware and can
        only be launched into a VPC. Valid values are: "default" or
        "dedicated". NOTE: To use dedicated tenancy you MUST specify a VPC
        subnet-ID as well.
    instance_profile_arn
        (string) - The Amazon resource name (ARN) of the IAM Instance Profile
        (IIP) to associate with the instances.
    instance_profile_name
        (string) - The name of the IAM Instance Profile (IIP) to associate
        with the instances.
    ebs_optimized
        (bool) - Whether the instance is optimized for EBS I/O. This
        optimization provides dedicated throughput to Amazon EBS and a tuned
        configuration stack to provide optimal EBS I/O performance. This
        optimization isn't available with all instance types.
    network_interfaces
        (boto.ec2.networkinterface.NetworkInterfaceCollection) - A
        NetworkInterfaceCollection data structure containing the ENI
        specifications for the instance.
    network_interface_name
        (string) - The name of Elastic Network Interface to attach.

        .. versionadded:: 2016.11.0
    network_interface_id
        (string) - The id of Elastic Network Interface to attach.

        .. versionadded:: 2016.11.0
    attributes
        (dict) - Instance attributes and value to be applied to the instance.
        Available options are:

        - instanceType - A valid instance type (m1.small)
        - kernel - Kernel ID (None)
        - ramdisk - Ramdisk ID (None)
        - userData - Base64 encoded String (None)
        - disableApiTermination - Boolean (true)
        - instanceInitiatedShutdownBehavior - stop|terminate
        - blockDeviceMapping - List of strings - ie: ['/dev/sda=false']
        - sourceDestCheck - Boolean (true)
        - groupSet - Set of Security Groups or IDs
        - ebsOptimized - Boolean (false)
        - sriovNetSupport - String - ie: 'simple'
    target_state
        (string) - The desired target state of the instance. Available options
        are:

        - running
        - stopped

        Note that this option is currently UNIMPLEMENTED.
    public_ip
        (string) - The IP of a previously allocated EIP address, which will be
        attached to the instance. EC2 Classic instances ONLY - for VPC pass in
        an allocation_id instead.
    allocation_id
        (string) - The ID of a previously allocated EIP address, which will be
        attached to the instance. VPC instances ONLY - for Classic pass in a
        public_ip instead.
    allocate_eip
        (bool) - Allocate and attach an EIP on-the-fly for this instance. Note
        you'll want to release this address when terminating the instance,
        either manually or via the 'release_eip' flag to 'instance_absent'.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.

    .. versionadded:: 2016.3.0
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    _create = False
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
    changed_attrs = {}

    # Exactly one way to identify the AMI; at most one way to obtain an EIP.
    if not salt.utils.exactly_one((image_id, image_name)):
        raise SaltInvocationError('Exactly one of image_id OR '
                                  'image_name must be provided.')
    if (public_ip or allocation_id or allocate_eip) and not salt.utils.exactly_one((public_ip, allocation_id, allocate_eip)):
        raise SaltInvocationError('At most one of public_ip, allocation_id OR '
                                  'allocate_eip may be provided.')

    # Decide whether the instance needs to be created: look it up by ID when
    # given, otherwise by Name tag (instance_name, falling back to the state
    # name). Only instances in running_states count as existing.
    if instance_id:
        exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key,
                                             keyid=keyid, profile=profile,
                                             in_states=running_states)
        if not exists:
            _create = True
    else:
        instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name,
                                                        region=region, key=key, keyid=keyid,
                                                        profile=profile,
                                                        in_states=running_states)
        if not len(instances):
            _create = True

    if _create:
        if __opts__['test']:
            ret['comment'] = 'The instance {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        if image_name:
            # Resolve an AMI name to an ID; if the lookup finds nothing, fall
            # through and let boto_ec2.run try the name as-is.
            args = {'ami_name': image_name, 'region': region, 'key': key,
                    'keyid': keyid, 'profile': profile}
            image_ids = __salt__['boto_ec2.find_images'](**args)
            if len(image_ids):
                image_id = image_ids[0]
            else:
                image_id = image_name
        r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name,
                                     tags=tags, key_name=key_name,
                                     security_groups=security_groups, user_data=user_data,
                                     instance_type=instance_type, placement=placement,
                                     kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id,
                                     vpc_name=vpc_name, monitoring_enabled=monitoring_enabled,
                                     subnet_id=subnet_id, subnet_name=subnet_name,
                                     private_ip_address=private_ip_address,
                                     block_device_map=block_device_map,
                                     disable_api_termination=disable_api_termination,
                                     instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
                                     placement_group=placement_group, client_token=client_token,
                                     security_group_ids=security_group_ids,
                                     security_group_names=security_group_names,
                                     additional_info=additional_info, tenancy=tenancy,
                                     instance_profile_arn=instance_profile_arn,
                                     instance_profile_name=instance_profile_name,
                                     ebs_optimized=ebs_optimized, network_interfaces=network_interfaces,
                                     network_interface_name=network_interface_name,
                                     network_interface_id=network_interface_id,
                                     region=region, key=key, keyid=keyid, profile=profile)
        if not r or 'instance_id' not in r:
            ret['result'] = False
            ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name)
            return ret

        instance_id = r['instance_id']
        ret['changes'] = {'old': {}, 'new': {}}
        ret['changes']['old']['instance_id'] = None
        ret['changes']['new']['instance_id'] = instance_id

        # To avoid issues we only allocate new EIPs at instance creation.
        # This might miss situations where an instance is initially created
        # created without and one is added later, but the alternative is the
        # risk of EIPs allocated at every state run.
        if allocate_eip:
            if __opts__['test']:
                ret['comment'] = 'New EIP would be allocated.'
                ret['result'] = None
                return ret
            domain = 'vpc' if vpc_id or vpc_name else None
            r = __salt__['boto_ec2.allocate_eip_address'](
                domain=domain, region=region, key=key, keyid=keyid,
                profile=profile)
            if not r:
                ret['result'] = False
                ret['comment'] = 'Failed to allocate new EIP.'
                return ret
            allocation_id = r['allocation_id']
            log.info("New EIP with address {0} allocated.".format(r['public_ip']))
        else:
            log.info("EIP not requested.")

    if public_ip or allocation_id:
        # This can take a bit to show up, give it a chance to...
        # Poll for the EIP up to tries * secs seconds before giving up.
        tries = 10
        secs = 3
        for t in range(tries):
            r = __salt__['boto_ec2.get_eip_address_info'](
                addresses=public_ip, allocation_ids=allocation_id,
                region=region, key=key, keyid=keyid, profile=profile)
            if r:
                break
            else:
                log.info("Waiting up to {0} secs for new EIP {1} to become available".format(
                    tries * secs, public_ip or allocation_id))
                time.sleep(secs)
        if not r:
            ret['result'] = False
            ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id)
            return ret

        ip = r[0]['public_ip']
        if r[0].get('instance_id'):
            # EIP already attached somewhere: only an error if it is a
            # different instance than ours.
            if r[0]['instance_id'] != instance_id:
                ret['result'] = False
                ret['comment'] = ('EIP {0} is already associated with instance '
                                  '{1}.'.format(public_ip if public_ip else allocation_id,
                                                r[0]['instance_id']))
                return ret
        else:
            if __opts__['test']:
                ret['comment'] = 'Instance {0} to be updated.'.format(name)
                ret['result'] = None
                return ret
            r = __salt__['boto_ec2.associate_eip_address'](
                instance_id=instance_id, public_ip=public_ip,
                allocation_id=allocation_id, region=region, key=key,
                keyid=keyid, profile=profile)
            if r:
                # NOTE(review): ret['changes'] is only seeded with
                # {'old': {}, 'new': {}} on the creation path above; if an
                # existing instance reaches this branch with an EIP to attach,
                # this 'new' lookup looks like it would raise KeyError --
                # confirm against callers.
                ret['changes']['new']['public_ip'] = ip
            else:
                ret['result'] = False
                ret['comment'] = 'Failed to attach EIP to instance {0}.'.format(
                    instance_name if instance_name else name)
                return ret

    if attributes:
        # Reconcile each requested instance attribute with its current value;
        # only differing attributes are changed (or reported in test mode).
        for k, v in six.iteritems(attributes):
            curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key,
                                                      keyid=keyid, profile=profile)
            if not isinstance(curr, dict):
                curr = {}
            if curr.get(k) == v:
                continue
            else:
                if __opts__['test']:
                    changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format(
                        k, curr.get(k), v)
                    continue
                try:
                    r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v,
                                                           instance_id=instance_id, region=region,
                                                           key=key, keyid=keyid, profile=profile)
                except SaltInvocationError as e:
                    ret['result'] = False
                    ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name)
                    return ret
                # Seed changes lazily so attribute-only updates are recorded.
                ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}}
                ret['changes']['old'][k] = curr.get(k)
                ret['changes']['new'][k] = v

    if __opts__['test']:
        if changed_attrs:
            ret['changes']['new'] = changed_attrs
            ret['result'] = None
        else:
            ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name)
            ret['result'] = True

    return ret
def make_repo(repodir,
              keyid=None,
              env=None,
              use_passphrase=False,
              gnupghome='/etc/salt/gpgkeys',
              runas='root',
              timeout=15.0):
    '''
    Make a package repository and optionally sign it and packages present

    Given the repodir (directory to create repository in), create a Debian
    repository and optionally sign it and packages present. This state is
    best used with onchanges linked to your package building states.

    repodir
        The directory to find packages that will be in the repository.

    keyid
        .. versionchanged:: 2016.3.0

        Optional Key ID to use in signing packages and repository.
        Utilizes Public and Private keys associated with keyid which have
        been loaded into the minion's Pillar data. Leverages gpg-agent and
        gpg-preset-passphrase for caching keys, etc.

        For example, contents from a Pillar data file with named Public
        and Private keys as follows:

        .. code-block:: yaml

            gpg_pkg_priv_key: |
              -----BEGIN PGP PRIVATE KEY BLOCK-----
              Version: GnuPG v1

              lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
              R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
              =JvW8
              -----END PGP PRIVATE KEY BLOCK-----

            gpg_pkg_priv_keyname: gpg_pkg_key.pem

            gpg_pkg_pub_key: |
              -----BEGIN PGP PUBLIC KEY BLOCK-----
              Version: GnuPG v1

              mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
              4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
              inNqW9c=
              =s1CX
              -----END PGP PUBLIC KEY BLOCK-----

            gpg_pkg_pub_keyname: gpg_pkg_key.pub

    env
        .. versionchanged:: 2016.3.0

        A dictionary of environment variables to be utilized in creating the
        repository.

    use_passphrase : False
        .. versionadded:: 2016.3.0

        Use a passphrase with the signing key presented in ``keyid``.
        Passphrase is received from Pillar data which could be passed on the
        command line with ``pillar`` parameter. For example:

        .. code-block:: bash

            pillar='{ "gpg_passphrase" : "my_passphrase" }'

    gnupghome : /etc/salt/gpgkeys
        .. versionadded:: 2016.3.0

        Location where GPG related files are stored, used with ``keyid``.

    runas : root
        .. versionadded:: 2016.3.0

        User to create the repository as, and optionally sign packages.

        .. note::

            Ensure the user has correct permissions to any files and
            directories which are to be utilized.

    timeout : 15.0
        .. versionadded:: 2016.3.4

        Timeout in seconds to wait for the prompt for inputting the
        passphrase.

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.make_repo /var/www/html
    '''
    res = {'retcode': 1,
           'stdout': '',
           'stderr': 'initialization value'}

    # Prompts emitted on the pseudo-terminal by debsign (gnupg >= 2.1) and
    # reprepro when they ask for the signing key's passphrase.
    SIGN_PROMPT_RE = re.compile(r'Enter passphrase: ', re.M)
    REPREPRO_SIGN_PROMPT_RE = re.compile(r'Passphrase: ', re.M)

    # Write the reprepro 'conf/distributions' and 'conf/options' files,
    # deriving their contents (and the codename) from the env dict.
    repoconf = os.path.join(repodir, 'conf')
    if not os.path.isdir(repoconf):
        os.makedirs(repoconf)

    codename, repocfg_dists = _get_repo_dists_env(env)
    repoconfdist = os.path.join(repoconf, 'distributions')
    with salt.utils.files.fopen(repoconfdist, 'w') as fow:
        fow.write('{0}'.format(repocfg_dists))

    repocfg_opts = _get_repo_options_env(env)
    repoconfopts = os.path.join(repoconf, 'options')
    with salt.utils.files.fopen(repoconfopts, 'w') as fow:
        fow.write('{0}'.format(repocfg_opts))

    local_keygrip_to_use = None
    local_key_fingerprint = None
    local_keyid = None
    phrase = ''

    # preset passphrase and interaction with gpg-agent
    gpg_info_file = '{0}/gpg-agent-info-salt'.format(gnupghome)
    gpg_tty_info_file = '{0}/gpg-tty-info-salt'.format(gnupghome)
    gpg_tty_info_dict = {}

    # if using older than gnupg 2.1, then env file exists
    older_gnupg = __salt__['file.file_exists'](gpg_info_file)

    if keyid is not None:
        with salt.utils.files.fopen(repoconfdist, 'a') as fow:
            fow.write('SignWith: {0}\n'.format(keyid))

        # import_keys
        # BUGFIX: the previous code checked the *formatted paths* for None,
        # which could never be None even when the Pillar entries were missing
        # (the path would just end in '/None').  Check the Pillar values
        # themselves before building the file paths.
        pkg_pub_keyname = __salt__['pillar.get']('gpg_pkg_pub_keyname', None)
        pkg_priv_keyname = __salt__['pillar.get']('gpg_pkg_priv_keyname', None)

        if pkg_pub_keyname is None or pkg_priv_keyname is None:
            raise SaltInvocationError(
                'Pillar data should contain Public and Private keys associated with \'keyid\''
            )

        pkg_pub_key_file = '{0}/{1}'.format(gnupghome, pkg_pub_keyname)
        pkg_priv_key_file = '{0}/{1}'.format(gnupghome, pkg_priv_keyname)

        try:
            __salt__['gpg.import_key'](user=runas, filename=pkg_pub_key_file, gnupghome=gnupghome)
            __salt__['gpg.import_key'](user=runas, filename=pkg_priv_key_file, gnupghome=gnupghome)
        except SaltInvocationError:
            raise SaltInvocationError(
                'Public and Private key files associated with Pillar data and \'keyid\' '
                '{0} could not be found'.format(keyid))

        # gpg keys should have been loaded as part of setup
        # retrieve specified key, obtain fingerprint and preset passphrase
        local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome)
        for gpg_key in local_keys:
            if keyid == gpg_key['keyid'][8:]:
                local_keygrip_to_use = gpg_key['fingerprint']
                local_key_fingerprint = gpg_key['fingerprint']
                local_keyid = gpg_key['keyid']
                break

        if not older_gnupg:
            # gnupg 2.1+ presets the passphrase against the key's *keygrip*
            # rather than its fingerprint; extract it from gpg2's
            # --with-keygrip listing for the matching fingerprint.
            _check_repo_sign_utils_support('gpg2')
            cmd = '{0} --with-keygrip --list-secret-keys'.format(
                salt.utils.path.which('gpg2'))
            local_keys2_keygrip = __salt__['cmd.run'](cmd, runas=runas)
            local_keys2 = iter(local_keys2_keygrip.splitlines())
            try:
                for line in local_keys2:
                    if line.startswith('sec'):
                        line_fingerprint = next(local_keys2).lstrip().rstrip()
                        if local_key_fingerprint == line_fingerprint:
                            lkeygrip = next(local_keys2).split('=')
                            local_keygrip_to_use = lkeygrip[1].lstrip().rstrip()
                            break
            except StopIteration:
                raise SaltInvocationError(
                    'unable to find keygrip associated with fingerprint \'{0}\' for keyid \'{1}\''
                    .format(local_key_fingerprint, local_keyid))

        if local_keyid is None:
            raise SaltInvocationError(
                'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\''
                .format(keyid, gnupghome))

        _check_repo_sign_utils_support('debsign')

        if older_gnupg:
            # Export the gpg-agent connection info recorded at setup time so
            # debsign/reprepro can reach the agent.  Only the first line of
            # the info file is used.
            with salt.utils.files.fopen(gpg_info_file, 'r') as fow:
                gpg_raw_info = fow.readlines()

            for gpg_info_line in gpg_raw_info:
                gpg_info = gpg_info_line.split('=')
                gpg_info_dict = {gpg_info[0]: gpg_info[1]}
                __salt__['environ.setenv'](gpg_info_dict)
                break
        else:
            with salt.utils.files.fopen(gpg_tty_info_file, 'r') as fow:
                gpg_raw_info = fow.readlines()

            for gpg_tty_info_line in gpg_raw_info:
                gpg_tty_info = gpg_tty_info_line.split('=')
                gpg_tty_info_dict = {gpg_tty_info[0]: gpg_tty_info[1]}
                __salt__['environ.setenv'](gpg_tty_info_dict)
                break

        if use_passphrase:
            _check_repo_gpg_phrase_utils_support()
            phrase = __salt__['pillar.get']('gpg_passphrase')
            cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(
                phrase, local_keygrip_to_use)
            __salt__['cmd.run'](cmd, runas=runas)

    for debfile in os.listdir(repodir):
        abs_file = os.path.join(repodir, debfile)
        # Stale .changes files are removed; they are regenerated by signing.
        if debfile.endswith('.changes'):
            os.remove(abs_file)

        if debfile.endswith('.dsc'):
            # sign_it_here
            if older_gnupg:
                if local_keyid is not None:
                    cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
                    __salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)

                cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(
                    codename, abs_file)
                __salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
            else:
                # interval of 0.125 is really too fast on some systems
                interval = 0.5
                if local_keyid is not None:
                    number_retries = timeout / interval
                    times_looped = 0
                    error_msg = 'Failed to debsign file {0}'.format(abs_file)
                    cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
                    # BUGFIX: initialize proc so the 'finally' below cannot
                    # raise NameError when Terminal() itself fails.
                    proc = None
                    try:
                        stdout, stderr = None, None
                        proc = salt.utils.vt.Terminal(
                            cmd,
                            shell=True,
                            stream_stdout=True,
                            stream_stderr=True
                        )
                        while proc.has_unread_data:
                            stdout, stderr = proc.recv()
                            if stdout and SIGN_PROMPT_RE.search(stdout):
                                # have the prompt for inputting the passphrase
                                proc.sendline(phrase)
                            else:
                                times_looped += 1

                            if times_looped > number_retries:
                                raise SaltInvocationError(
                                    'Attemping to sign file {0} failed, timed out after {1} seconds'
                                    .format(abs_file, int(times_looped * interval)))
                            time.sleep(interval)

                        proc_exitstatus = proc.exitstatus
                        if proc_exitstatus != 0:
                            raise SaltInvocationError(
                                'Signing file {0} failed with proc.status {1}'
                                .format(abs_file, proc_exitstatus))
                    except salt.utils.vt.TerminalException as err:
                        trace = traceback.format_exc()
                        # BUGFIX: error_msg has no %-placeholders, so passing
                        # err/trace as lazy args raised a logging formatting
                        # error; use an explicit lazy format string.
                        log.error('%s %s %s', error_msg, err, trace)
                        res = {'retcode': 1, 'stdout': '', 'stderr': trace}
                    finally:
                        if proc is not None:
                            proc.close(terminate=True, kill=True)

                number_retries = timeout / interval
                times_looped = 0
                error_msg = 'Failed to reprepro includedsc file {0}'.format(abs_file)
                cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(
                    codename, abs_file)
                # BUGFIX: same NameError guard as above.
                proc = None
                try:
                    stdout, stderr = None, None
                    proc = salt.utils.vt.Terminal(
                        cmd,
                        shell=True,
                        cwd=repodir,
                        env=gpg_tty_info_dict,
                        stream_stdout=True,
                        stream_stderr=True
                    )
                    while proc.has_unread_data:
                        stdout, stderr = proc.recv()
                        if stdout and REPREPRO_SIGN_PROMPT_RE.search(stdout):
                            # have the prompt for inputting the passphrase
                            proc.sendline(phrase)
                        else:
                            times_looped += 1

                        if times_looped > number_retries:
                            raise SaltInvocationError(
                                'Attemping to reprepro includedsc for file {0} failed, timed out after {1} loops'
                                .format(abs_file, times_looped))
                        time.sleep(interval)

                    proc_exitstatus = proc.exitstatus
                    if proc_exitstatus != 0:
                        raise SaltInvocationError(
                            'Reprepro includedsc for codename {0} and file {1} failed with proc.status {2}'
                            .format(codename, abs_file, proc_exitstatus))
                except salt.utils.vt.TerminalException as err:
                    trace = traceback.format_exc()
                    # BUGFIX: explicit lazy format string (see above).
                    log.error('%s %s %s', error_msg, err, trace)
                    res = {'retcode': 1, 'stdout': '', 'stderr': trace}
                finally:
                    if proc is not None:
                        proc.close(terminate=True, kill=True)

        if debfile.endswith('.deb'):
            cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedeb {0} {1}'.format(
                codename, abs_file)
            res = __salt__['cmd.run_all'](cmd, cwd=repodir, use_vt=True)

    return res
def create_volume(name, bricks, stripe=False, replica=False, device_vg=False,
                  transport='tcp', start=False, force=False, arbiter=False):
    '''
    Create a glusterfs volume

    name
        Name of the gluster volume

    bricks
        Bricks to create volume from, in <peer>:<brick path> format. For
        multiple bricks use list format: '["<peer1>:<brick1>",
        "<peer2>:<brick2>"]'

    stripe
        Stripe count; the number of bricks should be a multiple of the
        stripe count for a distributed striped volume

    replica
        Replica count; the number of bricks should be a multiple of the
        replica count for a distributed replicated volume

    arbiter
        If true, the volume uses arbiter brick(s). Valid configuration is
        limited to "replica 3 arbiter 1" per Gluster documentation; every
        third brick in the brick list is used as an arbiter brick.

        .. versionadded:: 2019.2.0

    device_vg
        If true, the volume uses a block backend instead of the regular
        posix backend. A block device backend volume does not support
        multiple bricks

    transport
        Transport protocol to use; can be 'tcp', 'rdma' or 'tcp,rdma'

    start
        Start the volume after creation

    force
        Force volume creation; this works even if creating in root FS

    CLI Examples:

    .. code-block:: bash

        salt host1 glusterfs.create newvolume host1:/brick

        salt gluster1 glusterfs.create vol2 '["gluster1:/export/vol2/brick", \
            "gluster2:/export/vol2/brick"]' replica=2 start=True
    '''
    # A single brick may be passed as a bare string
    if isinstance(bricks, six.string_types):
        bricks = [bricks]

    # Block-device (LVM) backed volumes are limited to exactly one brick
    if device_vg and len(bricks) > 1:
        raise SaltInvocationError('Block device backend volume does not ' +
                                  'support multiple bricks')

    # Each brick must be of the form <peer>:</absolute/path>
    for brick in bricks:
        fields = brick.split(':')
        if len(fields) != 2:
            raise SaltInvocationError(
                'Brick syntax is <peer>:<path> got {0}'.format(brick))
        if not fields[1].startswith('/'):
            raise SaltInvocationError(
                'Brick paths must start with / in {0}'.format(brick))

    # Arbiter bricks are only supported in a "replica 3 arbiter 1" layout
    if arbiter and replica != 3:
        raise SaltInvocationError('Arbiter configuration only valid ' +
                                  'in replica 3 volume')

    # Assemble the gluster CLI call from its optional pieces
    words = ['volume create {0}'.format(name)]
    if stripe:
        words.append('stripe {0}'.format(stripe))
    if replica:
        words.append('replica {0}'.format(replica))
    if arbiter:
        words.append('arbiter 1')
    if device_vg:
        words.append('device vg')
    if transport != 'tcp':
        words.append('transport {0}'.format(transport))
    words.extend(bricks)
    if force:
        words.append('force')

    if not _gluster(' '.join(words)):
        return False

    return start_volume(name) if start else True