Example #1
def create_cert_binding(name,
                        site,
                        hostheader='',
                        ipaddress='*',
                        port=443,
                        sslflags=0):
    '''
    Assign a certificate to an IIS binding.

    .. note::

        The web binding that the certificate is being assigned to must already exist.

    :param str name: The thumbprint of the certificate.
    :param str site: The IIS site name.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.
    :param int sslflags: Flags representing certificate type and certificate storage of the binding.

    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    .. versionadded:: Carbon

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.create_cert_binding name='AAA000' site='site0' hostheader='example' ipaddress='*' port='443'
    '''
    pscmd = list()
    name = str(name).upper()
    binding_info = _get_binding_info(hostheader, ipaddress, port)
    binding_path = r"IIS:\SslBindings\{0}".format(
        binding_info.replace(':', '!'))

    if sslflags not in _VALID_SSL_FLAGS:
        message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
                   ' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0],
                                       _VALID_SSL_FLAGS[-1])
        raise SaltInvocationError(message)

    # Verify that the target binding exists.
    current_bindings = list_bindings(site)

    if binding_info not in current_bindings:
        _LOG.error('Binding not present: %s', binding_info)
        return False

    # Check to see if the certificate is already assigned.
    current_name = None

    for current_binding in current_bindings:
        if binding_info == current_binding:
            current_name = current_bindings[current_binding]['certificatehash']

    _LOG.debug('Current certificate thumbprint: %s', current_name)
    _LOG.debug('New certificate thumbprint: %s', name)

    if name == current_name:
        _LOG.debug('Certificate already present for binding: %s', name)
        return True

    # Verify that the certificate exists.
    certs = _list_certs()

    if name not in certs:
        _LOG.error('Certificate not present: %s', name)
        return False

    pscmd.append("New-Item -Path '{0}' -Thumbprint '{1}'".format(
        binding_path, name))
    pscmd.append(" -SSLFlags {0}".format(sslflags))

    cmd_ret = _srvmgr(''.join(pscmd))

    if cmd_ret['retcode'] == 0:
        new_cert_bindings = list_cert_bindings(site)

        if binding_info not in new_cert_bindings:
            _LOG.error('Binding not present: %s', binding_info)
            return False

        if name == new_cert_bindings[binding_info]['certificatehash']:
            _LOG.debug('Certificate binding created successfully: %s', name)
            return True
    _LOG.error('Unable to create certificate binding: %s', name)
    return False
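The early validation in this function (range-checking ``sslflags`` and turning the binding info into an ``IIS:\SslBindings`` path) is easy to exercise on its own. Below is a minimal, self-contained sketch of that pattern; the flag range, the ``ipaddress:port:hostheader`` binding layout, and the local exception class are assumptions for illustration, not values taken from the module.

_VALID_SSL_FLAGS = (0, 1, 2, 3)  # assumed range, for illustration only


class SaltInvocationError(Exception):
    """Stand-in for salt.exceptions.SaltInvocationError."""


def build_ssl_binding_path(hostheader='', ipaddress='*', port=443, sslflags=0):
    # Reject unsupported flag values before touching PowerShell at all.
    if sslflags not in _VALID_SSL_FLAGS:
        raise SaltInvocationError(
            ("Invalid sslflags '{0}' specified. Valid sslflags range:"
             ' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0],
                                 _VALID_SSL_FLAGS[-1]))
    # Assumed binding layout: ipaddress:port:hostheader, with colons swapped
    # for '!' in the SslBindings provider path.
    binding_info = '{0}:{1}:{2}'.format(ipaddress, port, hostheader)
    return r'IIS:\SslBindings\{0}'.format(binding_info.replace(':', '!'))


print(build_ssl_binding_path('example', '*', 443))  # IIS:\SslBindings\*!443!example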
Example #2
def present(name,
            image_id,
            key_name=None,
            security_groups=None,
            user_data=None,
            cloud_init=None,
            instance_type='m1.small',
            kernel_id=None,
            ramdisk_id=None,
            block_device_mappings=None,
            instance_monitoring=False,
            spot_price=None,
            instance_profile_name=None,
            ebs_optimized=False,
            associate_public_ip_address=None,
            volume_type=None,
            delete_on_termination=True,
            iops=None,
            use_block_device_types=False,
            region=None,
            key=None,
            keyid=None,
            profile=None):
    '''
    Ensure the launch configuration exists.

    name
        Name of the launch configuration.

    image_id
        AMI to use for instances. AMI must exist or creation of the launch
        configuration will fail.

    key_name
        Name of the EC2 key pair to use for instances. Key must exist or
        creation of the launch configuration will fail.

    security_groups
        List of names or security group IDs of the security groups with which
        to associate the EC2 instances or VPC instances, respectively. Security
        groups must exist, or creation of the launch configuration will fail.

    user_data
        The user data available to launched EC2 instances.

    cloud_init
        A dict of cloud_init configuration. Currently supported values:
        scripts, cloud-config. Mutually exclusive with user_data.

    instance_type
        The instance type. ex: m1.small.

    kernel_id
        The kernel id for the instance.

    ramdisk_id
        The RAM disk ID for the instance.

    block_device_mappings
        A dict of block device mappings.

    instance_monitoring
        Whether instances in group are launched with detailed monitoring.

    spot_price
        The spot price you are bidding. Only applies if you are building an
        autoscaling group with spot instances.

    instance_profile_name
        The name or the Amazon Resource Name (ARN) of the instance profile
        associated with the IAM role for the instance. Instance profile must
        exist or the creation of the launch configuration will fail.

    ebs_optimized
        Specifies whether the instance is optimized for EBS I/O (true) or not
        (false).

    associate_public_ip_address
        Used for Auto Scaling groups that launch instances into an Amazon
        Virtual Private Cloud. Specifies whether to assign a public IP address
        to each instance launched in an Amazon VPC.

    volume_type
        Undocumented in boto.

    delete_on_termination
        Undocumented in boto.

    iops
        Undocumented in boto.

    use_block_device_types
        Undocumented in boto.

    region
        The region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    if user_data and cloud_init:
        raise SaltInvocationError('user_data and cloud_init are mutually'
                                  ' exclusive options.')
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    exists = __salt__['boto_asg.launch_configuration_exists'](name, region,
                                                              key, keyid,
                                                              profile)
    if not exists:
        if __opts__['test']:
            msg = 'Launch configuration set to be created.'
            ret['comment'] = msg
            ret['result'] = None
            return ret
        if cloud_init:
            user_data = __salt__['boto_asg.get_cloud_init_mime'](cloud_init)
        # TODO: Ensure image_id, key_name, security_groups and instance_profile
        # exist, or throw an invocation error.
        created = __salt__['boto_asg.create_launch_configuration'](
            name, image_id, key_name, security_groups, user_data,
            instance_type, kernel_id, ramdisk_id, block_device_mappings,
            instance_monitoring, spot_price, instance_profile_name,
            ebs_optimized, associate_public_ip_address, volume_type,
            delete_on_termination, iops, use_block_device_types, region, key,
            keyid, profile)
        if created:
            ret['changes']['old'] = None
            ret['changes']['new'] = name
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to create launch configuration.'
    else:
        ret['comment'] = 'Launch configuration present.'
    return ret
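The skeleton of this state — check for existence, honour ``test`` mode by returning ``result: None``, and only record ``changes`` when something was actually created — recurs throughout Salt states. A stripped-down sketch of that shape, with hypothetical ``exists``/``create`` callables standing in for the ``__salt__['boto_asg.*']`` functions:

def present_sketch(name, exists, create, test=False):
    '''Minimal shape of an idempotent "present" state.'''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if exists(name):
        ret['comment'] = 'Launch configuration present.'
        return ret
    if test:
        # Test mode: report what would happen without doing it.
        ret['result'] = None
        ret['comment'] = 'Launch configuration set to be created.'
        return ret
    if create(name):
        ret['changes'] = {'old': None, 'new': name}
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to create launch configuration.'
    return ret


# Example: pretend the launch configuration does not exist yet.
print(present_sketch('web-lc', exists=lambda n: False, create=lambda n: True))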
Example #3
def install(
    name,
    version=None,
    source=None,
    force=False,
    pre_versions=False,
    install_args=None,
    override_args=False,
    force_x86=False,
    package_args=None,
    allow_multiple=False,
    execution_timeout=None,
):
    """
    Instructs Chocolatey to install a package.

    Args:

        name (str):
            The name of the package to be installed. Only accepts a single
            argument. Required.

        version (str):
            Install a specific version of the package. Defaults to latest
            version. Default is None.

        source (str):
            Chocolatey repository (directory, share or remote URL feed) the
            package comes from. Defaults to the official Chocolatey feed.
            Default is None.

            Alternate Sources:

            - cygwin
            - python
            - ruby
            - webpi
            - windowsfeatures

        force (bool):
            Reinstall the current version of an existing package. Do not use
            with ``allow_multiple``. Default is False.

        pre_versions (bool):
            Include pre-release packages. Default is False.

        install_args (str):
            A list of install arguments you want to pass to the installation
            process, e.g. a product key or feature list. Default is None.

        override_args (bool):
            Set to true if you want to override the original install arguments
            (for the native installer) in the package and use your own. When
            this is set to False, ``install_args`` will be appended to the end
            of the default arguments. Default is False.

        force_x86 (bool):
            Force x86 (32bit) installation on 64 bit systems. Default is False.

        package_args (str):
            Arguments you want to pass to the package. Default is None.

        allow_multiple (bool):
            Allow multiple versions of the package to be installed. Do not use
            with ``force``. Does not work with all packages. Default is False.

            .. versionadded:: 2017.7.0

        execution_timeout (str):
            Chocolatey execution timeout value you want to pass to the
            installation process. Default is None.

            .. versionadded:: 2018.3.0

    Returns:
        str: The output of the ``chocolatey`` command

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install <package name>
        salt '*' chocolatey.install <package name> version=<package version>
        salt '*' chocolatey.install <package name> install_args=<args> override_args=True
    """
    if force and allow_multiple:
        raise SaltInvocationError(
            "Cannot use 'force' in conjunction with 'allow_multiple'")

    choc_path = _find_chocolatey()
    # Chocolatey accepts multiple space-separated package names, but any
    # additional arguments apply to ALL of the packages specified, so this
    # function only passes a single package name.
    cmd = [choc_path, "install", name]
    if version:
        cmd.extend(["--version", version])
    if source:
        cmd.extend(["--source", source])
    if salt.utils.data.is_true(force):
        cmd.append("--force")
    if salt.utils.data.is_true(pre_versions):
        cmd.append("--prerelease")
    if install_args:
        cmd.extend(["--installarguments", install_args])
    if override_args:
        cmd.append("--overridearguments")
    if force_x86:
        cmd.append("--forcex86")
    if package_args:
        cmd.extend(["--packageparameters", package_args])
    if allow_multiple:
        cmd.append("--allow-multiple")
    if execution_timeout:
        cmd.extend(["--execution-timeout", execution_timeout])

    # Salt doesn't need to see the progress
    cmd.extend(_no_progress())
    cmd.extend(_yes())
    result = __salt__["cmd.run_all"](cmd, python_shell=False)

    if result["retcode"] not in [0, 1641, 3010]:
        err = "Running chocolatey failed: {}".format(result["stdout"])
        raise CommandExecutionError(err)

    if name == "chocolatey":
        _clear_context()

    return result["stdout"]
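Most of this function is assembling an argument list, which can be factored out and unit-tested without Chocolatey installed. A minimal sketch of that assembly (the flag names mirror the ones used above; ``choc_path`` is a placeholder):

def build_choco_install_cmd(name, version=None, source=None, force=False,
                            pre_versions=False, choc_path='choco'):
    # Build the arguments as a list so they can be passed to cmd.run_all
    # with python_shell=False (no shell quoting surprises).
    cmd = [choc_path, 'install', name]
    if version:
        cmd.extend(['--version', version])
    if source:
        cmd.extend(['--source', source])
    if force:
        cmd.append('--force')
    if pre_versions:
        cmd.append('--prerelease')
    return cmd


assert build_choco_install_cmd('git', version='2.40.0', force=True) == [
    'choco', 'install', 'git', '--version', '2.40.0', '--force']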
Example #4
def make_src_pkg(dest_dir,
                 spec,
                 sources,
                 env=None,
                 template=None,
                 saltenv="base",
                 runas="root"):
    """
    Create a source rpm from the given spec file and sources

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.make_src_pkg /var/www/html/
                https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
                https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz

    This example command should build the libnacl SOURCE package and place it in
    /var/www/html/ on the minion.

    .. versionchanged:: 2017.7.0

    dest_dir
        The directory on the minion to place the built package(s)

    spec
        The location of the spec file (used for rpms)

    sources
        The list of package sources

    env
        A dictionary of environment variables to be set prior to execution.

    template
        Run the spec file through a templating engine.
        Optional argument; if not set, no templating engine is used.

    saltenv
        The saltenv to use for files downloaded from the salt fileserver.

    runas
        The user to run the build process as

        .. versionadded:: 2018.3.3


    .. note::

        The source RPM is built using SHA256 digests and a minimum dist level of el6.

    """
    _create_rpmmacros(runas)
    tree_base = _mk_tree(runas)
    spec_path = _get_spec(tree_base, spec, template, saltenv)
    __salt__["file.chown"](path=spec_path, user=runas, group="mock")
    __salt__["file.chown"](path=tree_base, user=runas, group="mock")

    if isinstance(sources, six.string_types):
        sources = sources.split(",")
    for src in sources:
        _get_src(tree_base, src, saltenv, runas)

    # make source rpms for dist el6 with SHA256, usable with mock on other dists
    cmd = 'rpmbuild --verbose --define "_topdir {0}" -bs --define "dist .el6" {1}'.format(
        tree_base, spec_path)
    retrc = __salt__["cmd.retcode"](cmd, runas=runas)
    if retrc != 0:
        raise SaltInvocationError(
            "Make source package for destination directory {0}, spec {1}, sources {2}, failed "
            "with return error {3}, check logs for further details".format(
                dest_dir, spec, sources, retrc))

    srpms = os.path.join(tree_base, "SRPMS")
    ret = []
    if not os.path.isdir(dest_dir):
        __salt__["file.makedirs_perms"](name=dest_dir,
                                        user=runas,
                                        group="mock")
    for fn_ in os.listdir(srpms):
        full = os.path.join(srpms, fn_)
        tgt = os.path.join(dest_dir, fn_)
        shutil.copy(full, tgt)
        ret.append(tgt)
    return ret
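Two small pieces of this function are worth isolating: normalising the ``sources`` argument (a comma-separated string or a list) and copying the built SRPMs into the destination directory. A sketch of both, written for Python 3 rather than the ``six``-based code above:

import os
import shutil


def normalize_sources(sources):
    # Accept either 'a.tar.gz,b.tar.gz' or ['a.tar.gz', 'b.tar.gz'].
    if isinstance(sources, str):
        return [src.strip() for src in sources.split(',') if src.strip()]
    return list(sources)


def collect_srpms(srpms_dir, dest_dir):
    # Copy every built source RPM into dest_dir and return the new paths,
    # mirroring the tail end of make_src_pkg.
    os.makedirs(dest_dir, exist_ok=True)
    copied = []
    for fn_ in os.listdir(srpms_dir):
        tgt = os.path.join(dest_dir, fn_)
        shutil.copy(os.path.join(srpms_dir, fn_), tgt)
        copied.append(tgt)
    return copied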
Example #5
def make_repo(
    repodir,
    keyid=None,
    env=None,
    use_passphrase=False,
    gnupghome="/etc/salt/gpgkeys",
    runas="root",
    timeout=15.0,
):
    """
    Make a package repository and optionally sign packages present

    Given the repodir, create a ``yum`` repository out of the RPMs therein
    and optionally sign it and the packages present. The name is the directory
    to turn into a repo. This is best used with ``onchanges`` linked to your
    package-building states.

    repodir
        The directory to find packages that will be in the repository.

    keyid
        .. versionchanged:: 2016.3.0

        Optional Key ID to use in signing packages and repository.
        Utilizes Public and Private keys associated with keyid which have
        been loaded into the minion's Pillar data.

        For example, contents from a Pillar data file with named Public
        and Private keys as follows:

        .. code-block:: yaml

            gpg_pkg_priv_key: |
              -----BEGIN PGP PRIVATE KEY BLOCK-----
              Version: GnuPG v1

              lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
              R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
              =JvW8
              -----END PGP PRIVATE KEY BLOCK-----

            gpg_pkg_priv_keyname: gpg_pkg_key.pem

            gpg_pkg_pub_key: |
              -----BEGIN PGP PUBLIC KEY BLOCK-----
              Version: GnuPG v1

              mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
              4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
              inNqW9c=
              =s1CX
              -----END PGP PUBLIC KEY BLOCK-----

            gpg_pkg_pub_keyname: gpg_pkg_key.pub

    env
        .. versionchanged:: 2016.3.0

        A dictionary of environment variables to be utilized in creating the
        repository.

        .. note::

            This parameter is not used for making ``yum`` repositories.

    use_passphrase : False
        .. versionadded:: 2016.3.0

        Use a passphrase with the signing key presented in ``keyid``.
        Passphrase is received from Pillar data which could be passed on the
        command line with ``pillar`` parameter. For example:

        .. code-block:: bash

            pillar='{ "gpg_passphrase" : "my_passphrase" }'

    gnupghome : /etc/salt/gpgkeys
        .. versionadded:: 2016.3.0

        Location where GPG related files are stored, used with ``keyid``.

    runas : root
        .. versionadded:: 2016.3.0

        User to create the repository as, and optionally sign packages.

        .. note::

            Ensure the user has correct permissions to any files and
            directories which are to be utilized.

    timeout : 15.0
        .. versionadded:: 2016.3.4

        Timeout in seconds to wait for the prompt for inputting the passphrase.

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.make_repo /var/www/html/

    """
    SIGN_PROMPT_RE = re.compile(r"Enter pass phrase: ", re.M)

    define_gpg_name = ""
    local_keyid = None
    local_uids = None
    phrase = ""

    if keyid is not None:
        # import_keys
        pkg_pub_keyname = __salt__["pillar.get"]("gpg_pkg_pub_keyname", None)
        pkg_priv_keyname = __salt__["pillar.get"]("gpg_pkg_priv_keyname", None)

        # Check the Pillar values before building the paths; otherwise a
        # missing key silently becomes the literal path '<gnupghome>/None'.
        if pkg_pub_keyname is None or pkg_priv_keyname is None:
            raise SaltInvocationError(
                "Pillar data should contain Public and Private keys associated with 'keyid'"
            )

        pkg_pub_key_file = "{0}/{1}".format(gnupghome, pkg_pub_keyname)
        pkg_priv_key_file = "{0}/{1}".format(gnupghome, pkg_priv_keyname)
        try:
            __salt__["gpg.import_key"](user=runas,
                                       filename=pkg_pub_key_file,
                                       gnupghome=gnupghome)
            __salt__["gpg.import_key"](user=runas,
                                       filename=pkg_priv_key_file,
                                       gnupghome=gnupghome)

        except SaltInvocationError:
            raise SaltInvocationError(
                "Public and Private key files associated with Pillar data and 'keyid' "
                "{0} could not be found".format(keyid))

        # gpg keys should have been loaded as part of setup
        # retrieve specified key and preset passphrase
        local_keys = __salt__["gpg.list_keys"](user=runas, gnupghome=gnupghome)
        for gpg_key in local_keys:
            if keyid == gpg_key["keyid"][8:]:
                local_uids = gpg_key["uids"]
                local_keyid = gpg_key["keyid"]
                break

        if local_keyid is None:
            raise SaltInvocationError(
                "The key ID '{0}' was not found in GnuPG keyring at '{1}'".
                format(keyid, gnupghome))

        if use_passphrase:
            phrase = __salt__["pillar.get"]("gpg_passphrase")

        if local_uids:
            define_gpg_name = "--define='%_signature gpg' --define='%_gpg_name {0}'".format(
                local_uids[0])

        # need to update rpm with public key
        cmd = "rpm --import {0}".format(pkg_pub_key_file)
        retrc = __salt__["cmd.retcode"](cmd, runas=runas, use_vt=True)
        if retrc != 0:
            raise SaltInvocationError(
                "Failed to import public key from file {0} with return "
                "error {1}, check logs for further details".format(
                    pkg_pub_key_file, retrc))

        # sign_it_here
        # interval of 0.125 is really too fast on some systems
        interval = 0.5
        for fileused in os.listdir(repodir):
            if fileused.endswith(".rpm"):
                abs_file = os.path.join(repodir, fileused)
                number_retries = timeout / interval
                times_looped = 0
                error_msg = "Failed to sign file {0}".format(abs_file)
                cmd = "rpm {0} --addsign {1}".format(define_gpg_name, abs_file)
                preexec_fn = functools.partial(
                    salt.utils.user.chugid_and_umask, runas, None)
                try:
                    stdout, stderr = None, None
                    proc = salt.utils.vt.Terminal(
                        cmd,
                        shell=True,
                        preexec_fn=preexec_fn,
                        stream_stdout=True,
                        stream_stderr=True,
                    )
                    while proc.has_unread_data:
                        stdout, stderr = proc.recv()
                        if stdout and SIGN_PROMPT_RE.search(stdout):
                            # have the prompt for inputting the passphrase
                            proc.sendline(phrase)
                        else:
                            times_looped += 1

                        if times_looped > number_retries:
                            raise SaltInvocationError(
                                "Attemping to sign file {0} failed, timed out after {1} seconds"
                                .format(abs_file,
                                        int(times_looped * interval)))
                        time.sleep(interval)

                    proc_exitstatus = proc.exitstatus
                    if proc_exitstatus != 0:
                        raise SaltInvocationError(
                            "Signing file {0} failed with proc.status {1}".
                            format(abs_file, proc_exitstatus))
                except salt.utils.vt.TerminalException as err:
                    trace = traceback.format_exc()
                    log.error("%s: %s\n%s", error_msg, err, trace)
                finally:
                    proc.close(terminate=True, kill=True)

    cmd = "createrepo --update {0}".format(repodir)
    return __salt__["cmd.run_all"](cmd, runas=runas)
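The key lookup above matches the caller's short key ID against the tail of the full key ID returned by ``gpg.list_keys`` (``gpg_key['keyid'][8:]``). A small sketch of that matching with plain dicts standing in for the GPG listing:

def find_local_key(keyid, local_keys):
    # Match an 8-character short key ID against the last 8 characters of
    # each 16-character key ID, as the loop above does.
    for gpg_key in local_keys:
        if keyid == gpg_key['keyid'][8:]:
            return gpg_key['keyid'], gpg_key.get('uids', [])
    return None, None


local_keyid, local_uids = find_local_key(
    '0123ABCD',
    [{'keyid': '89AB45670123ABCD', 'uids': ['Packager <packager@example.com>']}])
assert local_keyid == '89AB45670123ABCD'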
Example #6
def thread_multi_return(cls, minion_instance, opts, data):
    """
    This method should be used as a threading target; it starts the actual
    minion-side execution.
    """
    fn_ = os.path.join(minion_instance.proc_dir, data["jid"])

    salt.utils.process.appendproctitle("{}._thread_multi_return {}".format(
        cls.__name__, data["jid"]))

    sdata = {"pid": os.getpid()}
    sdata.update(data)
    log.info("Starting a new job with PID %s", sdata["pid"])
    with salt.utils.files.fopen(fn_, "w+b") as fp_:
        fp_.write(salt.payload.dumps(sdata))

    multifunc_ordered = opts.get("multifunc_ordered", False)
    num_funcs = len(data["fun"])
    if multifunc_ordered:
        ret = {
            "return": [None] * num_funcs,
            "retcode": [None] * num_funcs,
            "success": [False] * num_funcs,
        }
    else:
        ret = {"return": {}, "retcode": {}, "success": {}}

    for ind in range(0, num_funcs):
        if not multifunc_ordered:
            ret["success"][data["fun"][ind]] = False
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts[
                    "pillar"].get("minion_blackout", False):
                whitelist = minion_instance.opts["pillar"].get(
                    "minion_blackout_whitelist", [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if (data["fun"][ind] != "saltutil.refresh_pillar"
                        and data["fun"][ind] not in whitelist):
                    minion_blackout_violation = True
            elif minion_instance.opts["grains"].get("minion_blackout", False):
                whitelist = minion_instance.opts["grains"].get(
                    "minion_blackout_whitelist", [])
                if (data["fun"][ind] != "saltutil.refresh_pillar"
                        and data["fun"][ind] not in whitelist):
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError(
                    "Minion in blackout mode. Set 'minion_blackout' "
                    "to False in pillar or grains to resume operations. Only "
                    "saltutil.refresh_pillar allowed in blackout mode.")

            func = minion_instance.functions[data["fun"][ind]]

            args, kwargs = salt.minion.load_args_and_kwargs(
                func, data["arg"][ind], data)
            minion_instance.functions.pack["__context__"]["retcode"] = 0
            key = ind if multifunc_ordered else data["fun"][ind]
            ret["return"][key] = func(*args, **kwargs)
            retcode = minion_instance.functions.pack["__context__"].get(
                "retcode", 0)
            if retcode == 0:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(ret["return"][key].get(x, True)
                                      for x in ("result", "success"))
                except Exception:  # pylint: disable=broad-except
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = 1

            ret["retcode"][key] = retcode
            ret["success"][key] = retcode == 0
        except Exception as exc:  # pylint: disable=broad-except
            trb = traceback.format_exc()
            log.warning("The minion function caused an exception: %s", exc)
            if multifunc_ordered:
                ret["return"][ind] = trb
            else:
                ret["return"][data["fun"][ind]] = trb
        ret["jid"] = data["jid"]
        ret["fun"] = data["fun"]
        ret["fun_args"] = data["arg"]
    if "metadata" in data:
        ret["metadata"] = data["metadata"]
    if minion_instance.connected:
        minion_instance._return_pub(
            ret, timeout=minion_instance._return_retry_timer())
    if data["ret"]:
        if "ret_config" in data:
            ret["ret_config"] = data["ret_config"]
        if "ret_kwargs" in data:
            ret["ret_kwargs"] = data["ret_kwargs"]
        for returner in set(data["ret"].split(",")):
            ret["id"] = opts["id"]
            try:
                minion_instance.returners["{}.returner".format(returner)](ret)
            except Exception as exc:  # pylint: disable=broad-except
                log.error("The return failed for job %s: %s", data["jid"], exc)
Example #7
def _run_composer(action,
                  directory=None,
                  composer=None,
                  php=None,
                  runas=None,
                  prefer_source=None,
                  prefer_dist=None,
                  no_scripts=None,
                  no_plugins=None,
                  optimize=None,
                  no_dev=None,
                  quiet=False,
                  composer_home='/root',
                  extra_flags=None):
    '''
    Run PHP's composer with a specific action.

    If composer has not been installed globally (i.e. made available on the
    system PATH and made executable), the ``composer`` and ``php`` parameters
    will need to be set to the locations of the executables.

    action
        The action to pass to composer ('install', 'update', 'selfupdate', etc).

    directory
        Directory location of the composer.json file.  Required except when
        action='selfupdate'

    composer
        Location of the composer.phar file. If not set composer will
        just execute "composer" as if it is installed globally.
        (i.e. /path/to/composer.phar)

    php
        Location of the php executable to use with composer.
        (i.e. /usr/bin/php)

    runas
        Which system user to run composer as.

    prefer_source
        --prefer-source option of composer.

    prefer_dist
        --prefer-dist option of composer.

    no_scripts
        --no-scripts option of composer.

    no_plugins
        --no-plugins option of composer.

    optimize
        --optimize-autoloader option of composer. Recommended for production.

    no_dev
        --no-dev option for composer. Recommended for production.

    quiet
        --quiet option for composer. When set, composer output is suppressed
        and this function returns ``True`` instead of the command result.

    composer_home
        $COMPOSER_HOME environment variable

    extra_flags
        None, or a string containing extra flags to pass to composer.
    '''
    if composer is not None:
        if php is None:
            php = 'php'
    else:
        composer = 'composer'

    # Validate Composer is there
    if not _valid_composer(composer):
        raise CommandNotFoundError(
            '\'composer.{0}\' is not available. Couldn\'t find \'{1}\'.'
            .format(action, composer)
        )

    if action is None:
        raise SaltInvocationError('The \'action\' argument is required')

    # Don't need a dir for the 'selfupdate' action; all other actions do need a dir
    if directory is None and action != 'selfupdate':
        raise SaltInvocationError(
            'The \'directory\' argument is required for composer.{0}'.format(action)
        )

    # Base Settings
    cmd = [composer, action, '--no-interaction', '--no-ansi']

    if extra_flags is not None:
        cmd.extend(salt.utils.shlex_split(extra_flags))

    # If php is set, prepend it
    if php is not None:
        cmd = [php] + cmd

    # Add Working Dir
    if directory is not None:
        cmd.extend(['--working-dir', directory])

    # Other Settings
    if quiet is True:
        cmd.append('--quiet')

    if no_dev is True:
        cmd.append('--no-dev')

    if prefer_source is True:
        cmd.append('--prefer-source')

    if prefer_dist is True:
        cmd.append('--prefer-dist')

    if no_scripts is True:
        cmd.append('--no-scripts')

    if no_plugins is True:
        cmd.append('--no-plugins')

    if optimize is True:
        cmd.append('--optimize-autoloader')

    result = __salt__['cmd.run_all'](cmd,
                                     runas=runas,
                                     env={'COMPOSER_HOME': composer_home},
                                     python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])

    if quiet is True:
        return True

    return result
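As with the Chocolatey example, most of the work here is building an argument list; pulling that out makes the flag handling testable without composer or PHP installed. A pared-down sketch covering a few of the flags above:

def build_composer_cmd(action, directory=None, composer='composer', php=None,
                       no_dev=False, optimize=False, quiet=False):
    # Base settings match the function above: never interactive, no ANSI.
    cmd = [composer, action, '--no-interaction', '--no-ansi']
    if php is not None:
        # When a php binary is given, the phar is run through it.
        cmd = [php] + cmd
    if directory is not None:
        cmd.extend(['--working-dir', directory])
    if quiet:
        cmd.append('--quiet')
    if no_dev:
        cmd.append('--no-dev')
    if optimize:
        cmd.append('--optimize-autoloader')
    return cmd


assert build_composer_cmd('install', directory='/srv/app', no_dev=True) == [
    'composer', 'install', '--no-interaction', '--no-ansi',
    '--working-dir', '/srv/app', '--no-dev']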
Example #8
def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
                                   VPCName=None, VPCRegion=None, Comment=None,
                                   region=None, key=None, keyid=None, profile=None):
    '''
    Associates an Amazon VPC with a private hosted zone.

    To perform the association, the VPC and the private hosted zone must already exist. You can't
    convert a public hosted zone into a private hosted zone.  If you want to associate a VPC from
    one AWS account with a zone from another, the AWS account owning the hosted zone must first
    submit a CreateVPCAssociationAuthorization (using create_vpc_association_authorization() or by
    other means, such as the AWS console).  With that done, the account owning the VPC can then call
    associate_vpc_with_hosted_zone() to create the association.

    Note that if both sides happen to be within the same account, associate_vpc_with_hosted_zone()
    is enough on its own, and there is no need for the CreateVPCAssociationAuthorization step.

    Also note that looking up hosted zones by name (e.g. using the Name parameter) only works
    within a single account - if you're associating a VPC to a zone in a different account, as
    outlined above, you unfortunately MUST use the HostedZoneId parameter exclusively.

    HostedZoneId
        The unique Zone Identifier for the Hosted Zone.

    Name
        The domain name associated with the Hosted Zone(s).

    VPCId
        When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
        required.  Exclusive with VPCName.

    VPCName
        When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
        required.  Exclusive with VPCId.

    VPCRegion
        When working with a private hosted zone, the region of the associated VPC is required.  If
        not provided, an effort will be made to determine it from VPCId or VPCName, if possible.  If
        this fails, you'll need to provide an explicit value for VPCRegion.

    Comment
        Any comments you want to include about the change being made.

    CLI Example::

        salt myminion boto3_route53.associate_vpc_with_hosted_zone \
                    Name=example.org. VPCName=myVPC \
                    VPCRegion=us-east-1 Comment="Whoo-hoo!  I added another VPC."

    '''
    if not _exactly_one((HostedZoneId, Name)):
        raise SaltInvocationError('Exactly one of either HostedZoneId or Name is required.')
    if not _exactly_one((VPCId, VPCName)):
        raise SaltInvocationError('Exactly one of either VPCId or VPCName is required.')
    if Name:
        # {'PrivateZone': True} because you can only associate VPCs with private hosted zones.
        args = {'Name': Name, 'PrivateZone': True, 'region': region,
                'key': key, 'keyid': keyid, 'profile': profile}
        zone = find_hosted_zone(**args)
        if not zone:
            log.error(
                "Couldn't resolve domain name %s to a private hosted zone ID.",
                Name
            )
            return False
        HostedZoneId = zone[0]['HostedZone']['Id']
    vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
                                              keyid=keyid, profile=profile).get('vpcs', [])
    if VPCRegion and vpcs:
        vpcs = [v for v in vpcs if v['region'] == VPCRegion]
    if not vpcs:
        log.error('No VPC matching the given criteria found.')
        return False
    if len(vpcs) > 1:
        log.error('Multiple VPCs matching the given criteria found: %s.',
                  ', '.join([v['id'] for v in vpcs]))
        return False
    vpc = vpcs[0]
    if VPCName:
        VPCId = vpc['id']
    if not VPCRegion:
        VPCRegion = vpc['region']
    args = {'HostedZoneId': HostedZoneId, 'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}}
    if Comment is not None:
        args['Comment'] = Comment

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    tries = 10
    while tries:
        try:
            r = conn.associate_vpc_with_hosted_zone(**args)
            return _wait_for_sync(r['ChangeInfo']['Id'], conn)
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                tries -= 1
                continue
            log.error('Failed to associate VPC %s with hosted zone %s: %s',
                      VPCName or VPCId, Name or HostedZoneId, e)
            return False
    return False
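The ``while tries`` loop that backs off on AWS throttling errors appears in several of these boto3_route53 functions. A generic sketch of that retry shape (the error-classification callable is supplied by the caller; in the module it inspects botocore's ``ClientError`` response, and failures are logged and reported as False rather than re-raised):

import time


def retry_on_throttle(call, is_throttle_error, tries=10, delay=3):
    # Retry only while the failure looks like throttling; any other error
    # is re-raised immediately.
    while tries:
        try:
            return call()
        except Exception as exc:  # the module catches botocore's ClientError
            if is_throttle_error(exc):
                time.sleep(delay)
                tries -= 1
                continue
            raise
    return None


# Hypothetical usage against the call made above:
# retry_on_throttle(
#     lambda: conn.associate_vpc_with_hosted_zone(**args),
#     lambda e: getattr(e, 'response', {}).get('Error', {}).get('Code') == 'Throttling')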
Example #9
def disassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
                                     VPCName=None, VPCRegion=None, Comment=None,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Disassociates an Amazon VPC from a private hosted zone.

    You can't disassociate the last VPC from a private hosted zone.  You also can't convert a
    private hosted zone into a public hosted zone.

    Note that looking up hosted zones by name (e.g. using the Name parameter) only works
    within a single AWS account - if you're disassociating a VPC in one account from a hosted zone
    in a different account you unfortunately MUST use the HostedZoneId parameter exclusively.

    HostedZoneId
        The unique Zone Identifier for the Hosted Zone.

    Name
        The domain name associated with the Hosted Zone(s).

    VPCId
        When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
        required.  Exclusive with VPCName.

    VPCName
        When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
        required.  Exclusive with VPCId.

    VPCRegion
        When working with a private hosted zone, the region of the associated VPC is required.  If
        not provided, an effort will be made to determine it from VPCId or VPCName, if possible.  If
        this fails, you'll need to provide an explicit value for VPCRegion.

    Comment
        Any comments you want to include about the change being made.

    CLI Example::

        salt myminion boto3_route53.disassociate_vpc_from_hosted_zone \
                    Name=example.org. VPCName=myVPC \
                    VPCRegion=us-east-1 Comment="Whoops!  Don't wanna talk to this-here zone no more."

    '''
    if not _exactly_one((HostedZoneId, Name)):
        raise SaltInvocationError('Exactly one of either HostedZoneId or Name is required.')
    if not _exactly_one((VPCId, VPCName)):
        raise SaltInvocationError('Exactly one of either VPCId or VPCName is required.')
    if Name:
        # {'PrivateZone': True} because you can only associate VPCs with private hosted zones.
        args = {'Name': Name, 'PrivateZone': True, 'region': region,
                'key': key, 'keyid': keyid, 'profile': profile}
        zone = find_hosted_zone(**args)
        if not zone:
            log.error("Couldn't resolve domain name %s to a private hosted zone ID.", Name)
            return False
        HostedZoneId = zone[0]['HostedZone']['Id']
    vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
                                              keyid=keyid, profile=profile).get('vpcs', [])
    if VPCRegion and vpcs:
        vpcs = [v for v in vpcs if v['region'] == VPCRegion]
    if not vpcs:
        log.error('No VPC matching the given criteria found.')
        return False
    if len(vpcs) > 1:
        log.error('Multiple VPCs matching the given criteria found: %s.',
                  ', '.join([v['id'] for v in vpcs]))
        return False
    vpc = vpcs[0]
    if VPCName:
        VPCId = vpc['id']
    if not VPCRegion:
        VPCRegion = vpc['region']
    args = {'HostedZoneId': HostedZoneId, 'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}}
    if Comment is not None:
        args['Comment'] = Comment

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    tries = 10
    while tries:
        try:
            r = conn.disassociate_vpc_from_hosted_zone(**args)
            return _wait_for_sync(r['ChangeInfo']['Id'], conn)
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                tries -= 1
                continue
            log.error('Failed to disassociate VPC %s from hosted zone %s: %s',
                      VPCName or VPCId, Name or HostedZoneId, e)
            return False
    return False
Example #10
def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
    r'''
    Install the passed package(s) on the system using winrepo

    :param name:
        The name of a single package, or a comma-separated list of packages to
        install. (no spaces after the commas)
    :type name: str, list, or None

    :param bool refresh: Boolean value representing whether or not to refresh
        the winrepo db

    :param pkgs: A list of packages to install from a software repository.
        All packages listed under ``pkgs`` will be installed via a single
        command.
    :type pkgs: list or None

    :param str saltenv: The salt environment to use. Default is ``base``.

    *Keyword Arguments (kwargs)*

    :param str version:
        The specific version to install. If omitted, the latest version will be
        installed. If passed with multiple install, the version will apply to
        all packages. Recommended for single installation only.

    :param str cache_file:
        A single file to copy down for use with the installer. Copied to the
        same location as the installer. Use this over ``cache_dir`` if there
        are many files in the directory and you only need a specific file and
        don't want to cache additional files that may reside in the installer
        directory. Only applies to files on ``salt://``

    :param bool cache_dir:
        True will copy the contents of the installer directory. This is useful
        for installations that are not a single file. Only applies to
        directories on ``salt://``

    :return: A dict containing the new package names and versions:
    :rtype: dict

        If the package is installed by ``pkg.install``:

        .. code-block:: cfg

            {'<package>': {'old': '<old-version>',
                           'new': '<new-version>'}}

        If the package is already installed:

        .. code-block:: cfg

            {'<package>': {'current': '<current-version>'}}

    The following example will refresh the winrepo and install a single package,
    7zip.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.install 7zip refresh=True

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.install 7zip
        salt '*' pkg.install 7zip,filezilla
        salt '*' pkg.install pkgs='["7zip","filezilla"]'

    WinRepo Definition File Examples:

    The following example demonstrates the use of ``cache_file``. This would be
    used if you have multiple installers in the same directory that use the same
    ``install.ini`` file and you don't want to download the additional
    installers.

    .. code-block:: yaml

        ntp:
          4.2.8:
            installer: 'salt://win/repo/ntp/ntp-4.2.8-win32-setup.exe'
            full_name: Meinberg NTP Windows Client
            locale: en_US
            reboot: False
            cache_file: 'salt://win/repo/ntp/install.ini'
            install_flags: '/USEFILE=C:\salt\var\cache\salt\minion\files\base\win\repo\ntp\install.ini'
            uninstaller: 'NTP/uninst.exe'

    The following example demonstrates the use of ``cache_dir``. It assumes a
    file named ``install.ini`` resides in the same directory as the installer.

    .. code-block:: yaml

        ntp:
          4.2.8:
            installer: 'salt://win/repo/ntp/ntp-4.2.8-win32-setup.exe'
            full_name: Meinberg NTP Windows Client
            locale: en_US
            reboot: False
            cache_dir: True
            install_flags: '/USEFILE=C:\salt\var\cache\salt\minion\files\base\win\repo\ntp\install.ini'
            uninstaller: 'NTP/uninst.exe'
    '''
    ret = {}
    if refresh:
        refresh_db(saltenv)

    # Make sure name or pkgs is passed
    if not name and not pkgs:
        return 'Must pass a single package or a list of packages'

    # Ignore pkg_type from parse_targets, Windows does not support the
    # "sources" argument
    pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs,
                                                        **kwargs)[0]

    if pkg_params is None or len(pkg_params) == 0:
        log.error('No package definition found')
        return {}

    if not pkgs and len(pkg_params) == 1:
        # Only use the 'version' param if 'name' was not specified as a
        # comma-separated list
        pkg_params = {
            name: {
                'version': kwargs.get('version'),
                'extra_install_flags': kwargs.get('extra_install_flags')
            }
        }

    # Get a list of currently installed software for comparison at the end
    old = list_pkgs(saltenv=saltenv)

    # Loop through each package
    changed = []
    latest = []
    for pkg_name, options in six.iteritems(pkg_params):

        # Load package information for the package
        pkginfo = _get_package_info(pkg_name, saltenv=saltenv)

        # Make sure pkginfo was found
        if not pkginfo:
            log.error('Unable to locate package {0}'.format(pkg_name))
            ret[pkg_name] = 'Unable to locate package {0}'.format(pkg_name)
            continue

        # Get the version number passed or the latest available
        version_num = ''
        if options:
            version_num = options.get('version', False)

        if not version_num:
            version_num = _get_latest_pkg_version(pkginfo)

        # Check if the version is already installed
        if version_num == old.get(pkg_name) \
                or (pkg_name in old and old[pkg_name] == 'Not Found'):
            # Desired version number already installed
            ret[pkg_name] = {'current': version_num}
            continue

        # If version number not installed, is the version available?
        elif version_num not in pkginfo:
            log.error('Version {0} not found for package '
                      '{1}'.format(version_num, pkg_name))
            ret[pkg_name] = {'not found': version_num}
            continue

        if 'latest' in pkginfo:
            latest.append(pkg_name)

        # Get the installer settings from winrepo.p
        installer = pkginfo[version_num].get('installer', False)
        cache_dir = pkginfo[version_num].get('cache_dir', False)
        cache_file = pkginfo[version_num].get('cache_file', False)

        # Is there an installer configured?
        if not installer:
            log.error('No installer configured for version {0} of package '
                      '{1}'.format(version_num, pkg_name))
            ret[pkg_name] = {'no installer': version_num}
            continue

        # Is the installer in a location that requires caching
        if installer.startswith(('salt:', 'http:', 'https:', 'ftp:')):

            # Check for the 'cache_dir' parameter in the .sls file
            # If true, the entire directory will be cached instead of the
            # individual file. This is useful for installations that are not
            # single files
            if cache_dir and installer.startswith('salt:'):
                path, _ = os.path.split(installer)
                __salt__['cp.cache_dir'](path, saltenv, False, None,
                                         'E@init.sls$')

            # Check to see if the cache_file is cached... if passed
            if cache_file and cache_file.startswith('salt:'):

                # Check to see if the file is cached
                cached_file = __salt__['cp.is_cached'](cache_file, saltenv)
                if not cached_file:
                    cached_file = __salt__['cp.cache_file'](cache_file,
                                                            saltenv)

                # Make sure the cached file is the same as the source
                if __salt__['cp.hash_file'](cache_file, saltenv) != \
                        __salt__['cp.hash_file'](cached_file):
                    cached_file = __salt__['cp.cache_file'](cache_file,
                                                            saltenv)

                    # Check if the cache_file was cached successfully
                    if not cached_file:
                        log.error('Unable to cache {0}'.format(cache_file))
                        ret[pkg_name] = {
                            'failed to cache cache_file': cache_file
                        }
                        continue

            # Check to see if the installer is cached
            cached_pkg = __salt__['cp.is_cached'](installer, saltenv)
            if not cached_pkg:
                # It's not cached. Cache it, mate.
                cached_pkg = __salt__['cp.cache_file'](installer, saltenv)

                # Check if the installer was cached successfully
                if not cached_pkg:
                    log.error('Unable to cache file {0} '
                              'from saltenv: {1}'.format(installer, saltenv))
                    ret[pkg_name] = {'unable to cache': installer}
                    continue

            # Compare the hash of the cached installer to the source only if the
            # file is hosted on salt:
            if installer.startswith('salt:'):
                if __salt__['cp.hash_file'](installer, saltenv) != \
                        __salt__['cp.hash_file'](cached_pkg):
                    try:
                        cached_pkg = __salt__['cp.cache_file'](installer,
                                                               saltenv)
                    except MinionError as exc:
                        return '{0}: {1}'.format(exc, installer)

                    # Check if the installer was cached successfully
                    if not cached_pkg:
                        log.error('Unable to cache {0}'.format(installer))
                        ret[pkg_name] = {'unable to cache': installer}
                        continue
        else:
            # Run the installer directly (not hosted on salt:, https:, etc.)
            cached_pkg = installer

        # Fix non-windows slashes
        cached_pkg = cached_pkg.replace('/', '\\')
        cache_path, _ = os.path.split(cached_pkg)

        # Compare the hash sums
        source_hash = pkginfo[version_num].get('source_hash', False)
        if source_hash:
            source_sum = _get_source_sum(source_hash, cached_pkg, saltenv)
            log.debug('Source {0} hash: {1}'.format(source_sum['hash_type'],
                                                    source_sum['hsum']))

            cached_pkg_sum = salt.utils.get_hash(cached_pkg,
                                                 source_sum['hash_type'])
            log.debug('Package {0} hash: {1}'.format(source_sum['hash_type'],
                                                     cached_pkg_sum))

            if source_sum['hsum'] != cached_pkg_sum:
                raise SaltInvocationError(
                    ("Source hash '{0}' does not match package hash"
                     " '{1}'").format(source_sum['hsum'], cached_pkg_sum))
            log.debug('Source hash matches package hash.')

        # Get install flags
        install_flags = '{0}'.format(pkginfo[version_num].get('install_flags'))
        if options and options.get('extra_install_flags'):
            install_flags = '{0} {1}'.format(
                install_flags, options.get('extra_install_flags', ''))

        # Install the software
        # Check Use Scheduler Option
        if pkginfo[version_num].get('use_scheduler', False):

            # Build Scheduled Task Parameters
            if pkginfo[version_num].get('msiexec'):
                cmd = 'msiexec.exe'
                arguments = ['/i', cached_pkg]
                if pkginfo[version_num].get('allusers', True):
                    arguments.append('ALLUSERS="1"')
                arguments.extend(salt.utils.shlex_split(install_flags))
            else:
                cmd = cached_pkg
                arguments = salt.utils.shlex_split(install_flags)

            # Create Scheduled Task
            __salt__['task.create_task'](name='update-salt-software',
                                         user_name='System',
                                         force=True,
                                         action_type='Execute',
                                         cmd=cmd,
                                         arguments=' '.join(arguments),
                                         start_in=cache_path,
                                         trigger_type='Once',
                                         start_date='1975-01-01',
                                         start_time='01:00')
            # Run Scheduled Task
            __salt__['task.run_wait'](name='update-salt-software')
        else:
            # Build the install command
            cmd = []
            if pkginfo[version_num].get('msiexec'):
                cmd.extend(['msiexec', '/i', cached_pkg])
                if pkginfo[version_num].get('allusers', True):
                    cmd.append('ALLUSERS="1"')
            else:
                cmd.append(cached_pkg)
            cmd.extend(salt.utils.shlex_split(install_flags))
            # Launch the command
            result = __salt__['cmd.run_all'](cmd,
                                             cache_path,
                                             output_loglevel='quiet',
                                             python_shell=False,
                                             redirect_stderr=True)
            if not result['retcode']:
                ret[pkg_name] = {'install status': 'success'}
                changed.append(pkg_name)
            elif result['retcode'] == 3010:
                # 3010 is ERROR_SUCCESS_REBOOT_REQUIRED
                ret[pkg_name] = {'install status': 'success, reboot required'}
                changed.append(pkg_name)
            else:
                log.error('Failed to install {0}'.format(pkg_name))
                log.error('retcode {0}'.format(result['retcode']))
                log.error('installer output: {0}'.format(result['stdout']))
                ret[pkg_name] = {'install status': 'failed'}

    # Get a new list of installed software
    new = list_pkgs(saltenv=saltenv)

    # For installers that have no specific version (ie: chrome)
    # The software definition file will have a version of 'latest'
    # In that case there's no way to know which version has been installed
    # Just return the current installed version
    if latest:
        for pkg_name in latest:
            if old.get(pkg_name, 'old') == new.get(pkg_name, 'new'):
                ret[pkg_name] = {'current': new[pkg_name]}

    # Check for changes in the registry
    difference = salt.utils.compare_dicts(old, new)

    # Compare the software list before and after
    # Add the difference to ret
    ret.update(difference)

    return ret
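The branch near the end that builds the install command (msiexec vs. a direct installer call) is another piece that can be sketched and tested in isolation; ``shlex.split`` here mirrors what ``salt.utils.shlex_split`` does with the flag string:

import shlex


def build_install_cmd(cached_pkg, install_flags='', msiexec=False, allusers=True):
    cmd = []
    if msiexec:
        # MSI packages are driven through msiexec /i; ALLUSERS="1" makes the
        # install machine-wide unless the definition disables it.
        cmd.extend(['msiexec', '/i', cached_pkg])
        if allusers:
            cmd.append('ALLUSERS="1"')
    else:
        cmd.append(cached_pkg)
    cmd.extend(shlex.split(install_flags))
    return cmd


assert build_install_cmd(r'C:\cache\ntp-setup.exe',
                         '/VERYSILENT /NORESTART') == [
    r'C:\cache\ntp-setup.exe', '/VERYSILENT', '/NORESTART']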
Example #11
def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerReference=None,
                       Comment='', PrivateZone=False, DelegationSetId=None,
                       region=None, key=None, keyid=None, profile=None):
    '''
    Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
    newly created Hosted Zone.

    Name
        The name of the domain. This should be a fully-specified domain, and should terminate with
        a period. This is the name you have registered with your DNS registrar. It is also the name
        you will delegate from your registrar to the Amazon Route 53 delegation servers returned in
        response to this request.

    VPCId
        When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required.  Exclusive with VPCName.  Ignored if passed for a non-private zone.

    VPCName
        When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required.  Exclusive with VPCId.  Ignored if passed for a non-private zone.

    VPCRegion
        When creating a private hosted zone, the region of the associated VPC is required.  If not
        provided, an effort will be made to determine it from VPCId or VPCName, if possible.  If
        this fails, you'll need to provide an explicit value for this option.  Ignored if passed for
        a non-private zone.

    CallerReference
        A unique string that identifies the request and that allows create_hosted_zone() calls to be
        retried without the risk of executing the operation twice.  This is a required parameter
        when creating new Hosted Zones.  Maximum length of 128.

    Comment
        Any comments you want to include about the hosted zone.

    PrivateZone
        Boolean - Set to True if creating a private hosted zone.

    DelegationSetId
        If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon
        Route 53 assigned to the reusable delegation set when you created it.  Note that
        create_delegation_set() is not yet implemented, so you'd need to manually create any
        delegation sets before utilizing this.

    region
        Region endpoint to connect to.

    key
        AWS key to bind with.

    keyid
        AWS keyid to bind with.

    profile
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.

    CLI Example::

        salt myminion boto3_route53.create_hosted_zone example.org.
    '''
    if not Name.endswith('.'):
        raise SaltInvocationError('Domain must be fully-qualified, complete with trailing period.')
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    deets = find_hosted_zone(Name=Name, PrivateZone=PrivateZone,
                             region=region, key=key, keyid=keyid, profile=profile)
    if deets:
        log.info(
            'Route 53 hosted zone %s already exists. You may want to pass '
            'e.g. \'PrivateZone=True\' or similar...', Name
        )
        return None
    args = {
        'Name': Name,
        'CallerReference': CallerReference,
        'HostedZoneConfig': {
            'Comment': Comment,
            'PrivateZone': PrivateZone
        }
    }
    if DelegationSetId:
        args['DelegationSetId'] = DelegationSetId
    if PrivateZone:
        if not _exactly_one((VPCName, VPCId)):
            raise SaltInvocationError('Either VPCName or VPCId is required when creating a '
                                      'private zone.')
        vpcs = __salt__['boto_vpc.describe_vpcs'](
                vpc_id=VPCId, name=VPCName, region=region, key=key,
                keyid=keyid, profile=profile).get('vpcs', [])
        if VPCRegion and vpcs:
            vpcs = [v for v in vpcs if v['region'] == VPCRegion]
        if not vpcs:
            log.error('Private zone requested but no VPC matching given criteria found.')
            return None
        if len(vpcs) > 1:
            log.error(
                'Private zone requested but multiple VPCs matching given '
                'criteria found: %s.', [v['id'] for v in vpcs]
            )
            return None
        vpc = vpcs[0]
        if VPCName:
            VPCId = vpc['id']
        if not VPCRegion:
            VPCRegion = vpc['region']
        args.update({'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}})
    else:
        if any((VPCId, VPCName, VPCRegion)):
            log.info('Options VPCId, VPCName, and VPCRegion are ignored when creating '
                     'non-private zones.')
    tries = 10
    while tries:
        try:
            r = conn.create_hosted_zone(**args)
            r.pop('ResponseMetadata', None)
            if _wait_for_sync(r['ChangeInfo']['Id'], conn):
                return [r]
            return []
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                tries -= 1
                continue
            log.error('Failed to create hosted zone %s: %s', Name, e)
            return []
    return []
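
The retry loop above is a pattern that recurs throughout these boto3 wrappers: retry a bounded number of times whenever AWS answers with a Throttling error, and bail out (or re-raise) on anything else. A standalone sketch of that pattern, using a stand-in exception class so it runs without botocore installed, might look like this (the names are illustrative, not part of Salt or boto3):

import time


class FakeThrottlingError(Exception):
    '''Stand-in for botocore.exceptions.ClientError carrying a response dict.'''
    def __init__(self, code):
        super(FakeThrottlingError, self).__init__(code)
        self.response = {'Error': {'Code': code}}


def call_with_throttle_retry(func, tries=10, delay=3):
    '''Call func(), sleeping and retrying while AWS reports Throttling.'''
    while tries:
        try:
            return func()
        except FakeThrottlingError as exc:
            if exc.response.get('Error', {}).get('Code') == 'Throttling':
                time.sleep(delay)
                tries -= 1
                continue
            raise
    return None  # retries exhausted
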
Example #12
0
def readlink(path):
    '''
    Return the path that a symlink points to

    This is only supported on Windows Vista or later.

    In line with Unix behavior, this function will raise an error if the path
    is not a symlink; however, the error raised will be a SaltInvocationError,
    not an OSError.

    CLI Example:

    .. code-block:: bash

        salt '*' file.readlink /path/to/link
    '''
    if sys.getwindowsversion().major < 6:
        raise SaltInvocationError(
            'Symlinks are only supported on Windows Vista or later.')

    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')

    reparse_data = _get_reparse_data(path)

    if not reparse_data:
        raise SaltInvocationError(
            'The path specified is not a reparse point (symlinks are a type of reparse point).'
        )

    # REPARSE_DATA_BUFFER structure - see
    # http://msdn.microsoft.com/en-us/library/ff552012.aspx

    # parse the structure header to work out which type of reparse point this is
    header_parser = struct.Struct('L')
    ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
    # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
    if not ReparseTag & 0xA000FFFF == 0xA000000C:
        raise SaltInvocationError(
            'The path specified is not a symlink, but another type of reparse point (0x{0:X}).'
            .format(ReparseTag))

    # parse as a symlink reparse point structure (the structure for other
    # reparse points is different)
    data_parser = struct.Struct('LHHHHHHL')
    ReparseTag, ReparseDataLength, Reserved, SubstituteNameOffset, \
    SubstituteNameLength, PrintNameOffset, \
    PrintNameLength, Flags = data_parser.unpack(reparse_data[:data_parser.size])

    path_buffer_offset = data_parser.size
    absolute_substitute_name_offset = path_buffer_offset + SubstituteNameOffset
    target_bytes = reparse_data[
        absolute_substitute_name_offset:absolute_substitute_name_offset +
        SubstituteNameLength]
    target = target_bytes.decode('UTF-16')

    if target.startswith('\\??\\'):
        target = target[4:]

    try:
        # comes out in 8.3 form; convert it to LFN to make it look nicer
        target = win32file.GetLongPathName(target)
    except pywinerror as exc:
        # if file is not found (i.e. bad symlink), return it anyway like on *nix
        if exc.winerror == 2:
            return target
        raise

    return target
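
The struct-based parsing above can be exercised off-Windows by handcrafting a symlink reparse buffer. The sketch below uses an explicit little-endian format ('<LHHHHHHL') so the field widths are fixed on every platform (the module itself can rely on native sizes because it only runs on Windows); the layout mirrors the REPARSE_DATA_BUFFER fields the function unpacks.

import struct

header_fmt = struct.Struct('<LHHHHHHL')  # ReparseTag .. Flags

substitute = '\\??\\C:\\target'.encode('utf-16-le')
print_name = 'C:\\target'.encode('utf-16-le')
path_buffer = substitute + print_name

fake_reparse = header_fmt.pack(
    0xA000000C,             # IO_REPARSE_TAG_SYMLINK
    len(path_buffer) + 12,  # ReparseDataLength (approximate; unused below)
    0,                      # Reserved
    0,                      # SubstituteNameOffset
    len(substitute),        # SubstituteNameLength
    len(substitute),        # PrintNameOffset
    len(print_name),        # PrintNameLength
    0,                      # Flags
) + path_buffer

fields = header_fmt.unpack(fake_reparse[:header_fmt.size])
sub_offset, sub_length = fields[3], fields[4]
start = header_fmt.size + sub_offset
target = fake_reparse[start:start + sub_length].decode('utf-16-le')
if target.startswith('\\??\\'):
    target = target[4:]
print(target)  # C:\target
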
Example #13
0
    def proc_run(self, msg):
        '''
        Execute the run in a dedicated process
        '''
        data = msg['pub']
        fn_ = os.path.join(self.proc_dir, data['jid'])
        self.opts['__ex_id'] = data['jid']
        salt.utils.process.daemonize_if(self.opts)

        salt.transport.jobber_stack = stack = self._setup_jobber_stack()
        # set up return destination from source
        src_estate, src_yard, src_share = msg['route']['src']
        salt.transport.jobber_estate_name = src_estate
        salt.transport.jobber_yard_name = src_yard

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(self.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        if function_name in self.modules.value:
            try:
                func = self.modules.value[data['fun']]
                args, kwargs = salt.minion.load_args_and_kwargs(
                    func,
                    salt.utils.args.parse_input(data['arg'],
                                                no_parse=data.get(
                                                    'no_parse', [])), data)
                sys.modules[func.__module__].__context__['retcode'] = 0

                executors = data.get('module_executors') or self.opts.get(
                    'module_executors', ['direct_call'])
                if isinstance(executors, six.string_types):
                    executors = [executors]
                elif not isinstance(executors, list) or not executors:
                    raise SaltInvocationError(
                        "Wrong executors specification: {0}. String or non-empty list expected"
                        .format(executors))
                if self.opts.get('sudo_user', '') and executors[-1] != 'sudo':
                    executors[-1] = 'sudo.get'  # replace
                log.trace("Executors list {0}".format(executors))

                for name in executors:
                    if name not in self.module_executors.value:
                        raise SaltInvocationError(
                            "Executor '{0}' is not available".format(name))
                    return_data = self.module_executors.value[name].execute(
                        self.opts, data, func, args, kwargs)
                    if return_data is not None:
                        break

                if isinstance(return_data, types.GeneratorType):
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify(
                            [data['jid'], 'prog', self.opts['id'],
                             str(ind)], 'job')
                        event_data = {'return': single}
                        self._fire_master(event_data,
                                          tag)  # Need to look into this
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data
                ret['retcode'] = sys.modules[func.__module__].__context__.get(
                    'retcode', 0)
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name)
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
            except CommandExecutionError as exc:
                log.error('A command in \'{0}\' had a problem: {1}'.format(
                    function_name, exc),
                          exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = 'ERROR: {0}'.format(exc)
            except SaltInvocationError as exc:
                log.error('Problem executing \'{0}\': {1}'.format(
                    function_name, exc),
                          exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc)
            except TypeError as exc:
                msg = ('TypeError encountered executing {0}: {1}. See '
                       'debug log for more info.').format(function_name, exc)
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
        else:
            ret['return'] = '\'{0}\' is not available.'.format(function_name)

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        self._return_pub(msg, ret, stack)
        if data['ret']:
            ret['id'] = self.opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    self.returners.value['{0}.returner'.format(returner)](ret)
                except Exception as exc:
                    log.error('The return failed for job {0} {1}'.format(
                        data['jid'], exc))
        console.concise("Closing Jobber Stack {0}\n".format(stack.name))
        stack.server.close()
        salt.transport.jobber_stack = None
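
The executor loop in proc_run tries each configured executor in order and stops at the first one that returns something other than None. A tiny standalone sketch of that fall-through behaviour, with dummy executors standing in for Salt's loader, is:

def make_executor(name, result):
    def execute(opts, data, func, args, kwargs):
        print('trying executor', name)
        return result
    return execute

# 'splay' declines (returns None), so execution falls through to 'direct_call'.
executors = {
    'splay': make_executor('splay', None),
    'direct_call': make_executor('direct_call', 'job output'),
}

return_data = None
for name in ('splay', 'direct_call'):
    return_data = executors[name]({}, {}, None, [], {})
    if return_data is not None:
        break
print(return_data)  # job output
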
Example #14
0
def orchestrate(mods,
                saltenv='base',
                test=None,
                exclude=None,
                pillar=None,
                pillarenv=None,
                pillar_enc=None,
                orchestration_jid=None):
    '''
    .. versionadded:: 0.17.0

    Execute a state run from the master, used as a powerful orchestration
    system.

    .. seealso:: More Orchestrate documentation

        * :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
        * :py:mod:`Docs for the master-side state module <salt.states.saltmod>`

    CLI Examples:

    .. code-block:: bash

        salt-run state.orchestrate webserver
        salt-run state.orchestrate webserver saltenv=dev test=True
        salt-run state.orchestrate webserver saltenv=dev pillarenv=aws

    .. versionchanged:: 2014.1.1

        Runner renamed from ``state.sls`` to ``state.orchestrate``

    .. versionchanged:: 2014.7.0

        Runner uses the pillar variable

    .. versionchanged:: develop

        Runner uses the pillar_enc variable that allows renderers to render the pillar.
        This is useful when supplying the contents of a file as pillar, and the file contains
        gpg-encrypted entries.

    .. seealso:: GPG renderer documentation

    CLI Examples:

    .. code-block:: bash

       salt-run state.orchestrate webserver pillar_enc=gpg pillar="$(cat somefile.json)"

    '''
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary')
    __opts__['file_client'] = 'local'
    minion = salt.minion.MasterMinion(__opts__)

    if pillarenv is None and 'pillarenv' in __opts__:
        pillarenv = __opts__['pillarenv']
    if saltenv is None and 'saltenv' in __opts__:
        saltenv = __opts__['saltenv']

    running = minion.functions['state.sls'](
        mods,
        test,
        exclude,
        pillar=pillar,
        saltenv=saltenv,
        pillarenv=pillarenv,
        pillar_enc=pillar_enc,
        orchestration_jid=orchestration_jid)
    ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'}
    res = __utils__['state.check_result'](ret['data'])
    if res:
        ret['retcode'] = 0
    else:
        ret['retcode'] = 1
    return ret
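
Beyond the salt-run CLI, the orchestrate runner can also be invoked programmatically. A hedged sketch is below; it assumes a master configuration at the default path, and the exact shape of the returned structure (the 'data'/'retcode' keys built above) can vary between Salt versions.

import salt.config
import salt.runner

# Load the master configuration and build a runner client.
opts = salt.config.master_config('/etc/salt/master')
runner = salt.runner.RunnerClient(opts)

# Roughly equivalent to: salt-run state.orchestrate webserver saltenv=dev test=True
result = runner.cmd('state.orchestrate',
                    ['webserver'],
                    kwarg={'saltenv': 'dev', 'test': True})
print(result.get('retcode'))  # expected 0 on success, 1 otherwise
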
Example #15
0
def present(name,
            launch_config_name,
            availability_zones,
            min_size,
            max_size,
            launch_config=None,
            desired_capacity=None,
            load_balancers=None,
            default_cooldown=None,
            health_check_type=None,
            health_check_period=None,
            placement_group=None,
            vpc_zone_identifier=None,
            subnet_names=None,
            tags=None,
            termination_policies=None,
            termination_policies_from_pillar='boto_asg_termination_policies',
            suspended_processes=None,
            scaling_policies=None,
            scaling_policies_from_pillar='boto_asg_scaling_policies',
            scheduled_actions=None,
            scheduled_actions_from_pillar='boto_asg_scheduled_actions',
            alarms=None,
            alarms_from_pillar='boto_asg_alarms',
            region=None,
            key=None,
            keyid=None,
            profile=None,
            notification_arn=None,
            notification_arn_from_pillar='boto_asg_notification_arn',
            notification_types=None,
            notification_types_from_pillar='boto_asg_notification_types'):
    '''
    Ensure the autoscale group exists.

    name
        Name of the autoscale group.

    launch_config_name
        Name of the launch config to use for the group.  Or, if
        ``launch_config`` is specified, this will be the launch config
        name's prefix.  (see below)

    launch_config
        A dictionary of launch config attributes.  If specified, a
        launch config will be used or created, matching this set
        of attributes, and the autoscale group will be set to use
        that launch config.  The launch config name will be the
        ``launch_config_name`` followed by a hyphen followed by a hash
        of the ``launch_config`` dict contents.
        Example:

        .. code-block:: yaml

            my_asg:
              boto_asg.present:
              - launch_config:
                - ebs_optimized: false
                - instance_profile_name: my_iam_profile
                - kernel_id: ''
                - ramdisk_id: ''
                - key_name: my_ssh_key
                - image_name: aws2015091-hvm
                - instance_type: c3.xlarge
                - instance_monitoring: false
                - security_groups:
                  - my_sec_group_01
                  - my_sec_group_02

    availability_zones
        List of availability zones for the group.

    min_size
        Minimum size of the group.

    max_size
        Maximum size of the group.

    desired_capacity
        The desired capacity of the group.

    load_balancers
        List of load balancers for the group. Once set, this cannot be
        updated (Amazon restriction).

    default_cooldown
        Number of seconds after a Scaling Activity completes before any further
        scaling activities can start.

    health_check_type
        The service you want the health status from, Amazon EC2 or Elastic Load
        Balancer (EC2 or ELB).

    health_check_period
        Length of time in seconds after a new EC2 instance comes into service
        that Auto Scaling starts checking its health.

    placement_group
        Physical location of your cluster placement group created in Amazon
        EC2. Once set, this cannot be updated (Amazon restriction).

    vpc_zone_identifier
        A list of the subnet identifiers of the Virtual Private Cloud.

    subnet_names
        For VPC, a list of subnet names (NOT subnet IDs) to deploy into.
        Exclusive with vpc_zone_identifier.

    tags
        A list of tags. Example:

        .. code-block:: yaml

            - key: 'key'
              value: 'value'
              propagate_at_launch: true

    termination_policies
        A list of termination policies. Valid values are:

        * ``OldestInstance``
        * ``NewestInstance``
        * ``OldestLaunchConfiguration``
        * ``ClosestToNextInstanceHour``
        * ``Default``

        If no value is specified, the ``Default`` value is used.

    termination_policies_from_pillar
        Name of the pillar dict that contains termination policy settings.  Termination
        policies defined for this specific state will override those from pillar.

    suspended_processes
        List of processes to be suspended.  See
        http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html

    scaling_policies
        List of scaling policies.  Each policy is a dict of key-values described by
        https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy

    scaling_policies_from_pillar
        Name of the pillar dict that contains scaling policy settings.  Scaling policies
        defined for this specific state will override those from pillar.

    scheduled_actions
        A dictionary of scheduled actions. Each key is the name of a scheduled action and
        each value is a dictionary of options. For example:

        .. code-block:: yaml

            - scheduled_actions:
                scale_up_at_10:
                    desired_capacity: 4
                    min_size: 3
                    max_size: 5
                    recurrence: "0 9 * * 1-5"
                scale_down_at_7:
                    desired_capacity: 1
                    min_size: 1
                    max_size: 1
                    recurrence: "0 19 * * 1-5"

    scheduled_actions_from_pillar
        Name of the pillar dict that contains scheduled_actions settings.  Scheduled actions
        for this specific state will override those from pillar.

    alarms
        A dictionary of name -> boto_cloudwatch_alarm sections to be associated with this ASG.
        All attributes should be specified except for dimension, which will be
        automatically set to this ASG.

        See the :mod:`salt.states.boto_cloudwatch_alarm` state for information
        about these attributes.

        If any alarm actions include ":self:", this will be replaced with the ASG name.
        For example, an alarm_actions entry of "scaling_policy:self:ScaleUp" will
        map to the ARN for this ASG's scaling policy named "ScaleUp".
        In addition, any alarms that have only scaling_policy actions will be ignored if
        min_size is equal to max_size for this ASG.

    alarms_from_pillar
        Name of the pillar dict that contains alarm settings.  Alarms defined for this
        specific state will override those from pillar.

    region
        The region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    notification_arn
        The AWS arn that notifications will be sent to

    notification_arn_from_pillar
        name of the pillar dict that contains ``notification_arn`` settings.  A
        ``notification_arn`` defined for this specific state will override the
        one from pillar.

    notification_types
        A list of event names that will trigger a notification.  The list of valid
        notification types is:

        * ``autoscaling:EC2_INSTANCE_LAUNCH``
        * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR``
        * ``autoscaling:EC2_INSTANCE_TERMINATE``
        * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR``
        * ``autoscaling:TEST_NOTIFICATION``

    notification_types_from_pillar
        name of the pillar dict that contains ``notification_types`` settings.
        ``notification_types`` defined for this specific state will override those
        from the pillar.
    '''
    if vpc_zone_identifier and subnet_names:
        raise SaltInvocationError('vpc_zone_identifier and subnet_names are '
                                  'mutually exclusive options.')
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if subnet_names:
        vpc_zone_identifier = []
        for i in subnet_names:
            r = __salt__['boto_vpc.get_resource_id']('subnet',
                                                     name=i,
                                                     region=region,
                                                     key=key,
                                                     keyid=keyid,
                                                     profile=profile)
            if 'error' in r:
                ret['comment'] = 'Error looking up subnet ids: {0}'.format(
                    r['error'])
                ret['result'] = False
                return ret
            if 'id' not in r:
                ret['comment'] = 'Subnet {0} does not exist.'.format(i)
                ret['result'] = False
                return ret
            vpc_zone_identifier.append(r['id'])
    if vpc_zone_identifier:
        vpc_id = __salt__['boto_vpc.get_subnet_association'](
            vpc_zone_identifier, region, key, keyid, profile)
        vpc_id = vpc_id.get('vpc_id')
        log.debug(
            'Auto Scaling Group {0} is associated with VPC ID {1}'.format(
                name, vpc_id))
    else:
        vpc_id = None
        log.debug('Auto Scaling Group {0} has no VPC Association'.format(name))
    # if launch_config is defined, manage the launch config first.
    # hash the launch_config dict to create a unique name suffix and then
    # ensure it is present
    if launch_config:
        launch_config_name = launch_config_name + '-' + hashlib.md5(
            str(launch_config)).hexdigest()
        args = {
            'name': launch_config_name,
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile
        }

        for index, item in enumerate(launch_config):
            if 'image_name' in item:
                image_name = item['image_name']
                iargs = {
                    'ami_name': image_name,
                    'region': region,
                    'key': key,
                    'keyid': keyid,
                    'profile': profile
                }
                image_ids = __salt__['boto_ec2.find_images'](**iargs)
                if len(image_ids):
                    launch_config[index]['image_id'] = image_ids[0]
                else:
                    launch_config[index]['image_id'] = image_name
                del launch_config[index]['image_name']
                break

        if vpc_id:
            log.debug('Auto Scaling Group {0} is associated with a VPC'.format(name))
            # locate the security groups attribute of a launch config
            sg_index = None
            for index, item in enumerate(launch_config):
                if 'security_groups' in item:
                    sg_index = index
                    break
            # if security groups exist within launch_config then convert
            # to group ids
            if sg_index is not None:
                log.debug('security group associations found in launch config')
                _group_ids = __salt__['boto_secgroup.convert_to_group_ids'](
                    launch_config[sg_index]['security_groups'],
                    vpc_id=vpc_id,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile)
                launch_config[sg_index]['security_groups'] = _group_ids

        for d in launch_config:
            args.update(d)
        if not __opts__['test']:
            lc_ret = __states__['boto_lc.present'](**args)
            if lc_ret['result'] is True and lc_ret['changes']:
                if 'launch_config' not in ret['changes']:
                    ret['changes']['launch_config'] = {}
                ret['changes']['launch_config'] = lc_ret['changes']

    asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile)
    termination_policies = _determine_termination_policies(
        termination_policies, termination_policies_from_pillar)
    scaling_policies = _determine_scaling_policies(
        scaling_policies, scaling_policies_from_pillar)
    scheduled_actions = _determine_scheduled_actions(
        scheduled_actions, scheduled_actions_from_pillar)
    if asg is None:
        ret['result'] = False
        ret['comment'] = 'Failed to check autoscale group existence.'
    elif not asg:
        if __opts__['test']:
            msg = 'Autoscale group set to be created.'
            ret['comment'] = msg
            ret['result'] = None
            return ret
        notification_arn, notification_types = _determine_notification_info(
            notification_arn, notification_arn_from_pillar, notification_types,
            notification_types_from_pillar)
        created = __salt__['boto_asg.create'](
            name, launch_config_name, availability_zones, min_size, max_size,
            desired_capacity, load_balancers, default_cooldown,
            health_check_type, health_check_period, placement_group,
            vpc_zone_identifier, tags, termination_policies,
            suspended_processes, scaling_policies, scheduled_actions, region,
            notification_arn, notification_types, key, keyid, profile)
        if created:
            ret['changes']['old'] = None
            asg = __salt__['boto_asg.get_config'](name, region, key, keyid,
                                                  profile)
            ret['changes']['new'] = asg
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to create autoscale group'
    else:
        need_update = False
        # If any of these attributes can't be modified after creation
        # time, we should remove them from the dict.
        if scaling_policies:
            for policy in scaling_policies:
                if 'min_adjustment_step' not in policy:
                    policy['min_adjustment_step'] = None
        if scheduled_actions:
            for s_name, action in six.iteritems(scheduled_actions):
                if 'end_time' not in action:
                    action['end_time'] = None
        config = {
            'launch_config_name': launch_config_name,
            'availability_zones': availability_zones,
            'min_size': min_size,
            'max_size': max_size,
            'desired_capacity': desired_capacity,
            'default_cooldown': default_cooldown,
            'health_check_type': health_check_type,
            'health_check_period': health_check_period,
            'vpc_zone_identifier': vpc_zone_identifier,
            'tags': tags,
            'termination_policies': termination_policies,
            'suspended_processes': suspended_processes,
            'scaling_policies': scaling_policies,
            'scheduled_actions': scheduled_actions
        }
        # ensure that we reset termination_policies to default if none are specified
        if not termination_policies:
            config['termination_policies'] = ['Default']
        if suspended_processes is None:
            config['suspended_processes'] = []
        # ensure that we delete scaling_policies if none are specified
        if scaling_policies is None:
            config['scaling_policies'] = []
        # ensure that we delete scheduled_actions if none are specified
        if scheduled_actions is None:
            config['scheduled_actions'] = {}
        # allow defaults on start_time
        for s_name, action in six.iteritems(scheduled_actions):
            if 'start_time' not in action:
                asg_action = asg['scheduled_actions'].get(s_name, {})
                if 'start_time' in asg_action:
                    del asg_action['start_time']
        # note: do not loop using "key, value" - this can modify the value of
        # the aws access key
        for asg_property, value in six.iteritems(config):
            # Only modify values being specified; introspection is difficult
            # otherwise since it's hard to track default values, which will
            # always be returned from AWS.
            if value is None:
                continue
            if asg_property in asg:
                _value = asg[asg_property]
                if not _recursive_compare(value, _value):
                    log_msg = '{0} asg_property differs from {1}'
                    log.debug(log_msg.format(value, _value))
                    need_update = True
                    break
        if need_update:
            if __opts__['test']:
                msg = 'Autoscale group set to be updated.'
                ret['comment'] = msg
                ret['result'] = None
                return ret
            # add in alarms
            notification_arn, notification_types = _determine_notification_info(
                notification_arn, notification_arn_from_pillar,
                notification_types, notification_types_from_pillar)
            updated, msg = __salt__['boto_asg.update'](
                name,
                launch_config_name,
                availability_zones,
                min_size,
                max_size,
                desired_capacity=desired_capacity,
                load_balancers=load_balancers,
                default_cooldown=default_cooldown,
                health_check_type=health_check_type,
                health_check_period=health_check_period,
                placement_group=placement_group,
                vpc_zone_identifier=vpc_zone_identifier,
                tags=tags,
                termination_policies=termination_policies,
                suspended_processes=suspended_processes,
                scaling_policies=scaling_policies,
                scheduled_actions=scheduled_actions,
                region=region,
                notification_arn=notification_arn,
                notification_types=notification_types,
                key=key,
                keyid=keyid,
                profile=profile)
            if asg['launch_config_name'] != launch_config_name:
                # delete the old launch_config_name
                deleted = __salt__['boto_asg.delete_launch_configuration'](
                    asg['launch_config_name'],
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile)
                if deleted:
                    if 'launch_config' not in ret['changes']:
                        ret['changes']['launch_config'] = {}
                    ret['changes']['launch_config']['deleted'] = asg[
                        'launch_config_name']
            if updated:
                ret['changes']['old'] = asg
                asg = __salt__['boto_asg.get_config'](name, region, key, keyid,
                                                      profile)
                ret['changes']['new'] = asg
                ret['comment'] = 'Updated autoscale group.'
            else:
                ret['result'] = False
                ret['comment'] = msg
        else:
            ret['comment'] = 'Autoscale group present.'
    # add in alarms
    _ret = _alarms_present(name, min_size == max_size, alarms,
                           alarms_from_pillar, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
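
The launch config handling above derives the launch configuration name by appending an MD5 hash of the launch_config list to the configured prefix, so any change to the attributes produces a new launch configuration. A quick illustration of that naming scheme (hashing str() of the structure as the state does; on Python 3 the string must be encoded first, which the example does explicitly):

import hashlib

launch_config = [
    {'image_name': 'aws2015091-hvm'},
    {'instance_type': 'c3.xlarge'},
    {'security_groups': ['my_sec_group_01', 'my_sec_group_02']},
]

suffix = hashlib.md5(str(launch_config).encode('utf-8')).hexdigest()
launch_config_name = 'my_asg_lc' + '-' + suffix
print(launch_config_name)
# e.g. my_asg_lc-<32 hex chars>; changing any attribute changes the suffix.
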
Example #16
0
def get_resource_records(HostedZoneId=None, Name=None, StartRecordName=None,
                         StartRecordType=None, PrivateZone=None,
                         region=None, key=None, keyid=None, profile=None):
    '''
    Get all resource records from a given zone matching the provided StartRecordName (if given) or all
    records in the zone (if not), optionally filtered by a specific StartRecordType.  This will return
    any and all RRs matching, regardless of their special AWS flavors (weighted, geolocation, alias,
    etc.) so your code should be prepared for potentially large numbers of records back from this
    function - for example, if you've created a complex geolocation mapping with lots of entries all
    over the world providing the same server name to many different regional clients.

    If you want EXACTLY ONE record to operate on, you'll need to implement any logic required to
    pick the specific RR you care about from those returned.

    Note that if you pass in Name without providing a value for PrivateZone (either True or
    False), a CommandExecutionError can be raised if both a public and a private zone match
    the domain.

    CLI Example::

        salt myminion boto3_route53.get_resource_records Name=example.org. StartRecordName=test.example.org. StartRecordType=A
    '''
    if not _exactly_one((HostedZoneId, Name)):
        raise SaltInvocationError('Exactly one of either HostedZoneId or Name must '
                                  'be provided.')
    if Name:
        args = {'Name': Name, 'region': region, 'key': key, 'keyid': keyid,
                'profile': profile}
        if PrivateZone is not None:
            args['PrivateZone'] = PrivateZone
        zone = find_hosted_zone(**args)
        if not zone:
            log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
            return []
        HostedZoneId = zone[0]['HostedZone']['Id']

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = []
    next_rr_name = StartRecordName
    next_rr_type = StartRecordType
    next_rr_id = None
    done = False
    while True:
        if done:
            return ret
        args = {'HostedZoneId': HostedZoneId}
        if next_rr_name:
            args['StartRecordName'] = next_rr_name
        # Grrr, can't specify type unless name is set...  We'll do this via filtering later instead
        if next_rr_name and next_rr_type:
            args['StartRecordType'] = next_rr_type
        if next_rr_id:
            args['StartRecordIdentifier'] = next_rr_id
        try:
            r = conn.list_resource_record_sets(**args)
            rrs = r['ResourceRecordSets']
            next_rr_name = r.get('NextRecordName')
            next_rr_type = r.get('NextRecordType')
            next_rr_id = r.get('NextRecordIdentifier')
            for rr in rrs:
                if StartRecordName and rr['Name'] != StartRecordName:
                    done = True
                    break
                if StartRecordType and rr['Type'] != StartRecordType:
                    if StartRecordName:
                        done = True
                        break
                    else:
                        # We're filtering by type alone, and there might be more later, so...
                        continue
                ret += [rr]
            if not next_rr_name:
                done = True
        except ClientError as e:
            # Try forever on a simple thing like this...
            if e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                continue
            raise e
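
The loop above follows Route 53's NextRecordName/NextRecordType/NextRecordIdentifier markers until the listing is exhausted. The same cursor-following pattern, reduced to a fake paginated API so it runs standalone, looks like this:

# Fake pages keyed by the cursor used to request them (None = first page).
PAGES = {
    None: {'ResourceRecordSets': [{'Name': 'a.example.org.', 'Type': 'A'}],
           'NextRecordName': 'b.example.org.'},
    'b.example.org.': {'ResourceRecordSets': [{'Name': 'b.example.org.', 'Type': 'A'}]},
}


def list_page(start_name=None):
    return PAGES[start_name]


records = []
cursor = None
while True:
    page = list_page(cursor)
    records.extend(page['ResourceRecordSets'])
    cursor = page.get('NextRecordName')
    if not cursor:
        break
print([r['Name'] for r in records])  # ['a.example.org.', 'b.example.org.']
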
Example #17
0
def thread_return(cls, minion_instance, opts, data):
    """
    This method should be used as a threading target, start the actual
    minion side execution.
    """
    fn_ = os.path.join(minion_instance.proc_dir, data["jid"])

    salt.utils.process.appendproctitle("{}._thread_return {}".format(
        cls.__name__, data["jid"]))

    sdata = {"pid": os.getpid()}
    sdata.update(data)
    log.info("Starting a new job with PID %s", sdata["pid"])
    with salt.utils.files.fopen(fn_, "w+b") as fp_:
        fp_.write(salt.payload.dumps(sdata))
    ret = {"success": False}
    function_name = data["fun"]
    executors = (data.get("module_executors")
                 or getattr(minion_instance, "module_executors", [])
                 or opts.get("module_executors", ["direct_call"]))
    allow_missing_funcs = any([
        minion_instance.executors["{}.allow_missing_func".format(executor)](
            function_name) for executor in executors if
        "{}.allow_missing_func".format(executor) in minion_instance.executors
    ])
    if function_name in minion_instance.functions or allow_missing_funcs is True:
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts[
                    "pillar"].get("minion_blackout", False):
                whitelist = minion_instance.opts["pillar"].get(
                    "minion_blackout_whitelist", [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if (function_name != "saltutil.refresh_pillar"
                        and function_name not in whitelist):
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            if minion_instance.opts["grains"].get("minion_blackout", False):
                whitelist = minion_instance.opts["grains"].get(
                    "minion_blackout_whitelist", [])
                if (function_name != "saltutil.refresh_pillar"
                        and function_name not in whitelist):
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError(
                    "Minion in blackout mode. Set 'minion_blackout' "
                    "to False in pillar or grains to resume operations. Only "
                    "saltutil.refresh_pillar allowed in blackout mode.")

            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = salt.minion.load_args_and_kwargs(
                    func, data["arg"], data)
            else:
                # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                func = function_name
                args, kwargs = data["arg"], data
            minion_instance.functions.pack["__context__"]["retcode"] = 0
            if isinstance(executors, str):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError(
                    "Wrong executors specification: {}. String or non-empty list"
                    " expected".format(executors))
            if opts.get("sudo_user", "") and executors[-1] != "sudo":
                executors[-1] = "sudo"  # replace the last one with sudo
            log.trace("Executors list %s", executors)  # pylint: disable=no-member

            for name in executors:
                fname = "{}.execute".format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError(
                        "Executor '{}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data,
                                                               func, args,
                                                               kwargs)
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data["jid"], "prog", opts["id"],
                                  str(ind)], "job")
                    event_data = {"return": single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret["return"] = iret
            else:
                ret["return"] = return_data

            retcode = minion_instance.functions.pack["__context__"].get(
                "retcode", salt.defaults.exitcodes.EX_OK)
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(
                        return_data.get(x, True)
                        for x in ("result", "success"))
                except Exception:  # pylint: disable=broad-except
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret["retcode"] = retcode
            ret["success"] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = "Command required for '{}' not found".format(function_name)
            log.debug(msg, exc_info=True)
            ret["return"] = "{}: {}".format(msg, exc)
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                "A command in '%s' had a problem: %s",
                function_name,
                exc,
                exc_info_on_loglevel=logging.DEBUG,
            )
            ret["return"] = "ERROR: {}".format(exc)
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                "Problem executing '%s': %s",
                function_name,
                exc,
                exc_info_on_loglevel=logging.DEBUG,
            )
            ret["return"] = "ERROR executing '{}': {}".format(
                function_name, exc)
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            msg = "Passed invalid arguments to {}: {}\n{}".format(
                function_name, exc, func.__doc__ or "")
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret["return"] = msg
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:  # pylint: disable=broad-except
            msg = "The minion function caused an exception"
            log.warning(msg, exc_info=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg),
                                            opts,
                                            job=data)
            ret["return"] = "{}: {}".format(msg, traceback.format_exc())
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
    else:
        docs = minion_instance.functions["sys.doc"](
            "{}*".format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(
                function_name)
            ret["return"] = docs
        else:
            ret["return"] = minion_instance.functions.missing_fun_string(
                function_name)
            mod_name = function_name.split(".")[0]
            if mod_name in minion_instance.function_errors:
                ret["return"] += " Possible reasons: '{}'".format(
                    minion_instance.function_errors[mod_name])
        ret["success"] = False
        ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        ret["out"] = "nested"

    ret["jid"] = data["jid"]
    ret["fun"] = data["fun"]
    ret["fun_args"] = data["arg"]
    if "master_id" in data:
        ret["master_id"] = data["master_id"]
    if "metadata" in data:
        if isinstance(data["metadata"], dict):
            ret["metadata"] = data["metadata"]
        else:
            log.warning(
                "The metadata parameter must be a dictionary. Ignoring.")
    if minion_instance.connected:
        minion_instance._return_pub(
            ret, timeout=minion_instance._return_retry_timer())

    # Add default returners from minion config
    # Should have been converted to comma-delimited string already
    if isinstance(opts.get("return"), str):
        if data["ret"]:
            data["ret"] = ",".join((data["ret"], opts["return"]))
        else:
            data["ret"] = opts["return"]

    log.debug("minion return: %s", ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data["ret"] and isinstance(data["ret"], str):
        if "ret_config" in data:
            ret["ret_config"] = data["ret_config"]
        if "ret_kwargs" in data:
            ret["ret_kwargs"] = data["ret_kwargs"]
        ret["id"] = opts["id"]
        for returner in set(data["ret"].split(",")):
            try:
                returner_str = "{}.returner".format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(
                        returner_str)
                    log.error(
                        "Returner %s could not be loaded: %s",
                        returner_str,
                        returner_err,
                    )
            except Exception as exc:  # pylint: disable=broad-except
                log.exception("The return failed for job %s: %s", data["jid"],
                              exc)
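
The retcode logic near the end of the try block downgrades an otherwise clean run to a generic error code when the function's return dict carries a falsey 'result' or 'success' key. Isolated from the minion plumbing (with the exit codes written as literals, assumed here to match salt.defaults.exitcodes EX_OK=0 and EX_GENERIC=1), that decision is roughly:

EX_OK = 0
EX_GENERIC = 1


def derive_retcode(return_data, context_retcode=EX_OK):
    '''Mirror the success/retcode decision made after an executor returns.'''
    retcode = context_retcode
    if retcode == EX_OK:
        try:
            func_result = all(return_data.get(x, True) for x in ('result', 'success'))
        except Exception:
            # Return data is not a dict; treat it as successful.
            func_result = True
        if not func_result:
            retcode = EX_GENERIC
    return retcode, retcode == EX_OK


print(derive_retcode({'result': False}))          # (1, False)
print(derive_retcode(['plain', 'list', 'data']))  # (0, True)
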
Example #18
0
def change_resource_record_sets(HostedZoneId=None, Name=None,
                                PrivateZone=None, ChangeBatch=None,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Ugh!!!  Not going to try to reproduce and validate this mess in here - just pass what we
    get to AWS and let it decide if it's valid or not...

    See the `AWS Route53 API docs`__ as well as the `Boto3 documentation`__ for all the details...

    .. __: https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html
    .. __: http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.change_resource_record_sets

    The syntax for a ChangeBatch parameter is as follows, but note that the permutations of allowed
    parameters and combinations thereof are quite varied, so perusal of the above linked docs is
    highly recommended for any non-trivial configurations.

    .. code-block:: json

        ChangeBatch={
            'Comment': 'string',
            'Changes': [
                {
                    'Action': 'CREATE'|'DELETE'|'UPSERT',
                    'ResourceRecordSet': {
                        'Name': 'string',
                        'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                        'SetIdentifier': 'string',
                        'Weight': 123,
                        'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-west-1'|'eu-west-2'|'eu-central-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1'|'cn-north-1'|'ap-south-1',
                        'GeoLocation': {
                            'ContinentCode': 'string',
                            'CountryCode': 'string',
                            'SubdivisionCode': 'string'
                        },
                        'Failover': 'PRIMARY'|'SECONDARY',
                        'TTL': 123,
                        'ResourceRecords': [
                            {
                                'Value': 'string'
                            },
                        ],
                        'AliasTarget': {
                            'HostedZoneId': 'string',
                            'DNSName': 'string',
                            'EvaluateTargetHealth': True|False
                        },
                        'HealthCheckId': 'string',
                        'TrafficPolicyInstanceId': 'string'
                    }
                },
            ]
        }

    CLI Example:

    .. code-block:: bash

        foo='{
               "Name": "my-cname.example.org.",
               "TTL": 600,
               "Type": "CNAME",
               "ResourceRecords": [
                 {
                   "Value": "my-host.example.org"
                 }
               ]
             }'
        foo=`echo $foo`  # Remove newlines
        salt myminion boto3_route53.change_resource_record_sets DomainName=example.org. \
                keyid=A1234567890ABCDEF123 key=xblahblahblah \
                ChangeBatch="{'Changes': [{'Action': 'UPSERT', 'ResourceRecordSet': $foo}]}"
    '''
    if not _exactly_one((HostedZoneId, Name)):
        raise SaltInvocationError('Exactly one of either HostedZoneId or Name must be provided.')
    if Name:
        args = {'Name': Name, 'region': region, 'key': key, 'keyid': keyid,
                'profile': profile}
        if PrivateZone is not None:
            args['PrivateZone'] = PrivateZone
        zone = find_hosted_zone(**args)
        if not zone:
            log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
            return []
        HostedZoneId = zone[0]['HostedZone']['Id']

    args = {'HostedZoneId': HostedZoneId, 'ChangeBatch': ChangeBatch}
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    tries = 20  # A bit more headroom
    while tries:
        try:
            r = conn.change_resource_record_sets(**args)
            return _wait_for_sync(r['ChangeInfo']['Id'], conn, 30)  # And a little extra time here
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                tries -= 1
                continue
            log.error('Failed to apply requested changes to the hosted zone %s: %s',
                    (Name or HostedZoneId), six.text_type(e))
            raise e
    return False
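
Because the CLI example above has to push the ChangeBatch through shell quoting, it is often easier to build the same structure as a plain Python dict and hand it to the execution module from other Salt code. A hedged sketch (the __salt__ call is shown commented out since it only exists inside a loaded module, runner, or state):

change_batch = {
    'Comment': 'point my-cname at my-host',
    'Changes': [
        {
            'Action': 'UPSERT',
            'ResourceRecordSet': {
                'Name': 'my-cname.example.org.',
                'Type': 'CNAME',
                'TTL': 600,
                'ResourceRecords': [{'Value': 'my-host.example.org'}],
            },
        },
    ],
}

# From code with __salt__ available (e.g. another execution module or a state):
# __salt__['boto3_route53.change_resource_record_sets'](Name='example.org.',
#                                                       ChangeBatch=change_batch)
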
Example #19
0
def port_bindings(val, **kwargs):
    '''
    On the CLI, these are passed as multiple instances of a given CLI option.
    In Salt, we accept these as a comma-delimited list but the API expects a
    Python dictionary mapping ports to their bindings. The format the API
    expects is complicated depending on whether or not the external port maps
    to a different internal port, or if the port binding is for UDP instead of
    TCP (the default). For reference, see the "Port bindings" section in the
    docker-py documentation at the following URL:
    http://docker-py.readthedocs.io/en/stable/api.html
    '''
    validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
    if not isinstance(val, dict):
        if not isinstance(val, list):
            try:
                val = _split(val)
            except AttributeError:
                val = _split(str(val))

        for idx in range(len(val)):
            if not isinstance(val[idx], six.string_types):
                val[idx] = str(val[idx])

        def _format_port(port_num, proto):
            return str(port_num) + '/udp' if proto.lower() == 'udp' else port_num

        bindings = {}
        for binding in val:
            bind_parts = _split(binding, ':')
            num_bind_parts = len(bind_parts)
            if num_bind_parts == 1:
                # Single port or port range being passed through (no
                # special mapping)
                container_port = str(bind_parts[0])
                if container_port == '':
                    raise SaltInvocationError(
                        'Empty port binding definition found'
                    )
                container_port, _, proto = container_port.partition('/')
                try:
                    start, end = _get_port_range(container_port)
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for using
                    # the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                bind_vals = [
                    (_format_port(port_num, proto), None)
                    for port_num in range(start, end + 1)
                ]
            elif num_bind_parts == 2:
                if bind_parts[0] == '':
                    raise SaltInvocationError(
                        'Empty host port in port binding definition '
                        '\'{0}\''.format(binding)
                    )
                if bind_parts[1] == '':
                    raise SaltInvocationError(
                        'Empty container port in port binding definition '
                        '\'{0}\''.format(binding)
                    )
                container_port, _, proto = bind_parts[1].partition('/')
                try:
                    cport_start, cport_end = _get_port_range(container_port)
                    hport_start, hport_end = _get_port_range(bind_parts[0])
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for
                    # using the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                if (hport_end - hport_start) != (cport_end - cport_start):
                    # Port range is mismatched
                    raise SaltInvocationError(
                        'Host port range ({0}) does not have the same '
                        'number of ports as the container port range '
                        '({1})'.format(bind_parts[0], container_port)
                    )
                cport_list = list(range(cport_start, cport_end + 1))
                hport_list = list(range(hport_start, hport_end + 1))
                bind_vals = [
                    (_format_port(cport_list[x], proto), hport_list[x])
                    for x in range(len(cport_list))
                ]
            elif num_bind_parts == 3:
                host_ip, host_port = bind_parts[0:2]
                if validate_ip_addrs:
                    _validate_ip(host_ip)
                container_port, _, proto = bind_parts[2].partition('/')
                try:
                    cport_start, cport_end = _get_port_range(container_port)
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for
                    # using the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                cport_list = list(range(cport_start, cport_end + 1))
                if host_port == '':
                    hport_list = [None] * len(cport_list)
                else:
                    try:
                        hport_start, hport_end = _get_port_range(host_port)
                    except ValueError as exc:
                        # Using __str__() to avoid deprecation warning for
                        # using the message attribute of the ValueError.
                        raise SaltInvocationError(exc.__str__())
                    hport_list = list(range(hport_start, hport_end + 1))

                    if (hport_end - hport_start) != (cport_end - cport_start):
                        # Port range is mismatched
                        raise SaltInvocationError(
                            'Host port range ({0}) does not have the same '
                            'number of ports as the container port range '
                            '({1})'.format(host_port, container_port)
                        )

                bind_vals = [(
                    _format_port(val, proto),
                    (host_ip,) if hport_list[idx] is None
                        else (host_ip, hport_list[idx])
                ) for idx, val in enumerate(cport_list)]
            else:
                raise SaltInvocationError(
                    '\'{0}\' is an invalid port binding definition (at most '
                    '3 components are allowed, found {1})'.format(
                        binding, num_bind_parts
                    )
                )

            for cport, bind_def in bind_vals:
                if cport not in bindings:
                    bindings[cport] = bind_def
                else:
                    if isinstance(bindings[cport], list):
                        # Append to existing list of bindings for this
                        # container port.
                        bindings[cport].append(bind_def)
                    else:
                        bindings[cport] = [bindings[cport], bind_def]
                    for idx in range(len(bindings[cport])):
                        if bindings[cport][idx] is None:
                            # Now that this container port has
                            # multiple bindings, replace the bare
                            # None placeholder with the container
                            # port itself so every entry in the
                            # list is explicit.
                            try:
                                # Convert 1234/udp to 1234
                                bindings[cport][idx] = int(cport.split('/')[0])
                            except AttributeError:
                                # Port was tcp, the AttributeError
                                # signifies that the split failed
                                # because the port number was
                                # already defined as an integer.
                                # Just use the cport.
                                bindings[cport][idx] = cport
        val = bindings
    return val
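
A hedged sketch (not part of the source) of how a few binding strings map into
the ``bindings`` dict built above; the ports and addresses are made-up examples.

expected = {
    '8080': {8080: None},                          # passthrough, no mapping
    '8080:80': {80: 8080},                         # hostport:containerport
    '10.0.0.1:8080:80': {80: ('10.0.0.1', 8080)},  # ip:hostport:containerport
    '10.0.0.1::80': {80: ('10.0.0.1',)},           # ip::containerport, ephemeral host port
    '53:53/udp': {'53/udp': 53},                   # udp container ports keep the /udp suffix
}
# Passing '8080:80' and '8081:80' together collapses to {80: [8080, 8081]}.
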
Beispiel #20
0
def set_hwclock(clock):
    '''
    Sets the hardware clock to be either UTC or localtime

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    if salt.utils.path.which('timedatectl'):
        cmd = [
            'timedatectl', 'set-local-rtc',
            'true' if clock == 'localtime' else 'false'
        ]
        return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    else:
        os_family = __grains__['os_family']
        if os_family in ('AIX', 'NILinuxRT'):
            if clock.lower() != 'utc':
                raise SaltInvocationError('UTC is the only permitted value')
            return True

        timezone = get_zone()

        if 'Solaris' in __grains__['os_family']:
            if clock.lower() not in ('localtime', 'utc'):
                raise SaltInvocationError(
                    'localtime and UTC are the only permitted values')
            if 'sparc' in __grains__['cpuarch']:
                raise SaltInvocationError(
                    'UTC is the only choice for SPARC architecture')
            cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0

        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)

        if not os.path.exists(zonepath):
            raise CommandExecutionError(
                'Zone \'{0}\' does not exist'.format(zonepath))

        tzfile = _get_localtime_path()
        os.unlink(tzfile)
        os.symlink(zonepath, tzfile)

        if 'Arch' in __grains__['os_family']:
            cmd = [
                'timedatectl', 'set-local-rtc',
                'true' if clock == 'localtime' else 'false'
            ]
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        elif 'RedHat' in __grains__['os_family']:
            __salt__['file.sed']('/etc/sysconfig/clock', '^ZONE=.*',
                                 'ZONE="{0}"'.format(timezone))
        elif 'Suse' in __grains__['os_family']:
            __salt__['file.sed']('/etc/sysconfig/clock', '^TIMEZONE=.*',
                                 'TIMEZONE="{0}"'.format(timezone))
        elif 'Debian' in __grains__['os_family']:
            if clock == 'UTC':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
            elif clock == 'localtime':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
        elif 'Gentoo' in __grains__['os_family']:
            if clock not in ('UTC', 'localtime'):
                raise SaltInvocationError(
                    'Only \'UTC\' and \'localtime\' are allowed')
            if clock == 'localtime':
                clock = 'local'
            __salt__['file.sed']('/etc/conf.d/hwclock', '^clock=.*',
                                 'clock="{0}"'.format(clock))

    return True
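
A hedged usage sketch showing the same call driven from the Python client API
rather than the salt CLI; the minion id 'web01' is a placeholder and a running
master is assumed.

import salt.client

client = salt.client.LocalClient()
# Run the timezone.set_hwclock execution module on the targeted minion.
print(client.cmd('web01', 'timezone.set_hwclock', ['UTC']))
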
Beispiel #21
0
def set_hwclock(clock):
    """
    Sets the hardware clock to be either UTC or localtime

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    """
    if salt.utils.path.which("timedatectl"):
        cmd = [
            "timedatectl",
            "set-local-rtc",
            "true" if clock == "localtime" else "false",
        ]
        return __salt__["cmd.retcode"](cmd, python_shell=False) == 0
    else:
        os_family = __grains__["os_family"]
        if os_family in ("AIX", "NILinuxRT"):
            if clock.lower() != "utc":
                raise SaltInvocationError("UTC is the only permitted value")
            return True

        timezone = get_zone()

        if "Solaris" in __grains__["os_family"]:
            if clock.lower() not in ("localtime", "utc"):
                raise SaltInvocationError(
                    "localtime and UTC are the only permitted values")
            if "sparc" in __grains__["cpuarch"]:
                raise SaltInvocationError(
                    "UTC is the only choice for SPARC architecture")
            cmd = ["rtc", "-z", "GMT" if clock.lower() == "utc" else timezone]
            return __salt__["cmd.retcode"](cmd, python_shell=False) == 0

        zonepath = "/usr/share/zoneinfo/{0}".format(timezone)

        if not os.path.exists(zonepath):
            raise CommandExecutionError(
                "Zone '{0}' does not exist".format(zonepath))

        os.unlink("/etc/localtime")
        os.symlink(zonepath, "/etc/localtime")

        if "Arch" in __grains__["os_family"]:
            cmd = [
                "timezonectl",
                "set-local-rtc",
                "true" if clock == "localtime" else "false",
            ]
            return __salt__["cmd.retcode"](cmd, python_shell=False) == 0
        elif "RedHat" in __grains__["os_family"]:
            __salt__["file.sed"]("/etc/sysconfig/clock", "^ZONE=.*",
                                 'ZONE="{0}"'.format(timezone))
        elif "Suse" in __grains__["os_family"]:
            __salt__["file.sed"](
                "/etc/sysconfig/clock",
                "^TIMEZONE=.*",
                'TIMEZONE="{0}"'.format(timezone),
            )
        elif "Debian" in __grains__["os_family"]:
            if clock == "UTC":
                __salt__["file.sed"]("/etc/default/rcS", "^UTC=.*", "UTC=yes")
            elif clock == "localtime":
                __salt__["file.sed"]("/etc/default/rcS", "^UTC=.*", "UTC=no")
        elif "Gentoo" in __grains__["os_family"]:
            if clock not in ("UTC", "localtime"):
                raise SaltInvocationError(
                    "Only 'UTC' and 'localtime' are allowed")
            if clock == "localtime":
                clock = "local"
            __salt__["file.sed"]("/etc/conf.d/hwclock", "^clock=.*",
                                 'clock="{0}"'.format(clock))

    return True
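
As a rough standalone illustration (an assumption, not part of the module), the
timedatectl branch above reduces to something like this on a systemd host.

import subprocess

def set_hwclock_systemd(clock):
    # 'localtime' keeps the RTC in local time; anything else is treated as UTC.
    flag = "true" if clock == "localtime" else "false"
    return subprocess.call(["timedatectl", "set-local-rtc", flag]) == 0
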
Beispiel #22
0
def create(vm_):
    """
    Provision a single machine

    CLI Example:

    .. code-block:: bash

        salt-cloud -p my_profile new_node_1

    """
    name = vm_["name"]
    machine = config.get_cloud_config_value("machine",
                                            vm_,
                                            __opts__,
                                            default="")
    vm_["machine"] = machine
    host = config.get_cloud_config_value("host",
                                         vm_,
                                         __opts__,
                                         default=NotImplemented)
    vm_["cwd"] = config.get_cloud_config_value("cwd",
                                               vm_,
                                               __opts__,
                                               default="/")
    vm_["runas"] = config.get_cloud_config_value(
        "vagrant_runas", vm_, __opts__, default=os.getenv("SUDO_USER"))
    vm_["timeout"] = config.get_cloud_config_value("vagrant_up_timeout",
                                                   vm_,
                                                   __opts__,
                                                   default=300)
    vm_["vagrant_provider"] = config.get_cloud_config_value("vagrant_provider",
                                                            vm_,
                                                            __opts__,
                                                            default="")
    vm_["grains"] = {"salt-cloud:vagrant": {"host": host, "machine": machine}}

    log.info("sending 'vagrant.init %s machine=%s' command to %s", name,
             machine, host)

    local = salt.client.LocalClient()
    ret = local.cmd(host,
                    "vagrant.init", [name],
                    kwarg={
                        "vm": vm_,
                        "start": True
                    })
    log.info("response ==> %s", ret[host])

    network_mask = config.get_cloud_config_value("network_mask",
                                                 vm_,
                                                 __opts__,
                                                 default="")
    if "ssh_host" not in vm_:
        ret = local.cmd(
            host,
            "vagrant.get_ssh_config",
            [name],
            kwarg={
                "network_mask": network_mask,
                "get_private_key": True
            },
        )[host]
    with tempfile.NamedTemporaryFile() as pks:
        if "private_key" not in vm_ and ret and ret.get("private_key", False):
            pks.write(ret["private_key"])
            pks.flush()
            log.debug("wrote private key to %s", pks.name)
            vm_["key_filename"] = pks.name
        if "ssh_host" not in vm_:
            try:
                vm_.setdefault("ssh_username", ret["ssh_username"])
                if ret.get("ip_address"):
                    vm_["ssh_host"] = ret["ip_address"]
                else:  # if probe failed or not used, use Vagrant's reported ssh info
                    vm_["ssh_host"] = ret["ssh_host"]
                    vm_.setdefault("ssh_port", ret["ssh_port"])
            except (KeyError, TypeError):
                raise SaltInvocationError(
                    "Insufficient SSH addressing information for {}".format(
                        name))

        log.info(
            "Provisioning machine %s as node %s using ssh %s",
            machine,
            name,
            vm_["ssh_host"],
        )
        ret = __utils__["cloud.bootstrap"](vm_, __opts__)
        return ret
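
For orientation, a hedged sketch of the profile values this function reads via
config.get_cloud_config_value; every name and value below is a placeholder (in
a real deployment these come from a salt-cloud profile definition).

example_profile_values = {
    "machine": "default",              # -> vm_["machine"]
    "host": "vagrant-host01",          # minion that runs Vagrant for this VM
    "cwd": "/srv/vagrant/box1",        # -> vm_["cwd"]
    "vagrant_runas": "vagrant",        # -> vm_["runas"]
    "vagrant_up_timeout": 300,         # -> vm_["timeout"]
    "vagrant_provider": "virtualbox",  # -> vm_["vagrant_provider"]
    "network_mask": "192.168.0.0/16",  # used when probing for ssh_host
}
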
Beispiel #23
0
def build(
    runas,
    tgt,
    dest_dir,
    spec,
    sources,
    deps,
    env,
    template,
    saltenv="base",
    log_dir="/var/log/salt/pkgbuild",
):
    """
    Given the package destination directory, the spec file source and package
    sources, use mock to safely build the rpm defined in the spec file

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
                    https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
                    https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz

    This example command should build the libnacl package for RHEL 7 using the
    ``mock`` user and place it in /var/www/html/ on the minion.
    """
    ret = {}
    try:
        __salt__["file.chown"](path=dest_dir, user=runas, group="mock")
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    srpm_dir = os.path.join(dest_dir, "SRPMS")
    srpm_build_dir = tempfile.mkdtemp()
    try:
        srpms = make_src_pkg(srpm_build_dir, spec, sources, env, template,
                             saltenv, runas)
    except Exception as exc:  # pylint: disable=broad-except
        shutil.rmtree(srpm_build_dir)
        log.error("Failed to make src package")
        return ret

    distset = _get_distset(tgt)

    noclean = ""
    deps_dir = tempfile.mkdtemp()
    deps_list = _get_deps(deps, deps_dir, saltenv)

    retrc = 0
    for srpm in srpms:
        dbase = os.path.dirname(srpm)
        results_dir = tempfile.mkdtemp()
        try:
            __salt__["file.chown"](path=dbase, user=runas, group="mock")
            __salt__["file.chown"](path=results_dir, user=runas, group="mock")
            cmd = "mock --root={0} --resultdir={1} --init".format(
                tgt, results_dir)
            retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
            if deps_list and not deps_list.isspace():
                cmd = "mock --root={0} --resultdir={1} --install {2} {3}".format(
                    tgt, results_dir, deps_list, noclean)
                retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
                noclean += " --no-clean"

            cmd = "mock --root={0} --resultdir={1} {2} {3} {4}".format(
                tgt, results_dir, distset, noclean, srpm)
            retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
            cmdlist = [
                "rpm",
                "-qp",
                "--queryformat",
                "{0}/%{{name}}/%{{version}}-%{{release}}".format(log_dir),
                srpm,
            ]
            log_dest = __salt__["cmd.run_stdout"](cmdlist, python_shell=False)
            for filename in os.listdir(results_dir):
                full = os.path.join(results_dir, filename)
                if filename.endswith("src.rpm"):
                    sdest = os.path.join(srpm_dir, filename)
                    try:
                        __salt__["file.makedirs_perms"](name=srpm_dir,
                                                        user=runas,
                                                        group="mock")
                    except OSError as exc:
                        if exc.errno != errno.EEXIST:
                            raise
                    shutil.copy(full, sdest)
                    ret.setdefault("Source Packages", []).append(sdest)
                elif filename.endswith(".rpm"):
                    bdist = os.path.join(dest_dir, filename)
                    shutil.copy(full, bdist)
                    ret.setdefault("Packages", []).append(bdist)
                else:
                    log_file = os.path.join(log_dest, filename)
                    try:
                        __salt__["file.makedirs_perms"](name=log_dest,
                                                        user=runas,
                                                        group="mock")
                    except OSError as exc:
                        if exc.errno != errno.EEXIST:
                            raise
                    shutil.copy(full, log_file)
                    ret.setdefault("Log Files", []).append(log_file)
        except Exception as exc:  # pylint: disable=broad-except
            log.error("Error building from %s: %s", srpm, exc)
        finally:
            shutil.rmtree(results_dir)
    if retrc != 0:
        raise SaltInvocationError(
            "Building packages for destination directory {0}, spec {1}, sources {2}, failed "
            "with return error {3}, check logs for further details".format(
                dest_dir, spec, sources, retrc))
    shutil.rmtree(deps_dir)
    shutil.rmtree(srpm_build_dir)
    return ret
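
A hedged sketch of what the returned dict can look like on success; all file
names and paths below are made up.

example_ret = {
    "Source Packages": ["/var/www/html/SRPMS/python-libnacl-1.3.5-1.el7.src.rpm"],
    "Packages": ["/var/www/html/python2-libnacl-1.3.5-1.el7.noarch.rpm"],
    "Log Files": ["/var/log/salt/pkgbuild/python-libnacl/1.3.5-1.el7/build.log"],
}
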
Beispiel #24
0
def _determine_auth(**kwargs):
    '''
    Acquire Azure ARM Credentials
    '''
    if 'profile' in kwargs:
        azure_credentials = __salt__['config.option'](kwargs['profile'])
        kwargs.update(azure_credentials)

    service_principal_creds_kwargs = ['client_id', 'secret', 'tenant']
    user_pass_creds_kwargs = ['username', 'password']

    try:
        if kwargs.get('cloud_environment') and kwargs.get(
                'cloud_environment').startswith('http'):
            cloud_env = get_cloud_from_metadata_endpoint(
                kwargs['cloud_environment'])
        else:
            cloud_env_module = importlib.import_module(
                'msrestazure.azure_cloud')
            cloud_env = getattr(
                cloud_env_module,
                kwargs.get('cloud_environment', 'AZURE_PUBLIC_CLOUD'))
    except (AttributeError, ImportError, MetadataEndpointError):
        sys.exit(
            'The Azure cloud environment {0} is not available.'.format(
                kwargs['cloud_environment']))

    if set(service_principal_creds_kwargs).issubset(kwargs):
        if not (kwargs['client_id'] and kwargs['secret'] and kwargs['tenant']):
            raise SaltInvocationError(
                'The client_id, secret, and tenant parameters must all be '
                'populated if using service principals.')
        else:
            credentials = ServicePrincipalCredentials(
                kwargs['client_id'],
                kwargs['secret'],
                tenant=kwargs['tenant'],
                cloud_environment=cloud_env)
    elif set(user_pass_creds_kwargs).issubset(kwargs):
        if not (kwargs['username'] and kwargs['password']):
            raise SaltInvocationError(
                'The username and password parameters must both be '
                'populated if using username/password authentication.')
        else:
            credentials = UserPassCredentials(kwargs['username'],
                                              kwargs['password'],
                                              cloud_environment=cloud_env)
    elif 'subscription_id' in kwargs:
        credentials = MSIAuthentication(cloud_environment=cloud_env)

    else:
        raise SaltInvocationError(
            'Unable to determine credentials. Provide either a '
            'subscription_id with username and password, or client_id, '
            'secret, and tenant, or a profile with the required '
            'parameters populated.')

    if 'subscription_id' not in kwargs:
        raise SaltInvocationError('A subscription_id must be specified')

    subscription_id = salt.utils.stringutils.to_str(kwargs['subscription_id'])

    return credentials, subscription_id, cloud_env
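
A hedged sketch of a service-principal invocation; every identifier below is a
placeholder.

credentials, subscription_id, cloud_env = _determine_auth(
    client_id="11111111-2222-3333-4444-555555555555",
    secret="example-client-secret",
    tenant="66666666-7777-8888-9999-000000000000",
    subscription_id="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
    cloud_environment="AZURE_PUBLIC_CLOUD",
)
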
Beispiel #25
0
def enabled(name='allprofiles'):
    '''
    Enable one or all of the firewall profiles (Windows only)

    Args:
        name (str): The name of the profile to enable. Default is
            ``allprofiles``. Valid options are:

            - allprofiles
            - domainprofile
            - privateprofile
            - publicprofile

    Example:

    .. code-block:: yaml

        # To enable the domain profile
        enable_domain:
          win_firewall.enabled:
            - name: domainprofile

        # To enable all profiles
        enable_all:
          win_firewall.enabled:
            - name: allprofiles
    '''
    ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}

    profile_map = {
        'domainprofile': 'Domain',
        'privateprofile': 'Private',
        'publicprofile': 'Public',
        'allprofiles': 'All'
    }

    # Make sure the profile name is valid
    if name not in profile_map:
        raise SaltInvocationError('Invalid profile name: {0}'.format(name))

    current_config = __salt__['firewall.get_config']()
    if name != 'allprofiles' and profile_map[name] not in current_config:
        ret['result'] = False
        ret['comment'] = 'Profile {0} does not exist in firewall.get_config' \
                         ''.format(name)
        return ret

    for key in current_config:
        if not current_config[key]:
            if name == 'allprofiles' or key == profile_map[name]:
                ret['changes'][key] = 'enabled'

    if __opts__['test']:
        ret['result'] = not ret['changes'] or None
        ret['comment'] = ret['changes']
        ret['changes'] = {}
        return ret

    # Enable it
    if ret['changes']:
        try:
            ret['result'] = __salt__['firewall.enable'](name)
        except CommandExecutionError:
            ret['comment'] = 'Firewall Profile {0} could not be enabled' \
                             ''.format(profile_map[name])
    else:
        if name == 'allprofiles':
            msg = 'All the firewall profiles are enabled'
        else:
            msg = 'Firewall profile {0} is enabled'.format(name)
        ret['comment'] = msg

    return ret
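
As a hedged illustration of the test-mode behaviour above, this is roughly what
the state returns when test=True and only the Domain profile is currently
disabled (values inferred from the logic, not captured output).

example_test_ret = {
    "name": "domainprofile",
    "result": None,                    # pending changes -> None in test mode
    "changes": {},                     # cleared before returning in test mode
    "comment": {"Domain": "enabled"},  # the change that would be applied
}
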
Beispiel #26
0
def create_function(FunctionName,
                    Runtime,
                    Role,
                    Handler,
                    ZipFile=None,
                    S3Bucket=None,
                    S3Key=None,
                    S3ObjectVersion=None,
                    Description="",
                    Timeout=3,
                    MemorySize=128,
                    Publish=False,
                    WaitForRole=False,
                    RoleRetries=5,
                    region=None,
                    key=None,
                    keyid=None,
                    profile=None,
                    VpcConfig=None,
                    Environment=None):
    '''
    Given a valid config, create a function.

    Environment
        The parent object that contains your environment's configuration
        settings.  This is a dictionary of the form:
        {
            'Variables': {
                'VariableName': 'VariableValue'
            }
        }

        .. versionadded:: Nitrogen

    Returns ``{'created': True}`` if the function was created and
    ``{'created': False}`` if the function was not created.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lambda.create_function my_function python2.7 my_role my_file.my_function my_function.zip

    '''

    role_arn = _get_role_arn(Role,
                             region=region,
                             key=key,
                             keyid=keyid,
                             profile=profile)
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if ZipFile:
            if S3Bucket or S3Key or S3ObjectVersion:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            code = {
                'ZipFile': _filedata(ZipFile),
            }
        else:
            if not S3Bucket or not S3Key:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            code = {
                'S3Bucket': S3Bucket,
                'S3Key': S3Key,
            }
            if S3ObjectVersion:
                code['S3ObjectVersion'] = S3ObjectVersion
        kwargs = {}
        if VpcConfig is not None:
            kwargs['VpcConfig'] = _resolve_vpcconfig(VpcConfig,
                                                     region=region,
                                                     key=key,
                                                     keyid=keyid,
                                                     profile=profile)
        if Environment is not None:
            kwargs['Environment'] = Environment
        if WaitForRole:
            retrycount = RoleRetries
        else:
            retrycount = 1
        for retry in range(retrycount, 0, -1):
            try:
                func = conn.create_function(FunctionName=FunctionName,
                                            Runtime=Runtime,
                                            Role=role_arn,
                                            Handler=Handler,
                                            Code=code,
                                            Description=Description,
                                            Timeout=Timeout,
                                            MemorySize=MemorySize,
                                            Publish=Publish,
                                            **kwargs)
            except ClientError as e:
                if retry > 1 and e.response.get(
                        'Error',
                    {}).get('Code') == 'InvalidParameterValueException':
                    log.info(
                        'Function not created but IAM role may not have propagated, will retry'
                    )
                    # exponential backoff
                    time.sleep((2**(RoleRetries - retry)) +
                               (random.randint(0, 1000) / 1000))
                    continue
                else:
                    raise
            else:
                break
        if func:
            log.info('The newly created function name is %s',
                     func['FunctionName'])

            return {'created': True, 'name': func['FunctionName']}
        else:
            log.warning('Function was not created')
            return {'created': False}
    except ClientError as e:
        return {'created': False, 'error': salt.utils.boto3.get_error(e)}
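
A hedged usage sketch from Python rather than the CLI; the function name, role,
handler, zip file, and region are placeholders.

result = create_function(
    "my_function",
    "python2.7",
    "my_role",
    "my_file.my_function",
    ZipFile="my_function.zip",
    Environment={"Variables": {"STAGE": "dev"}},
    region="us-east-1",
)
# -> {'created': True, 'name': 'my_function'} on success
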
Beispiel #27
0
def unhold(name=None, pkgs=None, sources=None, **kwargs):  # pylint: disable=W0613
    """
    Set a package currently in the 'hold' state back to the 'install' state,
    meaning it will again be upgraded.

    .. versionadded:: 3001

    name
        The name of the package, e.g., 'tmux'

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.unhold <package name>

    pkgs
        A list of packages to unhold. Must be passed as a python list.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.unhold pkgs='["foo", "bar"]'
    """
    if not name and not pkgs and not sources:
        raise SaltInvocationError(
            "One of name, pkgs, or sources must be specified.")
    if pkgs and sources:
        raise SaltInvocationError(
            "Only one of pkgs or sources can be specified.")

    targets = []
    if pkgs:
        targets.extend(pkgs)
    elif sources:
        for source in sources:
            targets.append(next(iter(source)))
    else:
        targets.append(name)

    ret = {}
    pinned = _list_pinned()
    installed = list_pkgs()
    for target in targets:
        if isinstance(target, dict):
            target = next(iter(target))

        ret[target] = {
            "name": target,
            "changes": {},
            "result": False,
            "comment": ""
        }

        if target not in installed:
            ret[target][
                "comment"] = "Package {} does not have a state.".format(target)
        elif target in pinned:
            if "test" in __opts__ and __opts__["test"]:
                ret[target].update(result=None)
                ret[target][
                    "comment"] = "Package {} is set to be unheld.".format(
                        target)
            else:
                result = _unpin(target)
                if result:
                    changes = {"old": "hold", "new": "install"}
                    ret[target].update(changes=changes, result=True)
                    ret[target][
                        "comment"] = "Package {} is no longer being held.".format(
                            target)
                else:
                    ret[target].update(result=False)
                    ret[target][
                        "comment"] = "Unable to unhold package {}.".format(
                            target)
        else:
            ret[target].update(result=True)
            ret[target][
                "comment"] = "Package {} is already set not to be held.".format(
                    target)
    return ret
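
A hedged sketch of the per-package return structure for a pinned package that
is successfully unheld; 'tmux' is just an example name.

example_ret = {
    "tmux": {
        "name": "tmux",
        "changes": {"old": "hold", "new": "install"},
        "result": True,
        "comment": "Package tmux is no longer being held.",
    }
}
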
Beispiel #28
0
def update_function_code(FunctionName,
                         ZipFile=None,
                         S3Bucket=None,
                         S3Key=None,
                         S3ObjectVersion=None,
                         Publish=False,
                         region=None,
                         key=None,
                         keyid=None,
                         profile=None):
    '''
    Upload the given code to the named lambda function.

    Returns ``{'updated': True}`` if the function was updated and
    ``{'updated': False}`` if the function was not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lambda.update_function_code my_function ZipFile=function.zip

    '''

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        if ZipFile:
            if S3Bucket or S3Key or S3ObjectVersion:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            r = conn.update_function_code(FunctionName=FunctionName,
                                          ZipFile=_filedata(ZipFile),
                                          Publish=Publish)
        else:
            if not S3Bucket or not S3Key:
                raise SaltInvocationError(
                    'Either ZipFile must be specified, or '
                    'S3Bucket and S3Key must be provided.')
            args = {
                'S3Bucket': S3Bucket,
                'S3Key': S3Key,
            }
            if S3ObjectVersion:
                args['S3ObjectVersion'] = S3ObjectVersion
            r = conn.update_function_code(FunctionName=FunctionName,
                                          Publish=Publish,
                                          **args)
        if r:
            keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
                    'CodeSize', 'Description', 'Timeout', 'MemorySize',
                    'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
            return {
                'updated': True,
                'function': dict([(k, r.get(k)) for k in keys])
            }
        else:
            log.warning('Function was not updated')
            return {'updated': False}
    except ClientError as e:
        return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
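
A hedged sketch of the S3 variant of the same call; the bucket, key, and region
are placeholders.

result = update_function_code(
    "my_function",
    S3Bucket="my-artifacts-bucket",
    S3Key="builds/my_function-1.2.3.zip",
    Publish=True,
    region="us-east-1",
)
# -> {'updated': True, 'function': {...}} on success
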
Beispiel #29
0
def setvalue(*args):
    '''
    Set a value for a specific augeas path

    CLI Example:

    .. code-block:: bash

        salt '*' augeas.setvalue /files/etc/hosts/1/canonical localhost

    This will set the first entry in /etc/hosts to localhost

    CLI Example:

    .. code-block:: bash

        salt '*' augeas.setvalue /files/etc/hosts/01/ipaddr 192.168.1.1 \\
                                 /files/etc/hosts/01/canonical test

    Adds a new host to /etc/hosts the ip address 192.168.1.1 and hostname test

    CLI Example:

    .. code-block:: bash

        salt '*' augeas.setvalue prefix=/files/etc/sudoers/ \\
                 "spec[user = '******']/user" "%wheel" \\
                 "spec[user = '******']/host_group/host" 'ALL' \\
                 "spec[user = '******']/host_group/command[1]" 'ALL' \\
                 "spec[user = '******']/host_group/command[1]/tag" 'PASSWD' \\
                 "spec[user = '******']/host_group/command[2]" '/usr/bin/apt-get' \\
                 "spec[user = '******']/host_group/command[2]/tag" NOPASSWD

    Ensures that the following line is present in /etc/sudoers::

        %wheel ALL = PASSWD : ALL , NOPASSWD : /usr/bin/apt-get , /usr/bin/aptitude
    '''
    aug = Augeas()
    ret = {'retval': False}

    # Build lists (rather than filter objects) so truthiness, len() and
    # indexing behave the same on Python 2 and Python 3.
    tuples = [x for x in args if not x.startswith('prefix=')]
    prefix = [x for x in args if x.startswith('prefix=')]
    if prefix:
        if len(prefix) > 1:
            raise SaltInvocationError(
                'Only one \'prefix=\' value is permitted')
        else:
            prefix = prefix[0].split('=', 1)[1]

    if len(tuples) % 2 != 0:
        raise SaltInvocationError('Uneven number of path/value arguments')

    tuple_iter = iter(tuples)
    for path, value in zip(tuple_iter, tuple_iter):
        target_path = path
        if prefix:
            target_path = os.path.join(prefix.rstrip('/'), path.lstrip('/'))
        try:
            aug.set(target_path, str(value))
        except ValueError as err:
            ret['error'] = 'Multiple values: {0}'.format(err)

    try:
        aug.save()
        ret['retval'] = True
    except IOError as err:
        ret['error'] = str(err)
    return ret
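
For completeness, the first CLI example expressed as a direct Python call (a
sketch that assumes the module is loaded on the minion).

print(setvalue('/files/etc/hosts/1/canonical', 'localhost'))
# -> {'retval': True} when the save succeeds
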
Beispiel #30
0
def create_binding(site,
                   hostheader='',
                   ipaddress='*',
                   port=80,
                   protocol='http',
                   sslflags=0):
    '''
    Create an IIS binding.

    .. note::

        This function only validates against the binding ipaddress:port:hostheader combination,
        and will return True even if the binding already exists with a different configuration.
        It will not modify the configuration of an existing binding.

    :param str site: The IIS site name.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.
    :param str protocol: The application protocol of the binding.
    :param str sslflags: The flags representing certificate type and storage of the binding.

    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.create_binding site='site0' hostheader='example' ipaddress='*' port='80'
    '''
    pscmd = list()
    protocol = str(protocol).lower()
    sslflags = int(sslflags)
    name = _get_binding_info(hostheader, ipaddress, port)

    if protocol not in _VALID_PROTOCOLS:
        message = ("Invalid protocol '{0}' specified. Valid formats:"
                   ' {1}').format(protocol, _VALID_PROTOCOLS)
        raise SaltInvocationError(message)

    if sslflags not in _VALID_SSL_FLAGS:
        message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
                   ' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0],
                                       _VALID_SSL_FLAGS[-1])
        raise SaltInvocationError(message)

    current_bindings = list_bindings(site)

    if name in current_bindings:
        _LOG.debug("Binding already present: %s", name)
        return True

    pscmd.append("New-WebBinding -Name '{0}' -HostHeader '{1}'".format(
        site, hostheader))
    pscmd.append(" -IpAddress '{0}' -Port '{1}'".format(ipaddress, port))
    pscmd.append(" -Protocol '{0}' -SslFlags {1}".format(protocol, sslflags))

    cmd_ret = _srvmgr(str().join(pscmd))

    if cmd_ret['retcode'] == 0:
        new_bindings = list_bindings(site)

        if name in new_bindings:
            _LOG.debug('Binding created successfully: %s', name)
            return True
    _LOG.error('Unable to create binding: %s', name)
    return False
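
A hedged usage sketch for an HTTPS binding; the site name and host header are
placeholders, and sslflags=1 is assumed to enable SNI as on IIS 8 and later.

create_binding('site0', hostheader='example.com', ipaddress='*', port=443,
               protocol='https', sslflags=1)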