Example #1
 def ext_pillar(self, pillar):
     '''
     Render the external pillar data
     '''
     if 'ext_pillar' not in self.opts:
         return {}
     if not isinstance(self.opts['ext_pillar'], list):
         log.critical('The "ext_pillar" option is malformed')
         return {}
     for run in self.opts['ext_pillar']:
         if not isinstance(run, dict):
             log.critical('The "ext_pillar" option is malformed')
             return {}
         for key, val in run.items():
             if key not in self.ext_pillars:
                 err = ('Specified ext_pillar interface {0} is '
                        'unavailable').format(key)
                 log.critical(err)
                 continue
             try:
                 if isinstance(val, dict):
                     ext = self.ext_pillars[key](pillar, **val)
                 elif isinstance(val, list):
                     ext = self.ext_pillars[key](pillar, *val)
                 else:
                     ext = self.ext_pillars[key](pillar, val)
                 update(pillar, ext)
             except Exception as exc:
                 log.exception('Failed to load ext_pillar {0}: {1}'.format(key, exc))
     return pillar
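
A note on the update(pillar, ext) call above: it is salt.utils.dictupdate.update, which merges the external pillar data into the already-rendered pillar recursively instead of overwriting whole top-level keys. Below is a minimal standalone sketch of that behavior; deep_merge is an illustrative approximation (not the Salt helper) and the pillar data is hypothetical.

import copy


def deep_merge(dest, upd):
    # Recursively merge upd into dest: nested dicts are merged key by key,
    # any other value simply overwrites (approximates dictupdate.update).
    for key, val in upd.items():
        if isinstance(val, dict) and isinstance(dest.get(key), dict):
            deep_merge(dest[key], val)
        else:
            dest[key] = val
    return dest


pillar = {'app': {'port': 8080, 'workers': 4}}             # rendered pillar (hypothetical)
ext = {'app': {'port': 9090}, 'secrets': {'token': 'x'}}   # ext_pillar result (hypothetical)

merged = deep_merge(copy.deepcopy(pillar), ext)
assert merged == {'app': {'port': 9090, 'workers': 4}, 'secrets': {'token': 'x'}}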
Example #2
 def ext_pillar(self, pillar):
     '''
     Render the external pillar data
     '''
     if 'ext_pillar' not in self.opts:
         return {}
     if not isinstance(self.opts['ext_pillar'], list):
         log.critical('The "ext_pillar" option is malformed')
         return {}
     for run in self.opts['ext_pillar']:
         if not isinstance(run, dict):
             log.critical('The "ext_pillar" option is malformed')
             return {}
         for key, val in run.items():
             if key not in self.ext_pillars:
                 err = ('Specified ext_pillar interface {0} is '
                        'unavailable').format(key)
                 log.critical(err)
                 continue
             try:
                 if isinstance(val, dict):
                     ext = self.ext_pillars[key](pillar, **val)
                 elif isinstance(val, list):
                     ext = self.ext_pillars[key](pillar, *val)
                 else:
                     ext = self.ext_pillars[key](pillar, val)
                 update(pillar, ext)
             except Exception as exc:
                 log.exception('Failed to load ext_pillar {0}: {1}'.format(
                     key, exc))
     return pillar
Example #3
    def render_pillar(self, matches):
        '''
        Extract the sls pillar files from the matches and render them into the
        pillar
        '''
        pillar = {}
        errors = []
        for saltenv, pstates in matches.items():
            mods = set()
            for sls in pstates:
                pstate, mods, err = self.render_pstate(sls, saltenv, mods)

                if err:
                    errors += err

                if pstate is not None:
                    if not isinstance(pstate, dict):
                        log.error(
                            'The rendered pillar sls file, {0!r} state did '
                            'not return the expected data format. This is '
                            'a sign of a malformed pillar sls file. Returned '
                            'errors: {1}'.format(
                                sls,
                                ', '.join(['{0!r}'.format(e) for e in errors])
                            )
                        )
                        continue
                    update(pillar, pstate)

        return pillar, errors
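
For context, matches maps each salt environment to the pillar SLS files matched for the minion by the top file; a hypothetical value (environment and SLS names invented) is shown below. Each SLS is rendered by render_pstate() into a dict, and those dicts are deep-merged into the single pillar returned above.

# Hypothetical shape of the 'matches' argument to render_pillar().
matches = {
    'base': ['common', 'users'],
    'prod': ['webserver.settings'],
}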
Example #4
def absent(
        name,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the IAM role is deleted.

    name
        Name of the IAM role.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _policies_absent(name, region, key, keyid, profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _policies_detached(name, region, key, keyid, profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_disassociated(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_absent(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _role_absent(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
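
Every step in absent() repeats the same bookkeeping: merge the sub-state's changes and comment into the aggregate result and return early once a step has failed outright. A hedged refactoring sketch of that pattern follows; the helper name is invented, and dictupdate is used for every step, a small simplification over the first two steps above, which assign changes directly.

import salt.utils.dictupdate as dictupdate  # assumed importable, as in the module above


def _merge_subret(ret, _ret):
    # Fold one sub-state result into the aggregate result. Returns True when
    # the caller should return early because the sub-state failed outright.
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return True
    return False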
Example #5
def absent(
        name,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the IAM role is deleted.

    name
        Name of the IAM role.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _policies_absent(name, region, key, keyid, profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _policies_detached(name, region, key, keyid, profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_disassociated(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_absent(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _role_absent(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
Example #6
    def ext_pillar(self, pillar):
        '''
        Render the external pillar data
        '''
        if 'ext_pillar' not in self.opts:
            return {}
        if not isinstance(self.opts['ext_pillar'], list):
            log.critical('The "ext_pillar" option is malformed')
            return {}
        for run in self.opts['ext_pillar']:
            if not isinstance(run, dict):
                log.critical('The "ext_pillar" option is malformed')
                return {}
            for key, val in run.items():
                if key not in self.ext_pillars:
                    err = ('Specified ext_pillar interface {0} is '
                           'unavailable').format(key)
                    log.critical(err)
                    continue
                try:
                    try:
                        # try the new interface, which includes the minion ID
                        # as first argument
                        if isinstance(val, dict):
                            ext = self.ext_pillars[key](self.opts['id'], pillar, **val)
                        elif isinstance(val, list):
                            ext = self.ext_pillars[key](self.opts['id'], pillar, *val)
                        else:
                            ext = self.ext_pillars[key](self.opts['id'], pillar, val)
                        update(pillar, ext)

                    except TypeError as e:
                        if str(e).startswith('ext_pillar() takes exactly '):
                            log.warning('Deprecation warning: ext_pillar "{0}"'
                                        ' needs to accept minion_id as first'
                                        ' argument'.format(key))
                        else:
                            raise

                        if isinstance(val, dict):
                            ext = self.ext_pillars[key](pillar, **val)
                        elif isinstance(val, list):
                            ext = self.ext_pillars[key](pillar, *val)
                        else:
                            ext = self.ext_pillars[key](pillar, val)
                        update(pillar, ext)

                except Exception as exc:
                    log.exception(
                            'Failed to load ext_pillar {0}: {1}'.format(
                                key,
                                exc
                                )
                            )
        return pillar
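
The nested try/except above exists because ext_pillar modules gained a newer calling convention in which the minion ID is passed as the first argument. A hedged sketch of a custom external pillar written against that newer interface; the module name, returned keys, and values are all hypothetical.

# static_tags.py -- hypothetical external pillar module (new-style interface)


def ext_pillar(minion_id, pillar, *args, **kwargs):
    '''
    Receive the minion ID first, then the already-rendered pillar, then
    whatever was configured under the ext_pillar option for this module.
    '''
    # Return only the data to merge; the caller deep-merges it into pillar.
    return {'tags': {'minion': minion_id, 'extra': list(args)}}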
Example #7
def absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure the IAM role is deleted.

    name
        Name of the IAM role.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    _ret = _policies_absent(name, region, key, keyid, profile)
    ret["changes"] = _ret["changes"]
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _policies_detached(name, region, key, keyid, profile)
    ret["changes"] = _ret["changes"]
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _instance_profile_disassociated(name, region, key, keyid, profile)
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _instance_profile_absent(name, region, key, keyid, profile)
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _role_absent(name, region, key, keyid, profile)
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
    return ret
Example #8
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''helper method for present.  ensure that cloudwatch_alarms are set'''
    # load data from alarms_from_pillar
    tmp = __salt__['config.option'](alarms_from_pillar, {})
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(tmp):
        # add asg to name and description
        info['name'] = name + ' ' + info['name']
        info['attributes']['description'] = name + ' ' + info['attributes']['description']
        # add dimension attribute
        info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]}
        # set alarm
        kwargs = {
            'name': info['name'],
            'attributes': info['attributes'],
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile,
        }
        ret = __salt__['state.single']('boto_cloudwatch_alarm.present', **kwargs)
        results = next(six.itervalues(ret))
        if not results['result']:
            merged_return_value['result'] = False
        if results.get('changes', {}) != {}:
            merged_return_value['changes'][info['name']] = results['changes']
        if 'comment' in results:
            merged_return_value['comment'] += results['comment']
    return merged_return_value
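
The loop assumes every alarm entry carries a name plus an attributes dict with at least a description; a hypothetical pillar value behind alarms_from_pillar might look like the following (all names, metrics, and thresholds are invented for illustration).

# Hypothetical data shape consumed by _alarms_present().
asg_cloudwatch_alarms = {
    'cpu_high': {
        'name': 'cpu-high',
        'attributes': {
            'description': 'CPU above threshold',
            'metric': 'CPUUtilization',
            'namespace': 'AWS/EC2',
            'statistic': 'average',
            'comparison': '>=',
            'threshold': 90.0,
            'period': 300,
            'evaluation_periods': 1,
            'alarm_actions': [],
        },
    },
}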
Example #9
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''helper method for present.  ensure that cloudwatch_alarms are set'''
    # load data from alarms_from_pillar
    tmp = __salt__['config.option'](alarms_from_pillar, {})
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in tmp.items():
        # add elb to name and description
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = name + " " + info["attributes"]["description"]
        # add dimension attribute
        info["attributes"]["dimensions"] = {"LoadBalancerName": [name]}
        # set alarm
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        ret = __salt__["state.single"]('boto_cloudwatch_alarm.present', **kwargs)
    results = next(iter(ret.values()))
        if not results["result"]:
            merged_return_value["result"] = results["result"]
        if results.get("changes", {}) != {}:
            merged_return_value["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            merged_return_value["comment"] += results["comment"]
    return merged_return_value
Example #10
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''helper method for present.  ensure that cloudwatch_alarms are set'''
    # load data from alarms_from_pillar
    tmp = __salt__['config.option'](alarms_from_pillar, {})
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(tmp):
        # add elb to name and description
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = name + " " + info["attributes"]["description"]
        # add dimension attribute
        info["attributes"]["dimensions"] = {"LoadBalancerName": [name]}
        # set alarm
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        ret = __salt__["state.single"]('boto_cloudwatch_alarm.present', **kwargs)
        results = next(six.itervalues(ret))
        if not results["result"]:
            merged_return_value["result"] = results["result"]
        if results.get("changes", {}) != {}:
            merged_return_value["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            merged_return_value["comment"] += results["comment"]
    return merged_return_value
Example #11
def merge(dest, src, merge_lists=False, in_place=True):
    '''
    defaults.merge
        Allows deep merging of dicts in formulas.

    merge_lists : False
        If True, it will also merge lists instead of replace their items.

    in_place : True
        If True, it will merge into dest dict,
        if not it will make a new copy from that dict and return it.

        CLI Example:

        .. code-block:: bash

            salt '*' defaults.merge a=b d=e

    It is more typical to use this in a templating language in formulas,
    instead of directly on the command-line.
    '''
    if in_place:
        merged = dest
    else:
        merged = copy.deepcopy(dest)
    return dictupdate.update(merged, src, merge_lists=merge_lists)
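
A small sketch of the in_place switch, assuming salt.utils.dictupdate is importable; the sample dicts are hypothetical and the function body is reproduced so the example is self-contained.

import copy

import salt.utils.dictupdate as dictupdate  # assumed importable


def merge(dest, src, merge_lists=False, in_place=True):
    # Same logic as the module function above.
    if in_place:
        merged = dest
    else:
        merged = copy.deepcopy(dest)
    return dictupdate.update(merged, src, merge_lists=merge_lists)


defaults_dict = {'pkg': {'name': 'nginx'}, 'ports': [80]}
overrides = {'pkg': {'version': '1.25'}, 'ports': [443]}

merged = merge(defaults_dict, overrides, in_place=False)
assert defaults_dict == {'pkg': {'name': 'nginx'}, 'ports': [80]}   # source untouched
assert merged['pkg'] == {'name': 'nginx', 'version': '1.25'}        # nested dicts merged
assert merged['ports'] == [443]                                     # lists replaced when merge_lists=False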
Example #12
def present(
        name,
        description,
        vpc_id=None,
        rules=None,
        rules_egress=None,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the security group exists with the specified rules.

    name
        Name of the security group.

    description
        A description of this security group.

    vpc_id
        The ID of the VPC to create the security group in, if any.

    rules
        A list of ingress rule dicts.

    rules_egress
        A list of egress rule dicts.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _security_group_present(name, description, vpc_id, region, key,
                                   keyid, profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    if not rules:
        rules = []
    if not rules_egress:
        rules_egress = []
    _ret = _rules_present(name, rules, rules_egress, vpc_id, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
Example #13
def _crawl_attribute(this_data, this_attr):
    '''
    helper function to crawl an attribute specified for retrieval
    '''
    if isinstance(this_data, list):
        t_list = []
        for d in this_data:
            t_list.append(_crawl_attribute(d, this_attr))
        return t_list
    else:
        if isinstance(this_attr, dict):
            t_dict = {}
            for k in this_attr:
                if hasattr(this_data, k):
                    t_dict[k] = _crawl_attribute(getattr(this_data, k, None),
                                                 this_attr[k])
            return t_dict
        elif isinstance(this_attr, list):
            this_dict = {}
            for l in this_attr:
                this_dict = dictupdate.update(this_dict,
                                              _crawl_attribute(this_data, l))
            return this_dict
        else:
            return {
                this_attr:
                _recurse_config_to_dict(getattr(this_data, this_attr, None))
            }
Example #14
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''helper method for present.  ensure that cloudwatch_alarms are set'''
    # load data from alarms_from_pillar
    tmp = __salt__['config.option'](alarms_from_pillar, {})
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(tmp):
        # add asg to name and description
        info['name'] = name + ' ' + info['name']
        info['attributes']['description'] = name + ' ' + info['attributes']['description']
        # add dimension attribute
        info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]}
        # set alarm
        kwargs = {
            'name': info['name'],
            'attributes': info['attributes'],
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile,
        }
        ret = __salt__['state.single']('boto_cloudwatch_alarm.present', **kwargs)
        results = next(six.itervalues(ret))
        if not results['result']:
            merged_return_value['result'] = False
        if results.get('changes', {}) != {}:
            merged_return_value['changes'][info['name']] = results['changes']
        if 'comment' in results:
            merged_return_value['comment'] += results['comment']
    return merged_return_value
Example #15
def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar,
                    region, key, keyid, profile):
    '''
    helper method for present.  ensure that cloudwatch_alarms are set
    '''
    # load data from alarms_from_pillar
    tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {}))
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {}
    }
    for _, info in six.iteritems(tmp):
        # add asg to name and description
        info['name'] = name + ' ' + info['name']
        info['attributes'][
            'description'] = name + ' ' + info['attributes']['description']
        # add dimension attribute
        if 'dimensions' not in info['attributes']:
            info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]}
        scaling_policy_actions_only = True
        # replace ":self:" with our name
        for action_type in [
                'alarm_actions', 'insufficient_data_actions', 'ok_actions'
        ]:
            if action_type in info['attributes']:
                new_actions = []
                for action in info['attributes'][action_type]:
                    if 'scaling_policy' not in action:
                        scaling_policy_actions_only = False
                    if ':self:' in action:
                        action = action.replace(':self:', ':{0}:'.format(name))
                    new_actions.append(action)
                info['attributes'][action_type] = new_actions
        # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG
        if scaling_policy_actions_only and min_size_equals_max_size:
            continue
        # set alarm
        kwargs = {
            'name': info['name'],
            'attributes': info['attributes'],
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile,
        }
        results = __states__['boto_cloudwatch_alarm.present'](**kwargs)
        if not results['result']:
            merged_return_value['result'] = False
        if results.get('changes', {}) != {}:
            merged_return_value['changes'][info['name']] = results['changes']
        if 'comment' in results:
            merged_return_value['comment'] += results['comment']
    return merged_return_value
Example #16
def _alarms_present(
    name,
    min_size_equals_max_size,
    alarms,
    alarms_from_pillar,
    region,
    key,
    keyid,
    profile,
):
    """
    helper method for present.  ensure that cloudwatch_alarms are set
    """
    # load data from alarms_from_pillar
    tmp = copy.deepcopy(__salt__["config.option"](alarms_from_pillar, {}))
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {"name": name, "result": True, "comment": "", "changes": {}}
    for _, info in tmp.items():
        # add asg to name and description
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = name + " " + info["attributes"]["description"]
        # add dimension attribute
        if "dimensions" not in info["attributes"]:
            info["attributes"]["dimensions"] = {"AutoScalingGroupName": [name]}
        scaling_policy_actions_only = True
        # replace ":self:" with our name
        for action_type in ["alarm_actions", "insufficient_data_actions", "ok_actions"]:
            if action_type in info["attributes"]:
                new_actions = []
                for action in info["attributes"][action_type]:
                    if "scaling_policy" not in action:
                        scaling_policy_actions_only = False
                    if ":self:" in action:
                        action = action.replace(":self:", ":{}:".format(name))
                    new_actions.append(action)
                info["attributes"][action_type] = new_actions
        # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG
        if scaling_policy_actions_only and min_size_equals_max_size:
            continue
        # set alarm
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        results = __states__["boto_cloudwatch_alarm.present"](**kwargs)
        if not results["result"]:
            merged_return_value["result"] = False
        if results.get("changes", {}) != {}:
            merged_return_value["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            merged_return_value["comment"] += results["comment"]
    return merged_return_value
Example #17
def _alarms_present(name, alarms, alarms_from_pillar, write_capacity_units,
                    read_capacity_units, region, key, keyid, profile):
    '''helper method for present.  ensure that cloudwatch_alarms are set'''
    # load data from alarms_from_pillar
    tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {}))
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {}
    }
    for _, info in six.iteritems(tmp):
        # add dynamodb table to name and description
        info["name"] = name + " " + info["name"]
        info["attributes"][
            "description"] = name + " " + info["attributes"]["description"]
        # add dimension attribute
        info["attributes"]["dimensions"] = {"TableName": [name]}
        if info["attributes"]["metric"] == "ConsumedWriteCapacityUnits" \
           and "threshold" not in info["attributes"]:
            info["attributes"]["threshold"] = math.ceil(
                write_capacity_units * info["attributes"]["threshold_percent"])
            del info["attributes"]["threshold_percent"]
            # the write_capacity_units is given in unit / second. So we need
            # to multiply by the period to get the proper threshold.
            # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html
            info["attributes"]["threshold"] *= info["attributes"]["period"]
        if info["attributes"]["metric"] == "ConsumedReadCapacityUnits" \
           and "threshold" not in info["attributes"]:
            info["attributes"]["threshold"] = math.ceil(
                read_capacity_units * info["attributes"]["threshold_percent"])
            del info["attributes"]["threshold_percent"]
            # the read_capacity_units is given in unit / second. So we need
            # to multiply by the period to get the proper threshold.
            # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html
            info["attributes"]["threshold"] *= info["attributes"]["period"]
        # set alarm
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        results = __states__['boto_cloudwatch_alarm.present'](**kwargs)
        if not results["result"]:
            merged_return_value["result"] = results["result"]
        if results.get("changes", {}) != {}:
            merged_return_value["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            merged_return_value["comment"] += results["comment"]
    return merged_return_value
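
A worked example of the threshold arithmetic above, with invented numbers: for a table provisioned at 100 write units per second, a threshold_percent of 0.9 and an alarm period of 60 seconds, the alarm fires at ceil(100 * 0.9) * 60 = 5400 consumed units per period.

import math

# Hypothetical numbers illustrating the ConsumedWriteCapacityUnits branch above.
write_capacity_units = 100   # provisioned units per second
threshold_percent = 0.9
period = 60                  # alarm period in seconds

threshold = math.ceil(write_capacity_units * threshold_percent)  # 90 units per second
threshold *= period                                              # 5400 units per period
assert threshold == 5400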
Example #18
def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar):
    """
    helper method for present,  ensure scheduled actions are setup
    """
    tmp = copy.deepcopy(__salt__["config.option"](scheduled_actions_from_pillar, {}))
    # merge with data from state
    if scheduled_actions:
        tmp = dictupdate.update(tmp, scheduled_actions)
    return tmp
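
A hedged illustration of the merge order: pillar-provided actions form the base and anything passed directly to the state wins on key conflicts. All action names and fields below are hypothetical.

# Hypothetical pillar data referenced by scheduled_actions_from_pillar:
pillar_actions = {
    'scale_down_nightly': {'desired_capacity': 0, 'recurrence': '0 22 * * *'},
}
# Hypothetical data passed directly to the state; it overrides the pillar entry key by key:
state_actions = {
    'scale_down_nightly': {'desired_capacity': 1},
}
# After dictupdate.update(pillar_actions, state_actions) the nightly action keeps
# its recurrence from the pillar but uses desired_capacity=1 from the state.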
Example #19
def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar):
    '''
    helper method for present,  ensure scheduled actions are setup
    '''
    tmp = copy.deepcopy(
        __salt__['config.option'](scheduled_actions_from_pillar, {})
    )
    # merge with data from state
    if scheduled_actions:
        tmp = dictupdate.update(tmp, scheduled_actions)
    return tmp
Example #20
def _alarms_present(name, alarms, alarms_from_pillar,
                    write_capacity_units, read_capacity_units,
                    region, key, keyid, profile):
    '''helper method for present.  ensure that cloudwatch_alarms are set'''
    # load data from alarms_from_pillar
    tmp = copy.deepcopy(
        __salt__['config.option'](alarms_from_pillar, {})
    )
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(tmp):
        # add dynamodb table to name and description
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = name + " " + info["attributes"]["description"]
        # add dimension attribute
        info["attributes"]["dimensions"] = {"TableName": [name]}
        if info["attributes"]["metric"] == "ConsumedWriteCapacityUnits" \
           and "threshold" not in info["attributes"]:
            info["attributes"]["threshold"] = math.ceil(write_capacity_units * info["attributes"]["threshold_percent"])
            del info["attributes"]["threshold_percent"]
            # the write_capacity_units is given in unit / second. So we need
            # to multiply by the period to get the proper threshold.
            # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html
            info["attributes"]["threshold"] *= info["attributes"]["period"]
        if info["attributes"]["metric"] == "ConsumedReadCapacityUnits" \
           and "threshold" not in info["attributes"]:
            info["attributes"]["threshold"] = math.ceil(read_capacity_units * info["attributes"]["threshold_percent"])
            del info["attributes"]["threshold_percent"]
            # the read_capacity_units is given in unit / second. So we need
            # to multiply by the period to get the proper threshold.
            # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html
            info["attributes"]["threshold"] *= info["attributes"]["period"]
        # set alarm
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        results = __states__['boto_cloudwatch_alarm.present'](**kwargs)
        if not results["result"]:
            merged_return_value["result"] = results["result"]
        if results.get("changes", {}) != {}:
            merged_return_value["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            merged_return_value["comment"] += results["comment"]
    return merged_return_value
Example #21
    def test_update(self):

        # level 1 value changes
        mdict = copy.deepcopy(self.dict1)
        mdict['A'] = 'Z'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'A': 'Z'})
        self.assertEqual(res, mdict)

        # level 2 value changes
        mdict = copy.deepcopy(self.dict1)
        mdict['C']['D'] = 'Z'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'C': {'D': 'Z'}})
        self.assertEqual(res, mdict)

        # level 3 value changes
        mdict = copy.deepcopy(self.dict1)
        mdict['C']['F']['G'] = 'Z'
        res = dictupdate.update(copy.deepcopy(self.dict1),
                                {'C': {
                                    'F': {
                                        'G': 'Z'
                                    }
                                }})
        self.assertEqual(res, mdict)

        # replace a sub-dictionary
        mdict = copy.deepcopy(self.dict1)
        mdict['C'] = 'Z'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'C': 'Z'})
        self.assertEqual(res, mdict)

        # add a new scalar value
        mdict = copy.deepcopy(self.dict1)
        mdict['Z'] = 'Y'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'Z': 'Y'})
        self.assertEqual(res, mdict)

        # add a dictionary
        mdict = copy.deepcopy(self.dict1)
        mdict['Z'] = {'Y': 'X'}
        res = dictupdate.update(copy.deepcopy(self.dict1), {'Z': {'Y': 'X'}})
        self.assertEqual(res, mdict)

        # add a nested dictionary
        mdict = copy.deepcopy(self.dict1)
        mdict['Z'] = {'Y': {'X': 'W'}}
        res = dictupdate.update(copy.deepcopy(self.dict1),
                                {'Z': {
                                    'Y': {
                                        'X': 'W'
                                    }
                                }})
        self.assertEqual(res, mdict)
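
The test only makes sense against a fixture exposing an 'A' key and a nested 'C' -> 'F' -> 'G' path; one plausible setUp consistent with the assertions is sketched below. The concrete leaf values are guesses for illustration, not necessarily the actual fixture.

    def setUp(self):
        # A fixture shaped to satisfy the paths exercised in test_update();
        # the leaf values are illustrative guesses.
        self.dict1 = {
            'A': 'B',
            'C': {
                'D': 'E',
                'F': {'G': 'H', 'I': 'J'},
            },
        }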
Example #22
def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''
    helper method for present.  ensure that cloudwatch_alarms are set
    '''
    # load data from alarms_from_pillar
    tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {}))
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(tmp):
        # add asg to name and description
        info['name'] = name + ' ' + info['name']
        info['attributes']['description'] = name + ' ' + info['attributes']['description']
        # add dimension attribute
        if 'dimensions' not in info['attributes']:
            info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]}
        scaling_policy_actions_only = True
        # replace ":self:" with our name
        for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']:
            if action_type in info['attributes']:
                new_actions = []
                for action in info['attributes'][action_type]:
                    if 'scaling_policy' not in action:
                        scaling_policy_actions_only = False
                    if ':self:' in action:
                        action = action.replace(':self:', ':{0}:'.format(name))
                    new_actions.append(action)
                info['attributes'][action_type] = new_actions
        # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG
        if scaling_policy_actions_only and min_size_equals_max_size:
            continue
        # set alarm
        kwargs = {
            'name': info['name'],
            'attributes': info['attributes'],
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile,
        }
        results = __states__['boto_cloudwatch_alarm.present'](**kwargs)
        if not results['result']:
            merged_return_value['result'] = False
        if results.get('changes', {}) != {}:
            merged_return_value['changes'][info['name']] = results['changes']
        if 'comment' in results:
            merged_return_value['comment'] += results['comment']
    return merged_return_value
Example #23
def merge(dest, upd):
    '''
    defaults.merge
        Allows deep merging of dicts in formulas.

        CLI Example:

        .. code-block:: bash

            salt '*' defaults.merge a=b d=e

    It is more typical to use this in a templating language in formulas,
    instead of directly on the command-line.
    '''
    return dictupdate.update(dest, upd)
Example #24
def merge(dest, upd):
    '''
    defaults.merge
        Allows deep merging of dicts in formulas.

        CLI Example:

        .. code-block:: bash

            salt '*' defaults.merge a=b d=e

    It is more typical to use this in a templating language in formulas,
    instead of directly on the command-line.
    '''
    return dictupdate.update(dest, upd)
Example #25
    def test_update(self):

        # level 1 value changes
        mdict = copy.deepcopy(self.dict1)
        mdict['A'] = 'Z'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'A': 'Z'})
        self.assertEqual(res, mdict)

        # level 2 value changes
        mdict = copy.deepcopy(self.dict1)
        mdict['C']['D'] = 'Z'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'C': {'D': 'Z'}})
        self.assertEqual(res, mdict)

        # level 3 value changes
        mdict = copy.deepcopy(self.dict1)
        mdict['C']['F']['G'] = 'Z'
        res = dictupdate.update(
            copy.deepcopy(self.dict1),
            {'C': {'F': {'G': 'Z'}}}
        )
        self.assertEqual(res, mdict)

        # replace a sub-dictionary
        mdict = copy.deepcopy(self.dict1)
        mdict['C'] = 'Z'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'C': 'Z'})
        self.assertEqual(res, mdict)

        # add a new scalar value
        mdict = copy.deepcopy(self.dict1)
        mdict['Z'] = 'Y'
        res = dictupdate.update(copy.deepcopy(self.dict1), {'Z': 'Y'})
        self.assertEqual(res, mdict)

        # add a dictionary
        mdict = copy.deepcopy(self.dict1)
        mdict['Z'] = {'Y': 'X'}
        res = dictupdate.update(copy.deepcopy(self.dict1), {'Z': {'Y': 'X'}})
        self.assertEqual(res, mdict)

        # add a nested dictionary
        mdict = copy.deepcopy(self.dict1)
        mdict['Z'] = {'Y': {'X': 'W'}}
        res = dictupdate.update(
            copy.deepcopy(self.dict1),
            {'Z': {'Y': {'X': 'W'}}}
        )
        self.assertEqual(res, mdict)
Example #26
def merge(dest, src, merge_lists=False, in_place=True, convert_none=True):
    """
    defaults.merge
        Allows deep merging of dicts in formulas.

    merge_lists : False
        If True, it will also merge lists instead of replace their items.

    in_place : True
        If True, it will merge into dest dict,
        if not it will make a new copy from that dict and return it.

    convert_none : True
        If True, it will convert src and dest to empty dicts if they are None.
        If True and dest is None but in_place is True, raises TypeError.
        If False, None values are passed through unchanged.

        .. versionadded:: 3005

    CLI Example:

    .. code-block:: bash

        salt '*' defaults.merge '{a: b}' '{d: e}'

    It is more typical to use this in a templating language in formulas,
    instead of directly on the command-line.
    """
    # Force empty dicts if applicable (useful for cleaner templating)
    src = {} if (src is None and convert_none) else src
    if dest is None and convert_none:
        if in_place:
            raise TypeError("Can't perform in-place merge into NoneType")
        else:
            dest = {}

    if in_place:
        merged = dest
    else:
        merged = copy.deepcopy(dest)
    return dictupdate.update(merged, src, merge_lists=merge_lists)
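
A brief usage sketch of the convert_none behavior documented above, assuming the merge() defined here (and salt.utils.dictupdate) is importable; the inputs are hypothetical.

# src=None is treated as an empty dict, so dest passes through unchanged.
assert merge({'a': 1}, None, in_place=False) == {'a': 1}

# dest=None with in_place=False is converted to {} and a merged copy is returned.
assert merge(None, {'a': 1}, in_place=False) == {'a': 1}

# dest=None with in_place=True cannot be merged into and raises TypeError.
try:
    merge(None, {'a': 1})
except TypeError:
    pass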
Example #27
def present(
        name,
        policy_document=None,
        policy_document_from_pillars=None,
        path=None,
        policies=None,
        policies_from_pillars=None,
        managed_policies=None,
        create_instance_profile=True,
        region=None,
        key=None,
        keyid=None,
        profile=None,
        delete_policies=True):
    '''
    Ensure the IAM role exists.

    name
        Name of the IAM role.

    policy_document
        The policy that grants an entity permission to assume the role. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    policy_document_from_pillars
        A pillar key that contains a role policy document. The statements
        defined here will be appended with the policy document statements
        defined in the policy_document argument.

        .. versionadded:: Nitrogen

    path
        The path to the role/instance profile. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    policies
        A dict of IAM role policies.

    policies_from_pillars
        A list of pillars that contain role policy dicts. Policies in the
        pillars will be merged in the order defined in the list and key
        conflicts will be handled by later defined keys overriding earlier
        defined keys. The policies defined here will be merged with the
        policies defined in the policies argument. If keys conflict, the keys
        in the policies argument will override the keys defined in
        policies_from_pillars.

    managed_policies
        A list of (AWS or Customer) managed policies to be attached to the role.

    create_instance_profile
        A boolean of whether or not to create an instance profile and associate
        it with this role.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    delete_policies
        Deletes existing policies that are not in the given list of policies. Default
        value is ``True``. If ``False`` is specified, existing policies will not be deleted
        allowing manual modifications on the IAM role to be persistent.

        .. versionadded:: 2015.8.0
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # Build up _policy_document
    _policy_document = {}
    if policy_document_from_pillars:
        from_pillars = __salt__['pillar.get'](policy_document_from_pillars)
        if from_pillars:
            _policy_document['Version'] = from_pillars['Version']
            _policy_document.setdefault('Statement', [])
            _policy_document['Statement'].extend(from_pillars['Statement'])
    if policy_document:
        _policy_document['Version'] = policy_document['Version']
        _policy_document.setdefault('Statement', [])
        _policy_document['Statement'].extend(policy_document['Statement'])
    _ret = _role_present(name, _policy_document, path, region, key, keyid,
                         profile)

    # Build up _policies
    if not policies:
        policies = {}
    if not policies_from_pillars:
        policies_from_pillars = []
    if not managed_policies:
        managed_policies = []
    _policies = {}
    for policy in policies_from_pillars:
        _policy = __salt__['pillar.get'](policy)
        _policies.update(_policy)
    _policies.update(policies)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    if create_instance_profile:
        _ret = _instance_profile_present(name, region, key, keyid, profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
        _ret = _instance_profile_associated(name, region, key, keyid, profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
    _ret = _policies_present(name, _policies, region, key, keyid, profile,
                             delete_policies)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    _ret = _policies_attached(name, managed_policies, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
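
For clarity, the policy-document handling at the top of present() only takes the Version and concatenates the Statement lists, so pillar-sourced and state-sourced statements end up side by side. A hypothetical illustration of that merge (documents and principals invented):

# Hypothetical pillar value behind policy_document_from_pillars:
from_pillars = {
    'Version': '2012-10-17',
    'Statement': [
        {'Effect': 'Allow', 'Action': 'sts:AssumeRole',
         'Principal': {'Service': 'ec2.amazonaws.com'}},
    ],
}
# Hypothetical policy_document passed to the state:
policy_document = {
    'Version': '2012-10-17',
    'Statement': [
        {'Effect': 'Allow', 'Action': 'sts:AssumeRole',
         'Principal': {'Service': 'lambda.amazonaws.com'}},
    ],
}

_policy_document = {'Version': from_pillars['Version'], 'Statement': []}
_policy_document['Statement'].extend(from_pillars['Statement'])
_policy_document['Version'] = policy_document['Version']
_policy_document['Statement'].extend(policy_document['Statement'])
assert len(_policy_document['Statement']) == 2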
Example #28
def hosted_zone_present(name, Name=None, PrivateZone=False,
                        CallerReference=None, Comment='', VPCs=None,
                        region=None, key=None, keyid=None, profile=None):
    '''
    Ensure a hosted zone exists with the given attributes.

    name
        The name of the state definition.  This will be used as the 'CallerReference' param when
        creating the hosted zone to help ensure idempotency.

    Name
        The name of the domain. This should be a fully-specified domain, and should terminate with a
        period. This is the name you have registered with your DNS registrar. It is also the name
        you will delegate from your registrar to the Amazon Route 53 delegation servers returned in
        response to this request.  If not provided, the value of name will be used.

    PrivateZone
        Set True if creating a private hosted zone.  If true, then 'VPCs' is also required.

    Comment
        Any comments you want to include about the hosted zone.

    CallerReference
        A unique string that identifies the request and that allows create_hosted_zone() calls to be
        retried without the risk of executing the operation twice.  This helps ensure idempotency
        across state calls, but can cause issues if a zone is deleted and then an attempt is made
        to recreate it with the same CallerReference.  If not provided, a unique UUID will be
        generated at each state run, which can potentially lead to duplicate zones being created if
        the state is run again while the previous zone creation is still in PENDING status (which
        can occasionally take several minutes to clear).  Maximum length of 128.

    VPCs
        A list of dicts, each dict composed of a VPCRegion, and either a VPCId or a VPCName.
        Note that this param is ONLY used if PrivateZone == True

        VPCId
            When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
            required.  Exclusive with VPCName.

        VPCName
            When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
            required.  Exclusive with VPCId.

        VPCRegion
            When creating a private hosted zone, the region of the associated VPC is required.  If
            not provided, an effort will be made to determine it from VPCId or VPCName, if
            possible.  This will fail if a given VPCName exists in multiple regions visible to the
            bound account, in which case you'll need to provide an explicit value for VPCRegion.
    '''
    Name = Name if Name else name
    Name = _to_aws_encoding(Name)

    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    if not PrivateZone and VPCs:
        raise SaltInvocationError("Parameter 'VPCs' is invalid when creating a public zone.")
    if PrivateZone and not VPCs:
        raise SaltInvocationError("Parameter 'VPCs' is required when creating a private zone.")
    if VPCs:
        if not isinstance(VPCs, list):
            raise SaltInvocationError("Parameter 'VPCs' must be a list of dicts.")
        for v in VPCs:
            if not isinstance(v, dict) or not exactly_one((v.get('VPCId'), v.get('VPCName'))):
                raise SaltInvocationError("Parameter 'VPCs' must be a list of dicts, each composed "
                                          "of either a 'VPCId' or a 'VPCName', and optionally a "
                                          "'VPCRegion', to help distinguish between multiple matches.")
    # Massage VPCs into something AWS will accept...
    fixed_vpcs = []
    if PrivateZone:
        for v in VPCs:
            VPCId = v.get('VPCId')
            VPCName = v.get('VPCName')
            VPCRegion = v.get('VPCRegion')
            VPCs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region,
                    key=key, keyid=keyid, profile=profile).get('vpcs', [])
            if VPCRegion and VPCs:
                VPCs = [v for v in VPCs if v['region'] == VPCRegion]
            if not VPCs:
                ret['comment'] = ('A VPC matching given criteria (vpc: {0} / vpc_region: {1}) not '
                                  'found.'.format(VPCName or VPCId, VPCRegion))
                log.error(ret['comment'])
                ret['result'] = False
                return ret
            if len(VPCs) > 1:
                ret['comment'] = ('Multiple VPCs matching given criteria (vpc: {0} / vpc_region: '
                                  '{1}) found: {2}.'.format(VPCName or VPCId, VPCRegion,
                                  ', '.join([v['id'] for v in VPCs])))
                log.error(ret['comment'])
                ret['result'] = False
                return ret
            vpc = VPCs[0]
            if VPCName:
                VPCId = vpc['id']
            if not VPCRegion:
                VPCRegion = vpc['region']
            fixed_vpcs += [{'VPCId': VPCId, 'VPCRegion': VPCRegion}]

    create = False
    update_comment = False
    add_vpcs = []
    del_vpcs = []
    args = {'Name': Name, 'PrivateZone': PrivateZone,
            'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    zone = __salt__['boto3_route53.find_hosted_zone'](**args)
    if not zone:
        create = True
        # Grrrr - can only pass one VPC when initially creating a private zone...
        # The rest have to be added (one-by-one) later in a separate step.
        if len(fixed_vpcs) > 1:
            add_vpcs = fixed_vpcs[1:]
            fixed_vpcs = fixed_vpcs[:1]
        CallerReference = CallerReference if CallerReference else str(uuid.uuid4())
    else:
        # Currently the only modifiable traits about a zone are associated VPCs and the comment.
        zone = zone[0]
        if PrivateZone:
            for z in zone.get('VPCs'):
                if z not in fixed_vpcs:
                    del_vpcs += [z]
            for z in fixed_vpcs:
                if z not in zone.get('VPCs'):
                    add_vpcs += [z]
        if zone['HostedZone']['Config'].get('Comment') != Comment:
            update_comment = True

    if not (create or add_vpcs or del_vpcs or update_comment):
        ret['comment'] = 'Hosted Zone {0} already in desired state'.format(Name)
        return ret

    if create:
        if __opts__['test']:
            ret['comment'] = 'Route 53 {} hosted zone {} would be created.'.format('private' if
                    PrivateZone else 'public', Name)
            ret['result'] = None
            return ret
        vpc_id = fixed_vpcs[0].get('VPCId') if fixed_vpcs else None
        vpc_region = fixed_vpcs[0].get('VPCRegion') if fixed_vpcs else None
        newzone = __salt__['boto3_route53.create_hosted_zone'](Name=Name,
                CallerReference=CallerReference, Comment=Comment,
                PrivateZone=PrivateZone, VPCId=vpc_id, VPCRegion=vpc_region,
                region=region, key=key, keyid=keyid, profile=profile)
        if newzone:
            newzone = newzone[0]
            ret['comment'] = 'Route 53 {} hosted zone {} successfully created'.format('private' if
                    PrivateZone else 'public', Name)
            log.info(ret['comment'])
            ret['changes']['new'] = newzone
        else:
            ret['comment'] = 'Creation of Route 53 {} hosted zone {} failed'.format('private' if
                    PrivateZone else 'public', Name)
            log.error(ret['comment'])
            ret['result'] = False
            return ret

    if update_comment:
        if __opts__['test']:
            ret['comment'] = 'Route 53 {} hosted zone {} comment would be updated.'.format('private'
                    if PrivateZone else 'public', Name)
            ret['result'] = None
            return ret
        r = __salt__['boto3_route53.update_hosted_zone_comment'](Name=Name,
                Comment=Comment, PrivateZone=PrivateZone, region=region, key=key, keyid=keyid,
                profile=profile)
        if r:
            r = r[0]
            msg = 'Route 53 {} hosted zone {} comment successfully updated'.format('private' if
                    PrivateZone else 'public', Name)
            log.info(msg)
            ret['comment'] = '  '.join([ret['comment'], msg])
            ret['changes']['old'] = zone
            ret['changes']['new'] = dictupdate.update(ret['changes'].get('new', {}), r)
        else:
            ret['comment'] = 'Update of Route 53 {} hosted zone {} comment failed'.format('private'
                    if PrivateZone else 'public', Name)
            log.error(ret['comment'])
            ret['result'] = False
            return ret

    if add_vpcs or del_vpcs:
        if __opts__['test']:
            ret['comment'] = 'Route 53 {} hosted zone {} associated VPCs would be updated.'.format(
                    'private' if PrivateZone else 'public', Name)
            ret['result'] = None
            return ret
        all_added = True
        all_deled = True
        for vpc in add_vpcs:  # Add any new first to avoid the "can't delete last VPC" errors.
            r = __salt__['boto3_route53.associate_vpc_with_hosted_zone'](Name=Name,
                    VPCId=vpc['VPCId'], VPCRegion=vpc['VPCRegion'], region=region, key=key,
                    keyid=keyid, profile=profile)
            if not r:
                all_added = False
        for vpc in del_vpcs:
            r = __salt__['boto3_route53.disassociate_vpc_from_hosted_zone'](Name=Name,
                    VPCId=vpc['VPCId'], VPCRegion=vpc['VPCRegion'], region=region, key=key,
                    keyid=keyid, profile=profile)
            if not r:
                all_deled = False

        ret['changes']['old'] = zone
        ret['changes']['new'] = __salt__['boto3_route53.find_hosted_zone'](**args)
        if all_added and all_deled:
            msg = 'Route 53 {} hosted zone {} associated VPCs successfully updated'.format('private'
                    if PrivateZone else 'public', Name)
            log.info(msg)
            ret['comment'] = '  '.join([ret['comment'], msg])
        else:
            ret['comment'] = 'Update of Route 53 {} hosted zone {} associated VPCs failed'.format(
                    'private' if PrivateZone else 'public', Name)
            log.error(ret['comment'])
            ret['result'] = False
            return ret

    return ret
Example #29
def _tags_present(
    name,
    tags,
    vpc_id=None,
    vpc_name=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    helper function to validate tags are correct
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if tags:
        sg = __salt__["boto_secgroup.get_config"](
            name=name,
            group_id=None,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
            vpc_id=vpc_id,
            vpc_name=vpc_name,
        )
        if not sg:
            ret["comment"] = "{} security group configuration could not be retrieved.".format(
                name)
            ret["result"] = False
            return ret
        tags_to_add = tags
        tags_to_update = {}
        tags_to_remove = []
        if sg.get("tags"):
            for existing_tag in sg["tags"]:
                if existing_tag not in tags:
                    if existing_tag not in tags_to_remove:
                        tags_to_remove.append(existing_tag)
                else:
                    if tags[existing_tag] != sg["tags"][existing_tag]:
                        tags_to_update[existing_tag] = tags[existing_tag]
                    tags_to_add.pop(existing_tag)
        if tags_to_remove:
            if __opts__["test"]:
                msg = "The following tag{} set to be removed: {}.".format(
                    ("s are" if len(tags_to_remove) > 1 else " is"),
                    ", ".join(tags_to_remove),
                )
                ret["comment"] = " ".join([ret["comment"], msg])
                ret["result"] = None
            else:
                temp_ret = __salt__["boto_secgroup.delete_tags"](
                    tags_to_remove,
                    name=name,
                    group_id=None,
                    vpc_name=vpc_name,
                    vpc_id=vpc_id,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile,
                )
                if not temp_ret:
                    ret["result"] = False
                    ret["comment"] = " ".join([
                        ret["comment"],
                        "Error attempting to delete tags {}.".format(
                            tags_to_remove),
                    ])
                    return ret
                if "old" not in ret["changes"]:
                    ret["changes"] = dictupdate.update(ret["changes"],
                                                       {"old": {
                                                           "tags": {}
                                                       }})
                for rem_tag in tags_to_remove:
                    ret["changes"]["old"]["tags"][rem_tag] = sg["tags"][
                        rem_tag]
        if tags_to_add or tags_to_update:
            if __opts__["test"]:
                if tags_to_add:
                    msg = "The following tag{} set to be added: {}.".format(
                        ("s are" if len(tags_to_add.keys()) > 1 else " is"),
                        ", ".join(tags_to_add.keys()),
                    )
                    ret["comment"] = " ".join([ret["comment"], msg])
                    ret["result"] = None
                if tags_to_update:
                    msg = "The following tag {} set to be updated: {}.".format(
                        ("values are"
                         if len(tags_to_update.keys()) > 1 else "value is"),
                        ", ".join(tags_to_update.keys()),
                    )
                    ret["comment"] = " ".join([ret["comment"], msg])
                    ret["result"] = None
            else:
                all_tag_changes = dictupdate.update(tags_to_add,
                                                    tags_to_update)
                temp_ret = __salt__["boto_secgroup.set_tags"](
                    all_tag_changes,
                    name=name,
                    group_id=None,
                    vpc_name=vpc_name,
                    vpc_id=vpc_id,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile,
                )
                if not temp_ret:
                    ret["result"] = False
                    msg = "Error attempting to set tags."
                    ret["comment"] = " ".join([ret["comment"], msg])
                    return ret
                if "old" not in ret["changes"]:
                    ret["changes"] = dictupdate.update(ret["changes"],
                                                       {"old": {
                                                           "tags": {}
                                                       }})
                if "new" not in ret["changes"]:
                    ret["changes"] = dictupdate.update(ret["changes"],
                                                       {"new": {
                                                           "tags": {}
                                                       }})
                for tag in all_tag_changes:
                    ret["changes"]["new"]["tags"][tag] = tags[tag]
                    if "tags" in sg:
                        if sg["tags"]:
                            if tag in sg["tags"]:
                                ret["changes"]["old"]["tags"][tag] = sg[
                                    "tags"][tag]
        if not tags_to_update and not tags_to_remove and not tags_to_add:
            ret["comment"] = " ".join(
                [ret["comment"], "Tags are already set."])
    return ret
Example #30
def present(name=None,
            table_name=None,
            region=None,
            key=None,
            keyid=None,
            profile=None,
            read_capacity_units=None,
            write_capacity_units=None,
            alarms=None,
            alarms_from_pillar="boto_dynamodb_alarms",
            hash_key=None,
            hash_key_data_type=None,
            range_key=None,
            range_key_data_type=None,
            local_indexes=None,
            global_indexes=None):
    '''
    Ensure the DynamoDB table exists.  Note: all properties of the table
    other than provisioned throughput can only be set during table creation.
    Adding or changing indexes or key schema cannot be done after table
    creation.

    name
        Name of the DynamoDB table

    table_name
        Name of the DynamoDB table (deprecated)

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    read_capacity_units
        The read throughput for this table

    write_capacity_units
        The write throughput for this table

    hash_key
        The name of the attribute that will be used as the hash key
        for this table

    hash_key_data_type
        The DynamoDB datatype of the hash key

    range_key
        The name of the attribute that will be used as the range key
        for this table

    range_key_data_type
        The DynamoDB datatype of the range key

    local_indexes
        The local indexes you would like to create

    global_indexes
        The global indexes you would like to create
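
    Example (a minimal sketch; the table name, key schema, capacities and
    region below are illustrative values, not defaults):

    .. code-block:: yaml

        my_dynamodb_table:
          boto_dynamodb.present:
            - name: my-table
            - read_capacity_units: 1
            - write_capacity_units: 1
            - hash_key: id
            - hash_key_data_type: S
            - region: us-east-1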
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if table_name:
        ret['warnings'] = ['boto_dynamodb.present: `table_name` is deprecated.'
                           ' Please use `name` instead.']
        ret['name'] = table_name
        name = table_name

    comments = []
    changes_old = {}
    changes_new = {}

    # Ensure DynamoDB table exists
    table_exists = __salt__['boto_dynamodb.exists'](
        name,
        region,
        key,
        keyid,
        profile
    )
    if not table_exists:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'DynamoDB table {0} is set to be created.'.format(name)
            return ret

        is_created = __salt__['boto_dynamodb.create_table'](
            name,
            region,
            key,
            keyid,
            profile,
            read_capacity_units,
            write_capacity_units,
            hash_key,
            hash_key_data_type,
            range_key,
            range_key_data_type,
            local_indexes,
            global_indexes
        )
        if not is_created:
            ret['result'] = False
            ret['comment'] = 'Failed to create table {0}'.format(name)
            return ret

        comments.append('DynamoDB table {0} was successfully created'.format(name))
        changes_new['table'] = name
        changes_new['read_capacity_units'] = read_capacity_units
        changes_new['write_capacity_units'] = write_capacity_units
        changes_new['hash_key'] = hash_key
        changes_new['hash_key_data_type'] = hash_key_data_type
        changes_new['range_key'] = range_key
        changes_new['range_key_data_type'] = range_key_data_type
        changes_new['local_indexes'] = local_indexes
        changes_new['global_indexes'] = global_indexes
    else:
        comments.append('DynamoDB table {0} exists'.format(name))

    # Ensure DynamoDB table provisioned throughput matches
    description = __salt__['boto_dynamodb.describe'](
        name,
        region,
        key,
        keyid,
        profile
    )
    provisioned_throughput = description.get('Table', {}).get('ProvisionedThroughput', {})
    current_write_capacity_units = provisioned_throughput.get('WriteCapacityUnits')
    current_read_capacity_units = provisioned_throughput.get('ReadCapacityUnits')
    throughput_matches = (current_write_capacity_units == write_capacity_units and
                          current_read_capacity_units == read_capacity_units)
    if not throughput_matches:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'DynamoDB table {0} is set to be updated.'.format(name)
            return ret

        is_updated = __salt__['boto_dynamodb.update'](
            name,
            throughput={
                'read': read_capacity_units,
                'write': write_capacity_units,
            },
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if not is_updated:
            ret['result'] = False
            ret['comment'] = 'Failed to update table {0}'.format(name)
            return ret

        comments.append('DynamoDB table {0} was successfully updated'.format(name))
        changes_old['read_capacity_units'] = current_read_capacity_units
        changes_old['write_capacity_units'] = current_write_capacity_units
        changes_new['read_capacity_units'] = read_capacity_units
        changes_new['write_capacity_units'] = write_capacity_units
    else:
        comments.append('DynamoDB table {0} throughput matches'.format(name))

    _ret = _alarms_present(name, alarms, alarms_from_pillar,
                           write_capacity_units, read_capacity_units,
                           region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret

    # Ensure backup datapipeline is present
    datapipeline_configs = copy.deepcopy(
        __salt__['pillar.get']('boto_dynamodb_backup_configs', [])
    )
    for config in datapipeline_configs:
        datapipeline_ret = _ensure_backup_datapipeline_present(
            name=name,
            schedule_name=config['name'],
            period=config['period'],
            utc_hour=config['utc_hour'],
            s3_base_location=config['s3_base_location'],
        )
        if datapipeline_ret['result']:
            comments.append(datapipeline_ret['comment'])
            if datapipeline_ret.get('changes'):
                ret['changes']['backup_datapipeline_{0}'.format(config['name'])] = \
                    datapipeline_ret.get('changes')
        else:
            ret['comment'] = datapipeline_ret['comment']
            return ret

    if changes_old:
        ret['changes']['old'] = changes_old
    if changes_new:
        ret['changes']['new'] = changes_new
    ret['comment'] = ',\n'.join(comments)
    return ret
Example #31
def route_table_present(name,
                        vpc_name=None,
                        vpc_id=None,
                        routes=None,
                        subnet_ids=None,
                        subnet_names=None,
                        tags=None,
                        region=None,
                        key=None,
                        keyid=None,
                        profile=None):
    '''
    Ensure route table with routes exists and is associated to a VPC.


    Example::

    .. code-block:: yaml

        boto_vpc.route_table_present:
            - name: my_route_table
            - vpc_id: vpc-123456
            - routes:
              - destination_cidr_block: 0.0.0.0/0
                instance_id: i-123456
                interface_id: eni-123456
            - subnet_names:
              - subnet1
              - subnet2

    name
        Name of the route table.

    vpc_name
        Name of the VPC with which the route table should be associated.

    vpc_id
        Id of the VPC with which the route table should be associated.
        Either vpc_name or vpc_id must be provided.

    routes
        A list of routes.

    subnet_ids
        A list of subnet ids to associate

    subnet_names
        A list of subnet names to associate

    tags
        A list of tags.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    _ret = _route_table_present(name=name,
                                vpc_name=vpc_name,
                                vpc_id=vpc_id,
                                tags=tags,
                                region=region,
                                key=key,
                                keyid=keyid,
                                profile=profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _routes_present(route_table_name=name,
                           routes=routes,
                           tags=tags,
                           region=region,
                           key=key,
                           keyid=keyid,
                           profile=profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _subnets_present(route_table_name=name,
                            subnet_ids=subnet_ids,
                            subnet_names=subnet_names,
                            tags=tags,
                            region=region,
                            key=key,
                            keyid=keyid,
                            profile=profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    return ret
Example #32
def present(
        name,
        launch_config_name,
        availability_zones,
        min_size,
        max_size,
        launch_config=None,
        desired_capacity=None,
        load_balancers=None,
        default_cooldown=None,
        health_check_type=None,
        health_check_period=None,
        placement_group=None,
        vpc_zone_identifier=None,
        subnet_names=None,
        tags=None,
        termination_policies=None,
        termination_policies_from_pillar='boto_asg_termination_policies',
        suspended_processes=None,
        scaling_policies=None,
        scaling_policies_from_pillar='boto_asg_scaling_policies',
        scheduled_actions=None,
        scheduled_actions_from_pillar='boto_asg_scheduled_actions',
        alarms=None,
        alarms_from_pillar='boto_asg_alarms',
        region=None,
        key=None,
        keyid=None,
        profile=None,
        notification_arn=None,
        notification_arn_from_pillar='boto_asg_notification_arn',
        notification_types=None,
        notification_types_from_pillar='boto_asg_notification_types'):
    '''
    Ensure the autoscale group exists.

    name
        Name of the autoscale group.

    launch_config_name
        Name of the launch config to use for the group.  Or, if
        ``launch_config`` is specified, this will be the launch config
        name's prefix.  (see below)

    launch_config
        A dictionary of launch config attributes.  If specified, a
        launch config will be used or created, matching this set
        of attributes, and the autoscale group will be set to use
        that launch config.  The launch config name will be the
        ``launch_config_name`` followed by a hyphen followed by a hash
        of the ``launch_config`` dict contents.
        Example:

        .. code-block:: yaml

            my_asg:
              boto_asg.present:
              - launch_config:
                - ebs_optimized: false
                - instance_profile_name: my_iam_profile
                - kernel_id: ''
                - ramdisk_id: ''
                - key_name: my_ssh_key
                - image_name: aws2015091-hvm
                - instance_type: c3.xlarge
                - instance_monitoring: false
                - security_groups:
                  - my_sec_group_01
                  - my_sec_group_02

    availability_zones
        List of availability zones for the group.

    min_size
        Minimum size of the group.

    max_size
        Maximum size of the group.

    desired_capacity
        The desired capacity of the group.

    load_balancers
        List of load balancers for the group. Once set this can not be
        updated (Amazon restriction).

    default_cooldown
        Number of seconds after a Scaling Activity completes before any further
        scaling activities can start.

    health_check_type
        The service you want the health status from, Amazon EC2 or Elastic Load
        Balancer (EC2 or ELB).

    health_check_period
        Length of time in seconds after a new EC2 instance comes into service
        that Auto Scaling starts checking its health.

    placement_group
        Physical location of your cluster placement group created in Amazon
        EC2. Once set this can not be updated (Amazon restriction).

    vpc_zone_identifier
        A list of the subnet identifiers of the Virtual Private Cloud.

    subnet_names
        For VPC, a list of subnet names (NOT subnet IDs) to deploy into.
        Exclusive with vpc_zone_identifier.

    tags
        A list of tags. Example:

        .. code-block:: yaml

            - key: 'key'
              value: 'value'
              propagate_at_launch: true

    termination_policies
        A list of termination policies. Valid values are:

        * ``OldestInstance``
        * ``NewestInstance``
        * ``OldestLaunchConfiguration``
        * ``ClosestToNextInstanceHour``
        * ``Default``

        If no value is specified, the ``Default`` value is used.

    termination_policies_from_pillar:
        name of pillar dict that contains termination policy settings.   Termination policies
        defined for this specific state will override those from pillar.

    suspended_processes
        List of processes to be suspended. see
        http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html

    scaling_policies
        List of scaling policies.  Each policy is a dict of key-values described by
        https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy

    scaling_policies_from_pillar:
        name of pillar dict that contains scaling policy settings.   Scaling policies defined for
        this specific state will override those from pillar.

    scheduled_actions:
        a dictionary of scheduled actions. Each key is the name of scheduled action and each value
        is dictionary of options. For example:

        .. code-block:: yaml

            - scheduled_actions:
                scale_up_at_10:
                    desired_capacity: 4
                    min_size: 3
                    max_size: 5
                    recurrence: "0 9 * * 1-5"
                scale_down_at_7:
                    desired_capacity: 1
                    min_size: 1
                    max_size: 1
                    recurrence: "0 19 * * 1-5"

    scheduled_actions_from_pillar:
        name of pillar dict that contains scheduled_actions settings. Scheduled actions
        for this specific state will override those from pillar.

    alarms:
        a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG.
        All attributes should be specified except for dimension which will be
        automatically set to this ASG.

        See the :mod:`salt.states.boto_cloudwatch_alarm` state for information
        about these attributes.

        If any alarm actions include  ":self:" this will be replaced with the asg name.
        For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will
        map to the arn for this asg's scaling policy named "ScaleUp".
        In addition, any alarms that have only scaling_policy as actions will be ignored if
        min_size is equal to max_size for this ASG.

    alarms_from_pillar:
        name of pillar dict that contains alarm settings.   Alarms defined for this specific
        state will override those from pillar.

    region
        The region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    notification_arn
        The AWS arn that notifications will be sent to

    notification_arn_from_pillar
        name of the pillar dict that contains ``notification_arn`` settings.  A
        ``notification_arn`` defined for this specific state will override the
        one from pillar.

    notification_types
        A list of event names that will trigger a notification.  The list of valid
        notification types is:

        * ``autoscaling:EC2_INSTANCE_LAUNCH``
        * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR``
        * ``autoscaling:EC2_INSTANCE_TERMINATE``
        * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR``
        * ``autoscaling:TEST_NOTIFICATION``

    notification_types_from_pillar
        name of the pillar dict that contains ``notification_types`` settings.
        ``notification_types`` defined for this specific state will override those
        from the pillar.
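
    A minimal usage sketch (the group name, launch config name, zones and
    sizes below are illustrative):

    .. code-block:: yaml

        my_asg:
          boto_asg.present:
            - launch_config_name: my_launch_config
            - availability_zones:
              - us-east-1a
              - us-east-1b
            - min_size: 1
            - max_size: 3
            - desired_capacity: 2
            - region: us-east-1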
    '''
    if vpc_zone_identifier and subnet_names:
        raise SaltInvocationError('vpc_zone_identifier and subnet_names are '
                                  'mutually exclusive options.')
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if subnet_names:
        vpc_zone_identifier = []
        for i in subnet_names:
            r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region,
                                                     key=key, keyid=keyid, profile=profile)
            if 'error' in r:
                ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error'])
                ret['result'] = False
                return ret
            if 'id' not in r:
                ret['comment'] = 'Subnet {0} does not exist.'.format(i)
                ret['result'] = False
                return ret
            vpc_zone_identifier.append(r['id'])
    if vpc_zone_identifier:
        vpc_id = __salt__['boto_vpc.get_subnet_association'](
            vpc_zone_identifier,
            region,
            key,
            keyid,
            profile
        )
        vpc_id = vpc_id.get('vpc_id')
        log.debug('Auto Scaling Group {0} is associated with VPC ID {1}'
                  .format(name, vpc_id))
    else:
        vpc_id = None
        log.debug('Auto Scaling Group {0} has no VPC Association'
                  .format(name))
    # if launch_config is defined, manage the launch config first.
    # hash the launch_config dict to create a unique name suffix and then
    # ensure it is present
    if launch_config:
        launch_config_name = launch_config_name + '-' + hashlib.md5(str(launch_config)).hexdigest()
        args = {
            'name': launch_config_name,
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile
        }

        for index, item in enumerate(launch_config):
            if 'image_name' in item:
                image_name = item['image_name']
                iargs = {'ami_name': image_name, 'region': region, 'key': key,
                         'keyid': keyid, 'profile': profile}
                image_ids = __salt__['boto_ec2.find_images'](**iargs)
                if len(image_ids):
                    launch_config[index]['image_id'] = image_ids[0]
                else:
                    launch_config[index]['image_id'] = image_name
                del launch_config[index]['image_name']
                break

        if vpc_id:
            log.debug('Auto Scaling Group {0} is associated with a VPC'.format(name))
            # locate the security groups attribute of a launch config
            sg_index = None
            for index, item in enumerate(launch_config):
                if 'security_groups' in item:
                    sg_index = index
                    break
            # if security groups exist within launch_config then convert
            # to group ids
            if sg_index is not None:
                log.debug('security group associations found in launch config')
                _group_ids = __salt__['boto_secgroup.convert_to_group_ids'](
                    launch_config[sg_index]['security_groups'], vpc_id=vpc_id,
                    region=region, key=key, keyid=keyid, profile=profile
                )
                launch_config[sg_index]['security_groups'] = _group_ids

        for d in launch_config:
            args.update(d)
        if not __opts__['test']:
            lc_ret = __states__['boto_lc.present'](**args)
            if lc_ret['result'] is True and lc_ret['changes']:
                if 'launch_config' not in ret['changes']:
                    ret['changes']['launch_config'] = {}
                ret['changes']['launch_config'] = lc_ret['changes']

    asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile)
    termination_policies = _determine_termination_policies(
        termination_policies,
        termination_policies_from_pillar
    )
    scaling_policies = _determine_scaling_policies(
        scaling_policies,
        scaling_policies_from_pillar
    )
    scheduled_actions = _determine_scheduled_actions(
        scheduled_actions,
        scheduled_actions_from_pillar
    )
    if asg is None:
        ret['result'] = False
        ret['comment'] = 'Failed to check autoscale group existence.'
    elif not asg:
        if __opts__['test']:
            msg = 'Autoscale group set to be created.'
            ret['comment'] = msg
            ret['result'] = None
            return ret
        notification_arn, notification_types = _determine_notification_info(
            notification_arn,
            notification_arn_from_pillar,
            notification_types,
            notification_types_from_pillar
        )
        created = __salt__['boto_asg.create'](name, launch_config_name,
                                              availability_zones, min_size,
                                              max_size, desired_capacity,
                                              load_balancers, default_cooldown,
                                              health_check_type,
                                              health_check_period,
                                              placement_group,
                                              vpc_zone_identifier, tags,
                                              termination_policies,
                                              suspended_processes,
                                              scaling_policies, scheduled_actions,
                                              region, notification_arn,
                                              notification_types,
                                              key, keyid, profile)
        if created:
            ret['changes']['old'] = None
            asg = __salt__['boto_asg.get_config'](name, region, key, keyid,
                                                  profile)
            ret['changes']['new'] = asg
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to create autoscale group'
    else:
        need_update = False
        # If any of these attributes can't be modified after creation
        # time, we should remove them from the dict.
        if scaling_policies:
            for policy in scaling_policies:
                if 'min_adjustment_step' not in policy:
                    policy['min_adjustment_step'] = None
        if scheduled_actions:
            for s_name, action in six.iteritems(scheduled_actions):
                if 'end_time' not in action:
                    action['end_time'] = None
        config = {
            'launch_config_name': launch_config_name,
            'availability_zones': availability_zones,
            'min_size': min_size,
            'max_size': max_size,
            'desired_capacity': desired_capacity,
            'default_cooldown': default_cooldown,
            'health_check_type': health_check_type,
            'health_check_period': health_check_period,
            'vpc_zone_identifier': vpc_zone_identifier,
            'tags': tags,
            'termination_policies': termination_policies,
            'suspended_processes': suspended_processes,
            'scaling_policies': scaling_policies,
            'scheduled_actions': scheduled_actions
        }
        #ensure that we reset termination_policies to default if none are specified
        if not termination_policies:
            config['termination_policies'] = ['Default']
        if suspended_processes is None:
            config['suspended_processes'] = []
        # ensure that we delete scaling_policies if none are specified
        if scaling_policies is None:
            config['scaling_policies'] = []
        # ensure that we delete scheduled_actions if none are specified
        if scheduled_actions is None:
            config['scheduled_actions'] = {}
        # allow defaults on start_time
        for s_name, action in six.iteritems(scheduled_actions):
            if 'start_time' not in action:
                asg_action = asg['scheduled_actions'].get(s_name, {})
                if 'start_time' in asg_action:
                    del asg_action['start_time']
        # note: do not loop using "key, value" - this can modify the value of
        # the aws access key
        for asg_property, value in six.iteritems(config):
            # Only modify values being specified; introspection is difficult
            # otherwise since it's hard to track default values, which will
            # always be returned from AWS.
            if value is None:
                continue
            if asg_property in asg:
                _value = asg[asg_property]
                if not _recursive_compare(value, _value):
                    log_msg = '{0} asg_property differs from {1}'
                    log.debug(log_msg.format(value, _value))
                    need_update = True
                    break
        if need_update:
            if __opts__['test']:
                msg = 'Autoscale group set to be updated.'
                ret['comment'] = msg
                ret['result'] = None
                return ret
            # add in alarms
            notification_arn, notification_types = _determine_notification_info(
                notification_arn,
                notification_arn_from_pillar,
                notification_types,
                notification_types_from_pillar
            )
            updated, msg = __salt__['boto_asg.update'](
                name,
                launch_config_name,
                availability_zones,
                min_size,
                max_size,
                desired_capacity=desired_capacity,
                load_balancers=load_balancers,
                default_cooldown=default_cooldown,
                health_check_type=health_check_type,
                health_check_period=health_check_period,
                placement_group=placement_group,
                vpc_zone_identifier=vpc_zone_identifier,
                tags=tags,
                termination_policies=termination_policies,
                suspended_processes=suspended_processes,
                scaling_policies=scaling_policies,
                scheduled_actions=scheduled_actions,
                region=region,
                notification_arn=notification_arn,
                notification_types=notification_types,
                key=key,
                keyid=keyid,
                profile=profile
            )
            if asg['launch_config_name'] != launch_config_name:
                # delete the old launch_config_name
                deleted = __salt__['boto_asg.delete_launch_configuration'](
                    asg['launch_config_name'],
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile
                )
                if deleted:
                    if 'launch_config' not in ret['changes']:
                        ret['changes']['launch_config'] = {}
                    ret['changes']['launch_config']['deleted'] = asg['launch_config_name']
            if updated:
                ret['changes']['old'] = asg
                asg = __salt__['boto_asg.get_config'](name, region, key, keyid,
                                                      profile)
                ret['changes']['new'] = asg
                ret['comment'] = 'Updated autoscale group.'
            else:
                ret['result'] = False
                ret['comment'] = msg
        else:
            ret['comment'] = 'Autoscale group present.'
    # add in alarms
    _ret = _alarms_present(
        name, min_size == max_size, alarms, alarms_from_pillar, region, key,
        keyid, profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
Example #33
def function_present(name, FunctionName, Runtime, Role, Handler, ZipFile=None, S3Bucket=None,
            S3Key=None, S3ObjectVersion=None,
            Description='', Timeout=3, MemorySize=128,
            Permissions=None, RoleRetries=5,
            region=None, key=None, keyid=None, profile=None):
    '''
    Ensure function exists.

    name
        The name of the state definition

    FunctionName
        Name of the Function.

    Runtime
        The Runtime environment for the function. One of
        'nodejs', 'java8', or 'python2.7'

    Role
        The name or ARN of the IAM role that the function assumes when it executes your
        function to access any other AWS resources.

    Handler
        The function within your code that Lambda calls to begin execution. For Node.js it is the
        module-name.*export* value in your function. For Java, it can be package.classname::handler or
        package.class-name.

    ZipFile
        A path to a .zip file containing your deployment package. If this is
        specified, S3Bucket and S3Key must not be specified.

    S3Bucket
        Amazon S3 bucket name where the .zip file containing your package is
        stored. If this is specified, S3Key must be specified and ZipFile must
        NOT be specified.

    S3Key
        The Amazon S3 object (the deployment package) key name you want to
        upload. If this is specified, S3Bucket must be specified and ZipFile must
        NOT be specified.

    S3ObjectVersion
        The version of S3 object to use. Optional, should only be specified if
        S3Bucket and S3Key are specified.

    Description
        A short, user-defined function description. Lambda does not use this value. Assign a meaningful
        description as you see fit.

    Timeout
        The function execution time at which Lambda should terminate this function. Because the execution
        time has cost implications, we recommend you set this value based on your expected execution time.
        The default is 3 seconds.

    MemorySize
        The amount of memory, in MB, your function is given. Lambda uses this memory size to infer
        the amount of CPU and memory allocated to your function. Your function use-case determines your
        CPU and memory requirements. For example, a database operation might need less memory compared
        to an image processing function. The default value is 128 MB. The value must be a multiple of
        64 MB.

    Permissions
        A list of permission definitions to be added to the function's policy

    RoleRetries
        IAM Roles may take some time to propagate to all regions once created.
        During that time function creation may fail; this state will
        automatically retry this number of times. The default is 5.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
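
    Example (a minimal sketch; the function name, role, handler, bucket and
    key below are illustrative):

    .. code-block:: yaml

        ensure_my_function:
          boto_lambda.function_present:
            - FunctionName: my_function
            - Runtime: python2.7
            - Role: my_lambda_role
            - Handler: my_module.handler
            - S3Bucket: my-deployment-bucket
            - S3Key: my_function.zip
            - region: us-east-1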
    '''
    ret = {'name': FunctionName,
           'result': True,
           'comment': '',
           'changes': {}
           }

    if Permissions is not None:
        if isinstance(Permissions, string_types):
            Permissions = json.loads(Permissions)
        required_keys = set(('Action', 'Principal'))
        optional_keys = set(('SourceArn', 'SourceAccount'))
        for sid, permission in Permissions.iteritems():
            keyset = set(permission.keys())
            if not keyset.issuperset(required_keys):
                raise SaltInvocationError('{0} are required for each permission '
                           'specification'.format(', '.join(required_keys)))
            keyset = keyset - required_keys
            keyset = keyset - optional_keys
            if bool(keyset):
                raise SaltInvocationError('Invalid permission value {0}'.format(', '.join(keyset)))

    r = __salt__['boto_lambda.function_exists'](FunctionName=FunctionName, region=region,
                                    key=key, keyid=keyid, profile=profile)

    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Failed to create function: {0}.'.format(r['error']['message'])
        return ret

    if not r.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Function {0} is set to be created.'.format(FunctionName)
            ret['result'] = None
            return ret
        r = __salt__['boto_lambda.create_function'](FunctionName=FunctionName, Runtime=Runtime,
                                                    Role=Role, Handler=Handler,
                                                    ZipFile=ZipFile, S3Bucket=S3Bucket,
                                                    S3Key=S3Key,
                                                    S3ObjectVersion=S3ObjectVersion,
                                                    Description=Description,
                                                    Timeout=Timeout, MemorySize=MemorySize,
                                                    WaitForRole=True,
                                                    RoleRetries=RoleRetries,
                                                    region=region, key=key,
                                                    keyid=keyid, profile=profile)
        if not r.get('created'):
            ret['result'] = False
            ret['comment'] = 'Failed to create function: {0}.'.format(r['error']['message'])
            return ret

        if Permissions:
            for sid, permission in Permissions.iteritems():
                r = __salt__['boto_lambda.add_permission'](FunctionName=FunctionName,
                                                       StatementId=sid,
                                                       **permission)
                if not r.get('updated'):
                    ret['result'] = False
                    ret['comment'] = 'Failed to create function: {0}.'.format(r['error']['message'])

        _describe = __salt__['boto_lambda.describe_function'](FunctionName,
                           region=region, key=key, keyid=keyid, profile=profile)
        _describe['function']['Permissions'] = __salt__['boto_lambda.get_permissions'](FunctionName,
                           region=region, key=key, keyid=keyid, profile=profile)['permissions']
        ret['changes']['old'] = {'function': None}
        ret['changes']['new'] = _describe
        ret['comment'] = 'Function {0} created.'.format(FunctionName)
        return ret

    ret['comment'] = os.linesep.join([ret['comment'], 'Function {0} is present.'.format(FunctionName)])
    ret['changes'] = {}
    # function exists, ensure config matches
    _ret = _function_config_present(FunctionName, Role, Handler, Description, Timeout,
                                  MemorySize, region, key, keyid, profile)
    if not _ret.get('result'):
        ret['result'] = False
        ret['comment'] = _ret['comment']
        ret['changes'] = {}
        return ret
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    _ret = _function_code_present(FunctionName, ZipFile, S3Bucket, S3Key, S3ObjectVersion,
                                 region, key, keyid, profile)
    if not _ret.get('result'):
        ret['result'] = False
        ret['comment'] = _ret['comment']
        ret['changes'] = {}
        return ret
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    _ret = _function_permissions_present(FunctionName, Permissions,
                                 region, key, keyid, profile)
    if not _ret.get('result'):
        ret['result'] = False
        ret['comment'] = _ret['comment']
        ret['changes'] = {}
        return ret
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    return ret
Example #34
def key_present(
        name,
        policy,
        description=None,
        key_usage=None,
        grants=None,
        manage_grants=False,
        key_rotation=False,
        enabled=True,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the KMS key exists. KMS keys can not be deleted, so this function
    must be used to ensure the key is enabled or disabled.

    name
        Name of the key.

    policy
        Key usage policy.

    description
        Description of the key.

    key_usage
        Specifies the intended use of the key. Can only be set on creation,
        defaults to ENCRYPT_DECRYPT, which is also the only supported option.

    grants
        A list of grants to apply to the key. Not currently implemented.

    manage_grants
        Whether or not to manage grants. False by default, which will not
        manage any grants.

    key_rotation
        Whether or not key rotation is enabled for the key. False by default.

    enabled
        Whether or not the key is enabled. True by default.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
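
    Example (a minimal sketch; the key name is illustrative, the account ID
    is a placeholder, and the policy shown is only the stock "enable IAM
    user permissions" statement, not a recommendation. Depending on the
    execution module, the policy may need to be passed as a JSON string
    rather than a YAML structure):

    .. code-block:: yaml

        my-kms-key:
          boto_kms.key_present:
            - policy:
                Version: '2012-10-17'
                Statement:
                  - Sid: Enable IAM User Permissions
                    Effect: Allow
                    Principal:
                      AWS: arn:aws:iam::111122223333:root
                    Action: 'kms:*'
                    Resource: '*'
            - description: example key
            - key_rotation: True
            - region: us-east-1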
    '''
    if not policy:
        raise SaltInvocationError('policy is a required argument.')
    if grants and not isinstance(grants, list):
        raise SaltInvocationError('grants must be a list.')
    if not isinstance(manage_grants, bool):
        raise SaltInvocationError('manage_grants must be true or false.')
    if not isinstance(key_rotation, bool):
        raise SaltInvocationError('key_rotation must be true or false.')
    if not isinstance(enabled, bool):
        raise SaltInvocationError('enabled must be true or false.')
    # TODO: support grant from pillars.
    # TODO: support key policy from pillars.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _key_present(
        name, policy, description, key_usage, key_rotation, enabled, region,
        key, keyid, profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    # TODO: add grants_present function
    return ret
Example #35
def function_present(name,
                     FunctionName,
                     Runtime,
                     Role,
                     Handler,
                     ZipFile=None,
                     S3Bucket=None,
                     S3Key=None,
                     S3ObjectVersion=None,
                     Description='',
                     Timeout=3,
                     MemorySize=128,
                     Permissions=None,
                     RoleRetries=5,
                     region=None,
                     key=None,
                     keyid=None,
                     profile=None,
                     VpcConfig=None,
                     Environment=None):
    '''
    Ensure function exists.

    name
        The name of the state definition

    FunctionName
        Name of the Function.

    Runtime
        The Runtime environment for the function. One of
        'nodejs', 'java8', or 'python2.7'

    Role
        The name or ARN of the IAM role that the function assumes when it executes your
        function to access any other AWS resources.

    Handler
        The function within your code that Lambda calls to begin execution. For Node.js it is the
        module-name.*export* value in your function. For Java, it can be package.classname::handler or
        package.class-name.

    ZipFile
        A path to a .zip file containing your deployment package. If this is
        specified, S3Bucket and S3Key must not be specified.

    S3Bucket
        Amazon S3 bucket name where the .zip file containing your package is
        stored. If this is specified, S3Key must be specified and ZipFile must
        NOT be specified.

    S3Key
        The Amazon S3 object (the deployment package) key name you want to
        upload. If this is specified, S3Bucket must be specified and ZipFile must
        NOT be specified.

    S3ObjectVersion
        The version of S3 object to use. Optional, should only be specified if
        S3Bucket and S3Key are specified.

    Description
        A short, user-defined function description. Lambda does not use this value. Assign a meaningful
        description as you see fit.

    Timeout
        The function execution time at which Lambda should terminate this function. Because the execution
        time has cost implications, we recommend you set this value based on your expected execution time.
        The default is 3 seconds.

    MemorySize
        The amount of memory, in MB, your function is given. Lambda uses this memory size to infer
        the amount of CPU and memory allocated to your function. Your function use-case determines your
        CPU and memory requirements. For example, a database operation might need less memory compared
        to an image processing function. The default value is 128 MB. The value must be a multiple of
        64 MB.

    VpcConfig
        If your Lambda function accesses resources in a VPC, you must provide this parameter
        identifying the list of security group IDs/Names and subnet IDs/Name.  These must all belong
        to the same VPC.  This is a dict of the form:

        .. code-block:: yaml

            VpcConfig:
                SecurityGroupNames:
                - mysecgroup1
                - mysecgroup2
                SecurityGroupIds:
                - sg-abcdef1234
                SubnetNames:
                - mysubnet1
                SubnetIds:
                - subnet-1234abcd
                - subnet-abcd1234

        If VpcConfig is provided at all, you MUST pass at least one security group and one subnet.

    Permissions
        A list of permission definitions to be added to the function's policy

    RoleRetries
        IAM Roles may take some time to propagate to all regions once created.
        During that time function creation may fail; this state will
        automatically retry this number of times. The default is 5.

    Environment
        The parent object that contains your environment's configuration
        settings.  This is a dictionary of the form:

        .. code-block:: python

            {
                'Variables': {
                    'VariableName': 'VariableValue'
                }
            }

        .. versionadded:: 2017.7.0

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
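
    Example with VPC access and environment variables (a minimal sketch;
    every name and path below is illustrative):

    .. code-block:: yaml

        ensure_my_vpc_function:
          boto_lambda.function_present:
            - FunctionName: my_function
            - Runtime: python2.7
            - Role: my_lambda_role
            - Handler: my_module.handler
            - ZipFile: /srv/salt/files/my_function.zip
            - VpcConfig:
                SubnetNames:
                  - mysubnet1
                SecurityGroupNames:
                  - mysecgroup1
            - Environment:
                Variables:
                  LOG_LEVEL: info
            - region: us-east-1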
    '''
    ret = {'name': FunctionName, 'result': True, 'comment': '', 'changes': {}}

    if Permissions is not None:
        if isinstance(Permissions, six.string_types):
            Permissions = json.loads(Permissions)
        required_keys = set(('Action', 'Principal'))
        optional_keys = set(('SourceArn', 'SourceAccount', 'Qualifier'))
        for sid, permission in six.iteritems(Permissions):
            keyset = set(permission.keys())
            if not keyset.issuperset(required_keys):
                raise SaltInvocationError(
                    '{0} are required for each permission '
                    'specification'.format(', '.join(required_keys)))
            keyset = keyset - required_keys
            keyset = keyset - optional_keys
            if bool(keyset):
                raise SaltInvocationError(
                    'Invalid permission value {0}'.format(', '.join(keyset)))

    r = __salt__['boto_lambda.function_exists'](FunctionName=FunctionName,
                                                region=region,
                                                key=key,
                                                keyid=keyid,
                                                profile=profile)

    if 'error' in r:
        ret['result'] = False
        ret['comment'] = ('Failed to create function: '
                          '{0}.'.format(r['error']['message']))
        return ret

    if not r.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Function {0} is set to be created.'.format(
                FunctionName)
            ret['result'] = None
            return ret
        r = __salt__['boto_lambda.create_function'](
            FunctionName=FunctionName,
            Runtime=Runtime,
            Role=Role,
            Handler=Handler,
            ZipFile=ZipFile,
            S3Bucket=S3Bucket,
            S3Key=S3Key,
            S3ObjectVersion=S3ObjectVersion,
            Description=Description,
            Timeout=Timeout,
            MemorySize=MemorySize,
            VpcConfig=VpcConfig,
            Environment=Environment,
            WaitForRole=True,
            RoleRetries=RoleRetries,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile)
        if not r.get('created'):
            ret['result'] = False
            ret['comment'] = ('Failed to create function: '
                              '{0}.'.format(r['error']['message']))
            return ret

        if Permissions:
            for sid, permission in six.iteritems(Permissions):
                r = __salt__['boto_lambda.add_permission'](
                    FunctionName=FunctionName,
                    StatementId=sid,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile,
                    **permission)
                if not r.get('updated'):
                    ret['result'] = False
                    ret['comment'] = ('Failed to create function: '
                                      '{0}.'.format(r['error']['message']))

        _describe = __salt__['boto_lambda.describe_function'](FunctionName,
                                                              region=region,
                                                              key=key,
                                                              keyid=keyid,
                                                              profile=profile)
        _describe['function']['Permissions'] = (
            __salt__['boto_lambda.get_permissions'](
                FunctionName,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile)['permissions'])
        ret['changes']['old'] = {'function': None}
        ret['changes']['new'] = _describe
        ret['comment'] = 'Function {0} created.'.format(FunctionName)
        return ret

    ret['comment'] = os.linesep.join(
        [ret['comment'], 'Function {0} is present.'.format(FunctionName)])
    ret['changes'] = {}
    # function exists, ensure config matches
    _ret = _function_config_present(FunctionName, Role, Handler, Description,
                                    Timeout, MemorySize, VpcConfig,
                                    Environment, region, key, keyid, profile,
                                    RoleRetries)
    if not _ret.get('result'):
        ret['result'] = _ret.get('result', False)
        ret['comment'] = _ret['comment']
        ret['changes'] = {}
        return ret
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    _ret = _function_code_present(FunctionName, ZipFile, S3Bucket, S3Key,
                                  S3ObjectVersion, region, key, keyid, profile)
    if not _ret.get('result'):
        ret['result'] = _ret.get('result', False)
        ret['comment'] = _ret['comment']
        ret['changes'] = {}
        return ret
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    _ret = _function_permissions_present(FunctionName, Permissions, region,
                                         key, keyid, profile)
    if not _ret.get('result'):
        ret['result'] = _ret.get('result', False)
        ret['comment'] = _ret['comment']
        ret['changes'] = {}
        return ret
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    return ret
Example #36
def present(name=None,
            table_name=None,
            region=None,
            key=None,
            keyid=None,
            profile=None,
            read_capacity_units=None,
            write_capacity_units=None,
            alarms=None,
            alarms_from_pillar="boto_dynamodb_alarms",
            hash_key=None,
            hash_key_data_type=None,
            range_key=None,
            range_key_data_type=None,
            local_indexes=None,
            global_indexes=None,
            backup_configs_from_pillars='boto_dynamodb_backup_configs'):
    '''
    Ensure the DynamoDB table exists. Table throughput can be updated after
    table creation.

    Global secondary indexes (GSIs) are managed with some exceptions:
    * If a GSI deletion is detected, a failure will occur (deletes should be
      done manually in the AWS console).
    * If multiple GSIs are added in a single Salt call, a failure will occur
      (boto supports one creation at a time). Note that this only applies after
      table creation; multiple GSIs can be created during table creation.
    * Updates to existing GSIs are limited to read/write capacity only
      (DynamoDB limitation).

    name
        Name of the DynamoDB table

    table_name
        Name of the DynamoDB table (deprecated)

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    read_capacity_units
        The read throughput for this table

    write_capacity_units
        The write throughput for this table

    hash_key
        The name of the attribute that will be used as the hash key
        for this table

    hash_key_data_type
        The DynamoDB datatype of the hash key

    range_key
        The name of the attribute that will be used as the range key
        for this table

    range_key_data_type
        The DynamoDB datatype of the range key

    local_indexes
        The local indexes you would like to create

    global_indexes
        The global indexes you would like to create

    backup_configs_from_pillars
        Pillars to use to configure DataPipeline backups
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if table_name:
        ret['warnings'] = ['boto_dynamodb.present: `table_name` is deprecated.'
                           ' Please use `name` instead.']
        ret['name'] = table_name
        name = table_name

    comments = []
    changes_old = {}
    changes_new = {}

    # Ensure DynamoDB table exists
    table_exists = __salt__['boto_dynamodb.exists'](
        name,
        region,
        key,
        keyid,
        profile
    )
    if not table_exists:
        if __opts__['test']:
            ret['result'] = None
            comments.append('DynamoDB table {0} is set to be created.'.format(name))
        else:
            is_created = __salt__['boto_dynamodb.create_table'](
                name,
                region,
                key,
                keyid,
                profile,
                read_capacity_units,
                write_capacity_units,
                hash_key,
                hash_key_data_type,
                range_key,
                range_key_data_type,
                local_indexes,
                global_indexes
            )
            if not is_created:
                ret['result'] = False
                ret['comment'] = 'Failed to create table {0}'.format(name)
                _add_changes(ret, changes_old, changes_new)
                return ret

            comments.append('DynamoDB table {0} was successfully created'.format(name))
            changes_new['table'] = name
            changes_new['read_capacity_units'] = read_capacity_units
            changes_new['write_capacity_units'] = write_capacity_units
            changes_new['hash_key'] = hash_key
            changes_new['hash_key_data_type'] = hash_key_data_type
            changes_new['range_key'] = range_key
            changes_new['range_key_data_type'] = range_key_data_type
            changes_new['local_indexes'] = local_indexes
            changes_new['global_indexes'] = global_indexes
    else:
        comments.append('DynamoDB table {0} exists'.format(name))

    # Ensure DynamoDB table provisioned throughput matches
    description = __salt__['boto_dynamodb.describe'](
        name,
        region,
        key,
        keyid,
        profile
    )
    provisioned_throughput = description.get('Table', {}).get('ProvisionedThroughput', {})
    current_write_capacity_units = provisioned_throughput.get('WriteCapacityUnits')
    current_read_capacity_units = provisioned_throughput.get('ReadCapacityUnits')
    throughput_matches = (current_write_capacity_units == write_capacity_units and
                          current_read_capacity_units == read_capacity_units)
    if not throughput_matches:
        if __opts__['test']:
            ret['result'] = None
            comments.append('DynamoDB table {0} is set to be updated.'.format(name))
        else:
            is_updated = __salt__['boto_dynamodb.update'](
                name,
                throughput={
                    'read': read_capacity_units,
                    'write': write_capacity_units,
                },
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if not is_updated:
                ret['result'] = False
                ret['comment'] = 'Failed to update table {0}'.format(name)
                _add_changes(ret, changes_old, changes_new)
                return ret

            comments.append('DynamoDB table {0} was successfully updated'.format(name))
            changes_old['read_capacity_units'] = current_read_capacity_units
            changes_old['write_capacity_units'] = current_write_capacity_units
            changes_new['read_capacity_units'] = read_capacity_units
            changes_new['write_capacity_units'] = write_capacity_units
    else:
        comments.append('DynamoDB table {0} throughput matches'.format(name))

    provisioned_indexes = description.get('Table', {}).get('GlobalSecondaryIndexes', [])

    _ret = _global_indexes_present(provisioned_indexes, global_indexes, changes_old,
                                   changes_new, comments, name, region, key, keyid,
                                   profile)
    if not _ret['result']:
        comments.append(_ret['comment'])
        ret['result'] = _ret['result']
        if ret['result'] is False:
            ret['comment'] = ',\n'.join(comments)
            _add_changes(ret, changes_old, changes_new)
            return ret

    _ret = _alarms_present(name, alarms, alarms_from_pillar,
                           write_capacity_units, read_capacity_units,
                           region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    comments.append(_ret['comment'])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            ret['comment'] = ',\n'.join(comments)
            _add_changes(ret, changes_old, changes_new)
            return ret

    # Ensure backup datapipeline is present
    datapipeline_configs = copy.deepcopy(
        __salt__['pillar.get'](backup_configs_from_pillars, [])
    )
    for config in datapipeline_configs:
        datapipeline_ret = _ensure_backup_datapipeline_present(
            name=name,
            schedule_name=config['name'],
            period=config['period'],
            utc_hour=config['utc_hour'],
            s3_base_location=config['s3_base_location'],
        )
        # Add comments and changes if successful changes were made (True for live mode,
        # None for test mode).
        if datapipeline_ret['result'] in [True, None]:
            ret['result'] = datapipeline_ret['result']
            comments.append(datapipeline_ret['comment'])
            if datapipeline_ret.get('changes'):
                ret['changes']['backup_datapipeline_{0}'.format(config['name'])] = \
                    datapipeline_ret.get('changes')
        else:
            ret['comment'] = ',\n'.join([ret['comment'], datapipeline_ret['comment']])
            _add_changes(ret, changes_old, changes_new)
            return ret

    ret['comment'] = ',\n'.join(comments)
    _add_changes(ret, changes_old, changes_new)
    return ret
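
The throughput check above reduces to comparing the requested read/write capacity with what the describe call reports under ``Table.ProvisionedThroughput``. A small stand-alone sketch of that comparison, using a hypothetical description dict:

def throughput_matches(description, read_capacity_units, write_capacity_units):
    # Mirrors the comparison in the state: pull the current values out of
    # the describe() result and compare them with the requested capacities.
    provisioned = description.get('Table', {}).get('ProvisionedThroughput', {})
    return (provisioned.get('ReadCapacityUnits') == read_capacity_units and
            provisioned.get('WriteCapacityUnits') == write_capacity_units)

# Hypothetical describe() output, for illustration only.
desc = {'Table': {'ProvisionedThroughput': {'ReadCapacityUnits': 5,
                                            'WriteCapacityUnits': 10}}}
print(throughput_matches(desc, 5, 10))   # True  -> no update needed
print(throughput_matches(desc, 20, 10))  # False -> update() would be called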
Example #37
def _key_present(
        name,
        policy,
        description,
        key_usage,
        key_rotation,
        enabled,
        region,
        key,
        keyid,
        profile):
    ret = {'result': True, 'comment': '', 'changes': {}}
    alias = 'alias/{0}'.format(name)
    r = __salt__['boto_kms.key_exists'](alias, region, key, keyid, profile)
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Error when attempting to find key: {0}.'.format(
            r['error']['message']
        )
        return ret
    if not r['result']:
        if __opts__['test']:
            ret['comment'] = 'Key is set to be created.'
            ret['result'] = None
            return ret
        rc = __salt__['boto_kms.create_key'](
            policy, description, key_usage, region, key, keyid, profile
        )
        if 'error' in rc:
            ret['result'] = False
            ret['comment'] = 'Failed to create key: {0}'.format(
                rc['error']['message']
            )
            return ret
        key_metadata = rc['key_metadata']
        kms_key_id = key_metadata['KeyId']
        rn = __salt__['boto_kms.create_alias'](
            alias, kms_key_id, region, key, keyid, profile
        )
        if 'error' in rn:
            # We can't recover from this. KMS only exposes enable/disable
            # and disable is not necessarily a great action here. AWS sucks
            # for not including alias in the create_key call.
            msg = ('Failed to create key alias for key_id {0}.'
                   ' This resource will be left dangling. Please clean'
                   ' up manually. Error: {1}')
            ret['result'] = False
            ret['comment'] = msg.format(kms_key_id, rn['error']['message'])
            return ret
        ret['changes']['old'] = {'key': None}
        ret['changes']['new'] = {'key': name}
        ret['comment'] = 'Key {0} created.'.format(name)
    else:
        rd = __salt__['boto_kms.describe_key'](
            alias, region, key, keyid, profile
        )
        if 'error' in rd:
            ret['result'] = False
            ret['comment'] = 'Failed to update key: {0}.'.format(
                rd['error']['message']
            )
            return ret
        key_metadata = rd['key_metadata']
        _ret = _key_description(
            key_metadata, description, region, key, keyid, profile
        )
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
        _ret = _key_policy(
            key_metadata, policy, region, key, keyid, profile
        )
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
    # Actions that need to occur whether creating or updating
    _ret = _key_enabled(
        key_metadata, enabled, region, key, keyid, profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _key_rotation(
        key_metadata, key_rotation, region, key, keyid, profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
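
Each ``boto_kms`` call here returns a dict that may carry an ``error`` key, so the response has to be checked before fields such as ``key_metadata`` are read. A tiny sketch of that guard with a hypothetical failing response; ``_failed`` is an illustration, not part of the state above.

def _failed(ret, resp, action):
    # Returns True (and records the failure) when the module call reported
    # an error; only then is it unsafe to read the rest of the response.
    if 'error' in resp:
        ret['result'] = False
        ret['comment'] = 'Failed to {0}: {1}.'.format(
            action, resp['error']['message'])
        return True
    return False

ret = {'result': True, 'comment': '', 'changes': {}}
resp = {'error': {'message': 'AccessDenied'}}  # hypothetical failure
if not _failed(ret, resp, 'create key'):
    key_id = resp['key_metadata']['KeyId']
print(ret['comment'])  # Failed to create key: AccessDenied.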
Example #38
def present(
        name,
        policy_document=None,
        path=None,
        policies=None,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the IAM role exists.

    name
        Name of the IAM role.

    policy_document
        The policy that grants an entity permission to assume the role. (See http://boto.readthedocs.org/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    path
        The path to the instance profile. (See http://boto.readthedocs.org/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    policies
        A dict of IAM role policies.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    _ret = _role_present(name, policy_document, path, region, key, keyid,
                         profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if _ret['result'] is not None:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_present(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if _ret['result'] is not None:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_associated(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if _ret['result'] is not None:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _policies_present(name, policies, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if _ret['result'] is not None:
        ret['result'] = _ret['result']
    return ret
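
The structure repeated across these states is: call a ``_*_present`` helper that returns its own ``result``/``comment``/``changes``, merge the changes with a recursive dict merge (``salt.utils.dictupdate.update`` in the real code), join the comments, and stop on failure. A condensed stand-alone sketch of that aggregation; the merge function and helper below are simplified stand-ins, not Salt code.

def _merge(dest, upd):
    # Simplified recursive merge in the spirit of dictupdate.update():
    # nested dicts are merged, everything else is overwritten.
    for key, val in upd.items():
        if isinstance(val, dict) and isinstance(dest.get(key), dict):
            _merge(dest[key], val)
        else:
            dest[key] = val
    return dest

def _fake_subcheck(label, ok=True):
    # Stand-in for helpers such as _role_present / _policies_present.
    return {'result': ok,
            'comment': '{0} checked.'.format(label),
            'changes': {label: {'new': True}}}

ret = {'name': 'myrole', 'result': True, 'comment': '', 'changes': {}}
for label in ('role', 'instance_profile', 'policies'):
    _ret = _fake_subcheck(label)
    ret['changes'] = _merge(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']]).strip()
    if not _ret['result']:
        ret['result'] = _ret['result']
        break
print(ret['changes'])
print(ret['comment'])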
Example #39
def group_present(name, policies=None, policies_from_pillars=None, users=None, region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the IAM group is present

    name (string)
        The name of the new group.

    policies (dict)
        A dict of IAM group policy documents.

    policies_from_pillars (list)
        A list of pillars that contain role policy dicts. Policies in the
        pillars will be merged in the order defined in the list and key
        conflicts will be handled by later defined keys overriding earlier
        defined keys. The policies defined here will be merged with the
        policies defined in the policies argument. If keys conflict, the keys
        in the policies argument will override the keys defined in
        policies_from_pillars.

    users (list)
        A list of users to be added to the group.

    region (string)
        Region to connect to.

    key (string)
        Secret key to be used.

    keyid (string)
        Access key to be used.

    profile (dict)
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if not policies:
        policies = {}
    if not policies_from_pillars:
        policies_from_pillars = []
    _policies = {}
    for policy in policies_from_pillars:
        _policy = __salt__['pillar.get'](policy)
        _policies.update(_policy)
    _policies.update(policies)
    exists = __salt__['boto_iam.get_group'](group_name=name, region=region, key=key, keyid=keyid, profile=profile)
    if not exists:
        if __opts__['test']:
            ret['comment'] = 'IAM group {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        created = __salt__['boto_iam.create_group'](group_name=name, region=region, key=key, keyid=keyid, profile=profile)
        if not created:
            ret['comment'] = 'Failed to create IAM group {0}.'.format(name)
            ret['result'] = False
            return ret
        ret['changes']['group'] = created
        ret['comment'] = os.linesep.join([ret['comment'], 'Group {0} has been created.'.format(name)])
    else:
        ret['comment'] = os.linesep.join([ret['comment'], 'Group {0} is present.'.format(name)])
    # Group exists, ensure group policies and users are set.
    _ret = _group_policies_present(
        name, _policies, region, key, keyid, profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        return ret
    if users:
        log.debug('Users are: {0}.'.format(users))
        group_result = __salt__['boto_iam.get_group'](group_name=name, region=region, key=key, keyid=keyid, profile=profile)
        ret = _case_group(ret, users, name, group_result, region, key, keyid, profile)
    return ret
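
The policy merge above is plain dict layering: pillar dicts listed in ``policies_from_pillars`` are applied in order, then the explicit ``policies`` argument is applied last so its keys win on conflict. A short sketch with hypothetical pillar data; ``pillar_data`` stands in for the ``pillar.get`` lookups.

# Hypothetical pillar contents keyed by pillar name.
pillar_data = {
    'common_policies': {'read-only': {'Version': '2012-10-17'}},
    'team_policies': {'deploy': {'Version': '2012-10-17'}},
}
policies_from_pillars = ['common_policies', 'team_policies']
policies = {'deploy': {'Version': '2012-10-17', 'Statement': []}}

_policies = {}
for pillar_key in policies_from_pillars:
    # Later pillars override earlier ones on key conflicts.
    _policies.update(pillar_data.get(pillar_key, {}))
# Explicit policies override anything that came from pillars.
_policies.update(policies)
print(sorted(_policies))    # ['deploy', 'read-only']
print(_policies['deploy'])  # the explicitly passed version wins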
Example #40
def user_present(name, policies=None, policies_from_pillars=None, password=None, path=None, region=None, key=None,
                 keyid=None, profile=None):
    '''
    Ensure the IAM user is present

    name (string)
        The name of the new user.

    policies (dict)
        A dict of IAM group policy documents.

    policies_from_pillars (list)
        A list of pillars that contain role policy dicts. Policies in the
        pillars will be merged in the order defined in the list and key
        conflicts will be handled by later defined keys overriding earlier
        defined keys. The policies defined here will be merged with the
        policies defined in the policies argument. If keys conflict, the keys
        in the policies argument will override the keys defined in
        policies_from_pillars.

    password (string)
        The password for the new user. Must comply with account policy.

    path (string)
        The path of the user. Default is '/'

    region (string)
        Region to connect to.

    key (string)
        Secret key to be used.

    keyid (string)
        Access key to be used.

    profile (dict)
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if not policies:
        policies = {}
    if not policies_from_pillars:
        policies_from_pillars = []
    _policies = {}
    for policy in policies_from_pillars:
        _policy = __salt__['pillar.get'](policy)
        _policies.update(_policy)
    _policies.update(policies)
    exists = __salt__['boto_iam.get_user'](name, region, key, keyid, profile)
    if not exists:
        if __opts__['test']:
            ret['comment'] = 'IAM user {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        created = __salt__['boto_iam.create_user'](name, path, region, key, keyid, profile)
        if created:
            ret['changes']['user'] = created
            ret['comment'] = os.linesep.join([ret['comment'], 'User {0} has been created.'.format(name)])
            if password:
                ret = _case_password(ret, name, password, region, key, keyid, profile)
            _ret = _user_policies_present(name, _policies, region, key, keyid, profile)
            ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
            ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    else:
        ret['comment'] = os.linesep.join([ret['comment'], 'User {0} is present.'.format(name)])
        if password:
            ret = _case_password(ret, name, password, region, key, keyid, profile)
        _ret = _user_policies_present(name, _policies, region, key, keyid, profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    return ret
Example #41
def present(
        name,
        description,
        vpc_id=None,
        vpc_name=None,
        rules=None,
        rules_egress=None,
        region=None,
        key=None,
        keyid=None,
        profile=None,
        tags=None):
    '''
    Ensure the security group exists with the specified rules.

    name
        Name of the security group.

    description
        A description of this security group.

    vpc_id
        The ID of the VPC to create the security group in, if any. Exclusive with vpc_name.

    vpc_name
        The name of the VPC to create the security group in, if any. Exclusive with vpc_id.

        .. versionadded:: Boron

        .. versionadded:: 2015.8.2

    rules
        A list of ingress rule dicts. If not specified, ``rules=None``,
        the ingress rules will be unmanaged. If set to an empty list, ``[]``,
        then all ingress rules will be removed.

    rules_egress
        A list of egress rule dicts. If not specified, ``rules_egress=None``,
        the egress rules will be unmanaged. If set to an empty list, ``[]``,
        then all egress rules will be removed.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    tags
        List of key:value pairs of tags to set on the security group

        .. versionadded:: Boron
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _security_group_present(name, description, vpc_id=vpc_id,
                                   vpc_name=vpc_name, region=region,
                                   key=key, keyid=keyid, profile=profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
        elif ret['result'] is None:
            return ret
    if rules is not None:
        _ret = _rules_present(name, rules, vpc_id=vpc_id, vpc_name=vpc_name,
                              region=region, key=key, keyid=keyid,
                              profile=profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
    if rules_egress is not None:
        _ret = _rules_egress_present(name, rules_egress, vpc_id=vpc_id,
                                     vpc_name=vpc_name, region=region, key=key,
                                     keyid=keyid, profile=profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
    _ret = _tags_present(
        name=name, tags=tags, vpc_id=vpc_id, vpc_name=vpc_name,
        region=region, key=key, keyid=keyid, profile=profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
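
Note the three-way meaning of ``rules`` and ``rules_egress`` above: ``None`` leaves the rules unmanaged, an empty list removes every rule, and a non-empty list enforces exactly those rules. A tiny sketch of that dispatch; ``manage_rules`` is a hypothetical illustration, not part of the state.

def manage_rules(rules):
    # None      -> do nothing (unmanaged)
    # []        -> remove all rules
    # [r1, ...] -> enforce the given rules
    if rules is None:
        return 'unmanaged'
    if not rules:
        return 'remove all rules'
    return 'enforce {0} rule(s)'.format(len(rules))

print(manage_rules(None))                        # unmanaged
print(manage_rules([]))                          # remove all rules
print(manage_rules([{'ip_protocol': 'tcp',
                     'from_port': 443,
                     'to_port': 443,
                     'cidr_ip': '0.0.0.0/0'}]))  # enforce 1 rule(s)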
Example #42
def present(
        name,
        listeners,
        availability_zones=None,
        subnets=None,
        security_groups=None,
        scheme='internet-facing',
        health_check=None,
        attributes=None,
        attributes_from_pillar="boto_elb_attributes",
        cnames=None,
        alarms=None,
        alarms_from_pillar="boto_elb_alarms",
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the IAM role exists.

    name
        Name of the IAM role.

    availability_zones
        A list of availability zones for this ELB.

    listeners
        A list of listener lists; example:
        [
            ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'],
            ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert']
        ]

    subnets
        A list of subnet IDs in your VPC to attach to your LoadBalancer.

    security_groups
        The security groups assigned to your LoadBalancer within your VPC.

    scheme
        The type of the LoadBalancer: ``internet-facing`` or ``internal``. Once set, it cannot be modified.

    health_check
        A dict defining the health check for this ELB.

    attributes
        A dict defining the attributes to set on this ELB.

    attributes_from_pillar
        Name of the pillar dict that contains attributes. Attributes defined
        for this specific state will override those from the pillar.

    cnames
        A list of cname dicts with attributes: name, zone, ttl, and identifier.
        See the boto_route53 state for information about these attributes.

    alarms
        A dict of name -> boto_cloudwatch_alarm sections to be associated with
        this ELB. All attributes should be specified except for dimension,
        which will be automatically set to this ELB.
        See the boto_cloudwatch_alarm state for information about these attributes.

    alarms_from_pillar
        Name of the pillar dict that contains alarm settings. Alarms defined
        for this specific state will override those from the pillar.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''

    # load data from attributes_from_pillar and merge with attributes
    tmp = __salt__['config.option'](attributes_from_pillar, {})
    if attributes:
        attributes = dictupdate.update(tmp, attributes)
    else:
        attributes = tmp

    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _elb_present(name, availability_zones, listeners, subnets,
                        security_groups, scheme, region, key, keyid, profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _attributes_present(name, attributes, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _health_check_present(name, health_check, region, key, keyid,
                                 profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    if cnames:
        lb = __salt__['boto_elb.get_elb_config'](
            name, region, key, keyid, profile
        )
        for cname in cnames:
            _ret = __salt__['state.single'](
                'boto_route53.present',
                name=cname.get('name'),
                value=lb['dns_name'],
                zone=cname.get('zone'),
                record_type='CNAME',
                identifier=cname.get('identifier', None),
                ttl=cname.get('ttl', None),
                region=region,
                key=key,
                keyid=keyid,
                profile=profile
            )
            _ret = list(_ret.values())[0]
            ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
            ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
            if not _ret['result']:
                ret['result'] = _ret['result']
                if ret['result'] is False:
                    return ret
    _ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    return ret
Example #43
def _elb_present(
        name,
        availability_zones,
        listeners,
        subnets,
        security_groups,
        scheme,
        region,
        key,
        keyid,
        profile):
    ret = {'result': True, 'comment': '', 'changes': {}}
    if not (availability_zones or subnets):
        raise SaltInvocationError('Either availability_zones or subnets must'
                                  ' be provided as arguments.')
    if availability_zones and subnets:
        raise SaltInvocationError('availability_zones and subnets are mutually'
                                  ' exclusive arguments.')
    if not listeners:
        listeners = []
    _listeners = []
    for listener in listeners:
        if len(listener) < 3:
            raise SaltInvocationError('Listeners must at a minimum include'
                                      ' elb_port, instance_port and'
                                      ' elb_protocol values.')
        if 'elb_port' not in listener:
            raise SaltInvocationError('elb_port is a required value for'
                                      ' listeners.')
        if 'instance_port' not in listener:
            raise SaltInvocationError('instance_port is a required value for'
                                      ' listeners.')
        if 'elb_protocol' not in listener:
            raise SaltInvocationError('elb_protocol is a required value for'
                                      ' listeners.')
        listener['elb_protocol'] = listener['elb_protocol'].upper()
        if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener:
            raise SaltInvocationError('certificate is a required value for'
                                      ' listeners if HTTPS is set for'
                                      ' elb_protocol.')
        # We define all listeners as complex listeners.
        if 'instance_protocol' not in listener:
            listener['instance_protocol'] = listener['elb_protocol'].upper()
        else:
            listener['instance_protocol'] = listener['instance_protocol'].upper()
        _listener = [listener['elb_port'], listener['instance_port'],
                     listener['elb_protocol'], listener['instance_protocol']]
        if 'certificate' in listener:
            _listener.append(listener['certificate'])
        _listeners.append(_listener)
    if subnets:
        vpc_id = __salt__['boto_vpc.get_subnet_association'](
            subnets, region, key, keyid, profile
        )
        if not vpc_id:
            msg = 'Subnets {0} do not map to a valid vpc id.'.format(subnets)
            raise SaltInvocationError(msg)
        security_groups = __salt__['boto_secgroup.convert_to_group_ids'](
            security_groups, vpc_id, region, key, keyid, profile
        )
        if not security_groups:
            msg = 'Security groups {0} do not map to valid security group ids.'
            msg = msg.format(security_groups)
            raise SaltInvocationError(msg)
    exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile)
    if not exists:
        if __opts__['test']:
            ret['comment'] = 'ELB {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        created = __salt__['boto_elb.create'](name, availability_zones,
                                              _listeners, subnets,
                                              security_groups, scheme, region,
                                              key, keyid, profile)
        if created:
            ret['changes']['old'] = {'elb': None}
            ret['changes']['new'] = {'elb': name}
            ret['comment'] = 'ELB {0} created.'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to create {0} ELB.'.format(name)
    else:
        ret['comment'] = 'ELB {0} present.'.format(name)
        _ret = _listeners_present(name, _listeners, region, key, keyid,
                                  profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
        if availability_zones:
            _ret = _zones_present(name, availability_zones, region, key, keyid,
                                  profile)
            ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
            ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
            if not _ret['result']:
                ret['result'] = _ret['result']
                if ret['result'] is False:
                    return ret
        elif subnets:
            _ret = _subnets_present(name, subnets, region, key, keyid,
                                    profile)
            ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
            ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
            if not _ret['result']:
                ret['result'] = _ret['result']
    return ret
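
The listener handling in ``_elb_present`` normalizes every listener dict into a "complex listener" list of ``[elb_port, instance_port, elb_protocol, instance_protocol]`` plus an optional certificate ARN. A stand-alone sketch of that normalization with the same defaulting rules; the helper name and sample data are illustrative only.

def normalize_listener(listener):
    # Protocols are upper-cased, instance_protocol defaults to elb_protocol,
    # and an HTTPS listener carries its certificate ARN as a fifth element.
    elb_protocol = listener['elb_protocol'].upper()
    instance_protocol = listener.get('instance_protocol', elb_protocol).upper()
    complex_listener = [listener['elb_port'], listener['instance_port'],
                        elb_protocol, instance_protocol]
    if 'certificate' in listener:
        complex_listener.append(listener['certificate'])
    return complex_listener

print(normalize_listener({'elb_port': 80, 'instance_port': 80,
                          'elb_protocol': 'http'}))
# [80, 80, 'HTTP', 'HTTP']
print(normalize_listener({'elb_port': 443, 'instance_port': 80,
                          'elb_protocol': 'HTTPS', 'instance_protocol': 'http',
                          'certificate': 'arn:aws:iam::1111111:server-certificate/mycert'}))
# [443, 80, 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert']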
Example #44
def eni_present(name,
                subnet_id=None,
                subnet_name=None,
                private_ip_address=None,
                description=None,
                groups=None,
                source_dest_check=True,
                allocate_eip=False,
                arecords=None,
                region=None,
                key=None,
                keyid=None,
                profile=None):
    '''
    Ensure the EC2 ENI exists.

    .. versionadded:: Boron

    name
        Name tag associated with the ENI.

    subnet_id
        The VPC subnet ID the ENI will exist within.

    subnet_name
        The VPC subnet name the ENI will exist within.


    private_ip_address
        The private IP address to use for this ENI. If this is not specified,
        AWS will automatically assign a private IP address to the ENI. Must be
        specified at creation time; will be ignored afterward.

    description
        Description of the ENI.

    groups
        A list of security groups to apply to the ENI.

    source_dest_check
        Boolean specifying whether source/destination checking is enabled on
        the ENI.

    allocate_eip
        True/False - allocate and associate an EIP to the ENI

        .. versionadded:: Boron

    arecords
        A list of arecord dicts with attributes needed for the DNS add_record state.
        By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier.
        See the boto_route53 state for information about these attributes.
        Other DNS modules can be called by specifying the provider keyword.
        By default, the private ENI IP address will be used; set 'public: True'
        in the arecord dict to use the ENI's public IP address.

        .. versionadded:: Boron

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    if not exactly_one((subnet_id, subnet_name)):
        raise SaltInvocationError('One (but not both) of subnet_id or '
                                  'subnet_name must be provided.')
    if not groups:
        raise SaltInvocationError('groups is a required argument.')
    if not isinstance(groups, list):
        raise SaltInvocationError('groups must be a list.')
    if not isinstance(source_dest_check, bool):
        raise SaltInvocationError('source_dest_check must be a bool.')
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    r = __salt__['boto_ec2.get_network_interface'](name=name,
                                                   region=region,
                                                   key=key,
                                                   keyid=keyid,
                                                   profile=profile)
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Error when attempting to find eni: {0}.'.format(
            r['error']['message'])
        return ret
    if not r['result']:
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be created.'
            if allocate_eip:
                ret['comment'] = ' '.join([
                    ret['comment'],
                    'An EIP is set to be allocated/associated to the ENI.'
                ])
            if arecords:
                ret['comment'] = ' '.join(
                    [ret['comment'], 'A records are set to be created.'])
            ret['result'] = None
            return ret
        result_create = __salt__['boto_ec2.create_network_interface'](
            name,
            subnet_id=subnet_id,
            subnet_name=subnet_name,
            private_ip_address=private_ip_address,
            description=description,
            groups=groups,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile)
        if 'error' in result_create:
            ret['result'] = False
            ret['comment'] = 'Failed to create ENI: {0}'.format(
                result_create['error']['message'])
            return ret
        r['result'] = result_create['result']
        ret['comment'] = 'Created ENI {0}'.format(name)
        ret['changes']['id'] = r['result']['id']
    else:
        _ret = _eni_attribute(r['result'], 'description', description, region,
                              key, keyid, profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = _ret['comment']
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
        _ret = _eni_groups(r['result'], groups, region, key, keyid, profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
    # Actions that need to occur whether creating or updating
    _ret = _eni_attribute(r['result'], 'source_dest_check', source_dest_check,
                          region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        return ret
    # Initialize so the arecord handling below can safely test it even when
    # no EIP was allocated during this run.
    eip_alloc = None
    if allocate_eip:
        if 'allocationId' not in r['result']:
            if __opts__['test']:
                ret['comment'] = ' '.join([
                    ret['comment'],
                    'An EIP is set to be allocated and associated to the ENI.'
                ])
            else:
                eip_alloc = __salt__['boto_ec2.allocate_eip_address'](
                    domain=None,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile)
                if eip_alloc:
                    _ret = __salt__['boto_ec2.associate_eip_address'](
                        instance_id=None,
                        instance_name=None,
                        public_ip=None,
                        allocation_id=eip_alloc['allocation_id'],
                        network_interface_id=r['result']['id'],
                        private_ip_address=None,
                        allow_reassociation=False,
                        region=region,
                        key=key,
                        keyid=keyid,
                        profile=profile)
                    if not _ret:
                        _ret = __salt__['boto_ec2.release_eip_address'](
                            public_ip=None,
                            allocation_id=eip_alloc['allocation_id'],
                            region=region,
                            key=key,
                            keyid=keyid,
                            profile=profile)
                        ret['result'] = False
                        msg = 'Failed to associate the allocated EIP address with the ENI. The EIP {0}'.format(
                            'was successfully released.'
                            if _ret else 'was NOT RELEASED.')
                        ret['comment'] = ' '.join([ret['comment'], msg])
                        return ret
                else:
                    ret['result'] = False
                    ret['comment'] = ' '.join(
                        [ret['comment'], 'Failed to allocate an EIP address'])
                    return ret
        else:
            ret['comment'] = ' '.join([
                ret['comment'],
                'An EIP is already allocated/associated to the ENI'
            ])
    if arecords:
        for arecord in arecords:
            if 'name' not in arecord:
                msg = 'The arecord must contain a "name" property.'
                raise SaltInvocationError(msg)
            log.debug('processing arecord {0}'.format(arecord))
            _ret = None
            dns_provider = 'boto_route53'
            arecord['record_type'] = 'A'
            public_ip_arecord = False
            if 'public' in arecord:
                public_ip_arecord = arecord.pop('public')
            if public_ip_arecord:
                if 'publicIp' in r['result']:
                    arecord['value'] = r['result']['publicIp']
                elif eip_alloc and 'public_ip' in eip_alloc:
                    arecord['value'] = eip_alloc['public_ip']
                else:
                    msg = 'Unable to add an A record for the public IP address; no public IP address appears to be allocated to this ENI.'
                    raise CommandExecutionError(msg)
            else:
                arecord['value'] = r['result']['private_ip_address']
            if 'provider' in arecord:
                dns_provider = arecord.pop('provider')
            if dns_provider == 'boto_route53':
                if 'profile' not in arecord:
                    arecord['profile'] = profile
                if 'key' not in arecord:
                    arecord['key'] = key
                if 'keyid' not in arecord:
                    arecord['keyid'] = keyid
                if 'region' not in arecord:
                    arecord['region'] = region
            _ret = __states__['.'.join([dns_provider, 'present'])](**arecord)
            log.debug('ret from dns_provider.present = {0}'.format(_ret))
            ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
            ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
            if not _ret['result']:
                ret['result'] = _ret['result']
                if ret['result'] is False:
                    return ret
    return ret
Example #45
def route_table_present(name, vpc_name=None, vpc_id=None, routes=None,
                        subnet_ids=None, subnet_names=None, tags=None,
                        region=None, key=None, keyid=None, profile=None):
    '''
    Ensure a route table with routes exists and is associated with a VPC.

    Example:

    .. code-block:: yaml

        boto_vpc.route_table_present:
            - name: my_route_table
            - vpc_id: vpc-123456
            - routes:
              - destination_cidr_block: 0.0.0.0/0
                instance_id: i-123456
                interface_id: eni-123456
            - subnet_names:
              - subnet1
              - subnet2

    name
        Name of the route table.

    vpc_name
        Name of the VPC with which the route table should be associated.

    vpc_id
        Id of the VPC with which the route table should be associated.
        Either vpc_name or vpc_id must be provided.

    routes
        A list of routes.

    subnet_ids
        A list of subnet ids to associate

    subnet_names
        A list of subnet names to associate

    tags
        A list of tags.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }

    _ret = _route_table_present(name=name, vpc_name=vpc_name, vpc_id=vpc_id,
                                tags=tags, region=region, key=key,
                                keyid=keyid, profile=profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _routes_present(route_table_name=name, routes=routes, tags=tags, region=region, key=key,
                           keyid=keyid, profile=profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _subnets_present(route_table_name=name, subnet_ids=subnet_ids, subnet_names=subnet_names, tags=tags, region=region, key=key,
                            keyid=keyid, profile=profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    return ret
Example #46
def present(name,
            directory,
            options=None,
            owner=None,
            user=None,
            maintenance_db=None,
            db_password=None,
            db_host=None,
            db_port=None,
            db_user=None):
    '''
    Ensure that the named tablespace is present with the specified properties.
    For more information about all of these options see man
    ``create_tablespace``(7).

    name
        The name of the tablespace to create/manage.

    directory
        The directory where the tablespace will be located; it must already exist.

    options
        A dictionary of options to specify for the tablespace.
        Currently, the only tablespace options supported are ``seq_page_cost``
        and ``random_page_cost``. Default values are shown in the example below:

        .. code-block:: yaml

            my_space:
              postgres_tablespace.present:
                - directory: /srv/my_tablespace
                - options:
                    seq_page_cost: 1.0
                    random_page_cost: 4.0

    owner
        The database user that will be the owner of the tablespace.
        Defaults to the user executing the command (i.e. the `user` option)

    user
        System user all operations should be performed on behalf of

    maintenance_db
        Database to act on

    db_user
        Database username if different from config or default

    db_password
        Password for the database user, if one is required

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Tablespace {0} is already present'.format(name)
    }
    dbargs = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    tblspaces = __salt__['postgres.tablespace_list'](**dbargs)
    if name not in tblspaces:
        # not there, create it
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Tablespace {0} is set to be created'.format(name)
            return ret
        if __salt__['postgres.tablespace_create'](name, directory, options,
                                                  owner, **dbargs):
            ret['comment'] = 'The tablespace {0} has been created'.format(name)
            ret['changes'][name] = 'Present'
            return ret

    # already exists, make sure it's got the right config
    if tblspaces[name]['Location'] != directory and not __opts__['test']:
        ret['comment'] = """Tablespace {0} is not at the right location. This is
            unfixable without dropping and recreating the tablespace.""".format(
            name)
        ret['result'] = False
        return ret

    if owner and tblspaces[name]['Owner'] != owner:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Tablespace {0} owner to be altered'.format(name)
        elif __salt__['postgres.tablespace_alter'](name, new_owner=owner):
            ret['comment'] = 'Tablespace {0} owner changed'.format(name)
            ret['changes'][name] = {'owner': owner}
            ret['result'] = True

    if options:
        # options comes back from postgres as a json-ish string that can't
        # easily be parsed, but it's in a consistent enough format that we
        # can check it as a string:
        # {seq_page_cost=1.1,random_page_cost=3.9}
        # TODO remove options that exist if possible
        for k, v in iteritems(options):
            # if 'seq_page_cost=1.1' not in '{seq_page_cost=1.1,...}'
            if '{0}={1}'.format(k, v) not in tblspaces[name]['Opts']:
                if __opts__['test']:
                    ret['result'] = None
                    ret['comment'] = """Tablespace {0} options to be
                        altered""".format(name)
                    break  # we know it's going to be altered, no reason to cont
                if __salt__['postgres.tablespace_alter'](name,
                                                         set_option={
                                                             k: v
                                                         }):
                    ret['comment'] = 'Tablespace {0} opts changed'.format(name)
                    dictupdate.update(ret['changes'],
                                      {name: {
                                          'options': {
                                              k: v
                                          }
                                      }})
                    ret['result'] = True

    return ret
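
PostgreSQL reports tablespace options as a single brace-delimited string such as ``{seq_page_cost=1.1,random_page_cost=3.9}``, which is why the state falls back to a substring check. Below is a short sketch of that check, written as a hypothetical helper that returns the options still needing an ``ALTER TABLESPACE``.

def options_to_alter(desired, current_opts):
    # current_opts is the raw string postgres returns, e.g.
    # '{seq_page_cost=1.1,random_page_cost=3.9}'. Any desired k=v pair not
    # present verbatim in that string would need an ALTER TABLESPACE.
    return {k: v for k, v in desired.items()
            if '{0}={1}'.format(k, v) not in current_opts}

current = '{seq_page_cost=1.1,random_page_cost=3.9}'
print(options_to_alter({'seq_page_cost': 1.1}, current))     # {}
print(options_to_alter({'random_page_cost': 4.0}, current))  # {'random_page_cost': 4.0}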
Example #47
    def test_filter_by(self):
        grainsmod.__grains__ = {
          'os_family': 'MockedOS',
          '1': '1',
          '2': '2',
        }

        dict1 = {'A': 'B', 'C': {'D': {'E': 'F', 'G': 'H'}}}
        dict2 = {
            'default': {
                'A': 'B',
                'C': {
                    'D': 'E'
                },
            },
            '1': {
                'A': 'X',
            },
            '2': {
                'C': {
                    'D': 'H',
                },
            },
            'MockedOS': {
                'A': 'Z',
            },
        }

        mdict1 = {'D': {'E': 'I'}, 'J': 'K'}
        mdict2 = {'A': 'Z'}
        mdict3 = {'C': {'D': 'J'}}

        # test None result with a non-existent grain and no default
        res = grainsmod.filter_by(dict1, grain='xxx')
        self.assertIs(res, None)

        # test None result with os_family grain and no matching result
        res = grainsmod.filter_by(dict1)
        self.assertIs(res, None)

        # test with a non-existent grain and a given default key
        res = grainsmod.filter_by(dict1, grain='xxx', default='C')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})

        # add a merge dictionary, F disappears
        res = grainsmod.filter_by(dict1, grain='xxx', merge=mdict1, default='C')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'C': {'D': {'E': 'F', 'G': 'H'}}}

        # default is not present in dict1, check we only have merge in result
        res = grainsmod.filter_by(dict1, grain='xxx', merge=mdict1, default='Z')
        self.assertEqual(res, mdict1)

        # default is not present in dict1, and no merge, should get None
        res = grainsmod.filter_by(dict1, grain='xxx', default='Z')
        self.assertIs(res, None)

        # test that giving a list as the merge argument raises an exception
        self.assertRaises(
            SaltException,
            grainsmod.filter_by,
            dict1,
            'xxx',
            ['foo'],
            'C'
        )

        # Now, re-test with an existing grain (os_family), but with no match.
        res = grainsmod.filter_by(dict1)
        self.assertIs(res, None)
        res = grainsmod.filter_by(dict1, default='C')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})
        res = grainsmod.filter_by(dict1, merge=mdict1, default='C')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'C': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1, merge=mdict1, default='Z')
        self.assertEqual(res, mdict1)
        res = grainsmod.filter_by(dict1, default='Z')
        self.assertIs(res, None)
        # this one is in fact a traceback in dictupdate.update(), merging a string with a dictionary
        self.assertRaises(
            TypeError,
            grainsmod.filter_by,
            dict1,
            merge=mdict1,
            default='A'
        )

        # Now, re-test with a matching grain.
        dict1 = {'A': 'B', 'MockedOS': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1)
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})
        res = grainsmod.filter_by(dict1, default='A')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})
        res = grainsmod.filter_by(dict1, merge=mdict1, default='A')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'MockedOS': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1, merge=mdict1, default='Z')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'MockedOS': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1, default='Z')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})

        # Base tests
        # NOTE: these may fail to detect errors if dictupdate.update() is broken
        # but then the unit test for dictupdate.update() should fail and expose
        # that. The purpose of these tests is to validate how filter_by()
        # processes its arguments.

        # Test with just the base
        res = grainsmod.filter_by(dict2, grain='xxx', default='xxx', base='default')
        self.assertEqual(res, dict2['default'])

        # Test the base with the OS grain look-up
        res = grainsmod.filter_by(dict2, default='xxx', base='default')
        self.assertEqual(
            res,
            dictupdate.update(copy.deepcopy(dict2['default']), dict2['MockedOS'])
        )

        # Test the base with default
        res = grainsmod.filter_by(dict2, grain='xxx', base='default')
        self.assertEqual(res, dict2['default'])

        res = grainsmod.filter_by(dict2, grain='1', base='default')
        self.assertEqual(
            res,
            dictupdate.update(copy.deepcopy(dict2['default']), dict2['1'])
        )

        res = grainsmod.filter_by(dict2, base='default', merge=mdict2)
        self.assertEqual(
            res,
            dictupdate.update(
                dictupdate.update(
                    copy.deepcopy(dict2['default']),
                    dict2['MockedOS']),
                mdict2
            )
        )

        res = grainsmod.filter_by(dict2, base='default', merge=mdict3)
        self.assertEqual(
            res,
            dictupdate.update(
                dictupdate.update(
                    copy.deepcopy(dict2['default']),
                    dict2['MockedOS']),
                mdict3
            )
        )
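
For reference, a minimal sketch (not part of the test above) of the deep-merge behaviour these assertions rely on, assuming salt.utils.dictupdate is importable in the test environment:

# Deep merge: nested keys from the overlay are merged into the base
# rather than replacing the whole nested dict.
from salt.utils.dictupdate import update

base = {'D': {'E': 'F', 'G': 'H'}}
overlay = {'D': {'E': 'I'}, 'J': 'K'}
merged = update(base, overlay)
assert merged == {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'}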
Example #48
def present(name=None,
            table_name=None,
            region=None,
            key=None,
            keyid=None,
            profile=None,
            read_capacity_units=None,
            write_capacity_units=None,
            alarms=None,
            alarms_from_pillar="boto_dynamodb_alarms",
            hash_key=None,
            hash_key_data_type=None,
            range_key=None,
            range_key_data_type=None,
            local_indexes=None,
            global_indexes=None,
            backup_configs_from_pillars='boto_dynamodb_backup_configs'):
    '''
    Ensure the DynamoDB table exists. Table throughput can be updated after
    table creation.

    Global secondary indexes (GSIs) are managed with some exceptions:

    - If a GSI deletion is detected, a failure will occur (deletes should be
      done manually in the AWS console).

    - If multiple GSIs are added in a single Salt call, a failure will occur
      (boto supports one creation at a time). Note that this only applies after
      table creation; multiple GSIs can be created during table creation.

    - Updates to existing GSIs are limited to read/write capacity only
      (DynamoDB limitation).

    name
        Name of the DynamoDB table

    table_name
        Name of the DynamoDB table (deprecated)

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    read_capacity_units
        The read throughput for this table

    write_capacity_units
        The write throughput for this table

    hash_key
        The name of the attribute that will be used as the hash key
        for this table

    hash_key_data_type
        The DynamoDB datatype of the hash key

    range_key
        The name of the attribute that will be used as the range key
        for this table

    range_key_data_type
        The DynamoDB datatype of the range key

    local_indexes
        The local indexes you would like to create

    global_indexes
        The global indexes you would like to create

    backup_configs_from_pillars
        Pillars to use to configure DataPipeline backups
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if table_name:
        ret['warnings'] = [
            'boto_dynamodb.present: `table_name` is deprecated.'
            ' Please use `name` instead.'
        ]
        ret['name'] = table_name
        name = table_name

    comments = []
    changes_old = {}
    changes_new = {}

    # Ensure DynamoDB table exists
    table_exists = __salt__['boto_dynamodb.exists'](name, region, key, keyid,
                                                    profile)
    if not table_exists:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'DynamoDB table {0} would be created.'.format(
                name)
            return ret
        else:
            is_created = __salt__['boto_dynamodb.create_table'](
                name, region, key, keyid, profile, read_capacity_units,
                write_capacity_units, hash_key, hash_key_data_type, range_key,
                range_key_data_type, local_indexes, global_indexes)
            if not is_created:
                ret['result'] = False
                ret['comment'] = 'Failed to create table {0}'.format(name)
                _add_changes(ret, changes_old, changes_new)
                return ret

            comments.append(
                'DynamoDB table {0} was successfully created'.format(name))
            changes_new['table'] = name
            changes_new['read_capacity_units'] = read_capacity_units
            changes_new['write_capacity_units'] = write_capacity_units
            changes_new['hash_key'] = hash_key
            changes_new['hash_key_data_type'] = hash_key_data_type
            changes_new['range_key'] = range_key
            changes_new['range_key_data_type'] = range_key_data_type
            changes_new['local_indexes'] = local_indexes
            changes_new['global_indexes'] = global_indexes
    else:
        comments.append('DynamoDB table {0} exists'.format(name))

    # Ensure DynamoDB table provisioned throughput matches
    description = __salt__['boto_dynamodb.describe'](name, region, key, keyid,
                                                     profile)
    provisioned_throughput = description.get('Table',
                                             {}).get('ProvisionedThroughput',
                                                     {})
    current_write_capacity_units = provisioned_throughput.get(
        'WriteCapacityUnits')
    current_read_capacity_units = provisioned_throughput.get(
        'ReadCapacityUnits')
    throughput_matches = (current_write_capacity_units == write_capacity_units
                          and current_read_capacity_units
                          == read_capacity_units)
    if not throughput_matches:
        if __opts__['test']:
            ret['result'] = None
            comments.append(
                'DynamoDB table {0} is set to be updated.'.format(name))
        else:
            is_updated = __salt__['boto_dynamodb.update'](
                name,
                throughput={
                    'read': read_capacity_units,
                    'write': write_capacity_units,
                },
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if not is_updated:
                ret['result'] = False
                ret['comment'] = 'Failed to update table {0}'.format(name)
                _add_changes(ret, changes_old, changes_new)
                return ret

            comments.append(
                'DynamoDB table {0} was successfully updated'.format(name))
            changes_old['read_capacity_units'] = current_read_capacity_units
            changes_old['write_capacity_units'] = current_write_capacity_units
            changes_new['read_capacity_units'] = read_capacity_units
            changes_new['write_capacity_units'] = write_capacity_units
    else:
        comments.append('DynamoDB table {0} throughput matches'.format(name))

    provisioned_indexes = description.get('Table',
                                          {}).get('GlobalSecondaryIndexes', [])

    _ret = _global_indexes_present(provisioned_indexes, global_indexes,
                                   changes_old, changes_new, comments, name,
                                   region, key, keyid, profile)
    if not _ret['result']:
        comments.append(_ret['comment'])
        ret['result'] = _ret['result']
        if ret['result'] is False:
            ret['comment'] = ',\n'.join(comments)
            _add_changes(ret, changes_old, changes_new)
            return ret

    _ret = _alarms_present(name, alarms, alarms_from_pillar,
                           write_capacity_units, read_capacity_units, region,
                           key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    comments.append(_ret['comment'])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            ret['comment'] = ',\n'.join(comments)
            _add_changes(ret, changes_old, changes_new)
            return ret

    # Ensure backup datapipeline is present
    datapipeline_configs = copy.deepcopy(__salt__['pillar.get'](
        backup_configs_from_pillars, []))
    for config in datapipeline_configs:
        datapipeline_ret = _ensure_backup_datapipeline_present(
            name=name,
            schedule_name=config['name'],
            period=config['period'],
            utc_hour=config['utc_hour'],
            s3_base_location=config['s3_base_location'],
        )
        # Add comments and changes if successful changes were made (True for live mode,
        # None for test mode).
        if datapipeline_ret['result'] in [True, None]:
            ret['result'] = datapipeline_ret['result']
            comments.append(datapipeline_ret['comment'])
            if datapipeline_ret.get('changes'):
                ret['changes']['backup_datapipeline_{0}'.format(config['name'])] = \
                    datapipeline_ret.get('changes')
        else:
            ret['comment'] = ',\n'.join(
                [ret['comment'], datapipeline_ret['comment']])
            _add_changes(ret, changes_old, changes_new)
            return ret

    ret['comment'] = ',\n'.join(comments)
    _add_changes(ret, changes_old, changes_new)
    return ret
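
A stripped-down sketch of the dry-run convention used above (the __opts__ argument below is a stand-in for the dict Salt injects into state modules, not how the real state is invoked):

def present(name, __opts__):
    # In test mode a state only reports what *would* change: result None
    # means "changes pending" and no AWS calls are made.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'DynamoDB table {0} would be created.'.format(name)
        return ret
    # ... live mode would create or update the table here ...
    ret['comment'] = 'DynamoDB table {0} was successfully created'.format(name)
    return ret

print(present('my_table', {'test': True}))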
Example #49
def present(name,
            launch_config_name,
            availability_zones,
            min_size,
            max_size,
            launch_config=None,
            desired_capacity=None,
            load_balancers=None,
            default_cooldown=None,
            health_check_type=None,
            health_check_period=None,
            placement_group=None,
            vpc_zone_identifier=None,
            subnet_names=None,
            tags=None,
            termination_policies=None,
            termination_policies_from_pillar='boto_asg_termination_policies',
            suspended_processes=None,
            scaling_policies=None,
            scaling_policies_from_pillar='boto_asg_scaling_policies',
            scheduled_actions=None,
            scheduled_actions_from_pillar='boto_asg_scheduled_actions',
            alarms=None,
            alarms_from_pillar='boto_asg_alarms',
            region=None,
            key=None,
            keyid=None,
            profile=None,
            notification_arn=None,
            notification_arn_from_pillar='boto_asg_notification_arn',
            notification_types=None,
            notification_types_from_pillar='boto_asg_notification_types'):
    '''
    Ensure the autoscale group exists.

    name
        Name of the autoscale group.

    launch_config_name
        Name of the launch config to use for the group.  Or, if
        ``launch_config`` is specified, this will be the launch config
        name's prefix.  (see below)

    launch_config
        A dictionary of launch config attributes.  If specified, a
        launch config will be used or created, matching this set
        of attributes, and the autoscale group will be set to use
        that launch config.  The launch config name will be the
        ``launch_config_name`` followed by a hyphen followed by a hash
        of the ``launch_config`` dict contents.
        Example:

        .. code-block:: yaml

            my_asg:
              boto_asg.present:
              - launch_config:
                - ebs_optimized: false
                - instance_profile_name: my_iam_profile
                - kernel_id: ''
                - ramdisk_id: ''
                - key_name: my_ssh_key
                - image_name: aws2015091-hvm
                - instance_type: c3.xlarge
                - instance_monitoring: false
                - security_groups:
                  - my_sec_group_01
                  - my_sec_group_02

    availability_zones
        List of availability zones for the group.

    min_size
        Minimum size of the group.

    max_size
        Maximum size of the group.

    desired_capacity
        The desired capacity of the group.

    load_balancers
        List of load balancers for the group. Once set this can not be
        updated (Amazon restriction).

    default_cooldown
        Number of seconds after a Scaling Activity completes before any further
        scaling activities can start.

    health_check_type
        The service you want the health status from, Amazon EC2 or Elastic Load
        Balancer (EC2 or ELB).

    health_check_period
        Length of time in seconds after a new EC2 instance comes into service
        that Auto Scaling starts checking its health.

    placement_group
        Physical location of your cluster placement group created in Amazon
        EC2. Once set this can not be updated (Amazon restriction).

    vpc_zone_identifier
        A list of the subnet identifiers of the Virtual Private Cloud.

    subnet_names
        For VPC, a list of subnet names (NOT subnet IDs) to deploy into.
        Exclusive with vpc_zone_identifier.

    tags
        A list of tags. Example:

        .. code-block:: yaml

            - key: 'key'
              value: 'value'
              propagate_at_launch: true

    termination_policies
        A list of termination policies. Valid values are:

        * ``OldestInstance``
        * ``NewestInstance``
        * ``OldestLaunchConfiguration``
        * ``ClosestToNextInstanceHour``
        * ``Default``

        If no value is specified, the ``Default`` value is used.

    termination_policies_from_pillar:
        name of pillar dict that contains termination policy settings.   Termination policies
        defined for this specific state will override those from pillar.

    suspended_processes
        List of processes to be suspended. see
        http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html

    scaling_policies
        List of scaling policies.  Each policy is a dict of key-values described by
        https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy

    scaling_policies_from_pillar:
        name of pillar dict that contains scaling policy settings.   Scaling policies defined for
        this specific state will override those from pillar.

    scheduled_actions:
        a dictionary of scheduled actions. Each key is the name of a scheduled action and each
        value is a dictionary of options. For example:

        .. code-block:: yaml

            - scheduled_actions:
                scale_up_at_10:
                    desired_capacity: 4
                    min_size: 3
                    max_size: 5
                    recurrence: "0 9 * * 1-5"
                scale_down_at_7:
                    desired_capacity: 1
                    min_size: 1
                    max_size: 1
                    recurrence: "0 19 * * 1-5"

    scheduled_actions_from_pillar:
        name of pillar dict that contains scheduled_actions settings. Scheduled actions
        for this specific state will override those from pillar.

    alarms:
        a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG.
        All attributes should be specified except for dimension which will be
        automatically set to this ASG.

        See the :mod:`salt.states.boto_cloudwatch_alarm` state for information
        about these attributes.

        If any alarm actions include ":self:", it will be replaced with the ASG name.
        For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will
        map to the arn for this asg's scaling policy named "ScaleUp".
        In addition, any alarms that have only scaling_policy as actions will be ignored if
        min_size is equal to max_size for this ASG.

    alarms_from_pillar:
        name of pillar dict that contains alarm settings.   Alarms defined for this specific
        state will override those from pillar.

    region
        The region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    notification_arn
        The AWS arn that notifications will be sent to

    notification_arn_from_pillar
        name of the pillar dict that contains ``notification_arn`` settings.  A
        ``notification_arn`` defined for this specific state will override the
        one from pillar.

    notification_types
        A list of event names that will trigger a notification.  The list of valid
        notification types is:

        * ``autoscaling:EC2_INSTANCE_LAUNCH``
        * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR``
        * ``autoscaling:EC2_INSTANCE_TERMINATE``
        * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR``
        * ``autoscaling:TEST_NOTIFICATION``

    notification_types_from_pillar
        name of the pillar dict that contains ``notification_types`` settings.
        ``notification_types`` defined for this specific state will override those
        from the pillar.
    '''
    if vpc_zone_identifier and subnet_names:
        raise SaltInvocationError('vpc_zone_identifier and subnet_names are '
                                  'mutually exclusive options.')
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if subnet_names:
        vpc_zone_identifier = []
        for i in subnet_names:
            r = __salt__['boto_vpc.get_resource_id']('subnet',
                                                     name=i,
                                                     region=region,
                                                     key=key,
                                                     keyid=keyid,
                                                     profile=profile)
            if 'error' in r:
                ret['comment'] = 'Error looking up subnet ids: {0}'.format(
                    r['error'])
                ret['result'] = False
                return ret
            if 'id' not in r:
                ret['comment'] = 'Subnet {0} does not exist.'.format(i)
                ret['result'] = False
                return ret
            vpc_zone_identifier.append(r['id'])
    if vpc_zone_identifier:
        vpc_id = __salt__['boto_vpc.get_subnet_association'](
            vpc_zone_identifier, region, key, keyid, profile)
        vpc_id = vpc_id.get('vpc_id')
        log.debug('Auto Scaling Group %s is associated with VPC ID %s', name,
                  vpc_id)
    else:
        vpc_id = None
        log.debug('Auto Scaling Group %s has no VPC Association', name)
    # if launch_config is defined, manage the launch config first.
    # hash the launch_config dict to create a unique name suffix and then
    # ensure it is present
    if launch_config:
        launch_config_bytes = salt.utils.stringutils.to_bytes(
            str(launch_config))  # future lint: disable=blacklisted-function
        launch_config_name = launch_config_name + '-' + hashlib.md5(
            launch_config_bytes).hexdigest()
        args = {
            'name': launch_config_name,
            'region': region,
            'key': key,
            'keyid': keyid,
            'profile': profile
        }

        for index, item in enumerate(launch_config):
            if 'image_name' in item:
                image_name = item['image_name']
                iargs = {
                    'ami_name': image_name,
                    'region': region,
                    'key': key,
                    'keyid': keyid,
                    'profile': profile
                }
                image_ids = __salt__['boto_ec2.find_images'](**iargs)
                if len(image_ids):
                    launch_config[index]['image_id'] = image_ids[0]
                else:
                    launch_config[index]['image_id'] = image_name
                del launch_config[index]['image_name']
                break

        if vpc_id:
            log.debug('Auto Scaling Group %s is associated with a VPC', name)
            # locate the security groups attribute of a launch config
            sg_index = None
            for index, item in enumerate(launch_config):
                if 'security_groups' in item:
                    sg_index = index
                    break
            # if security groups exist within launch_config then convert
            # to group ids
            if sg_index is not None:
                log.debug('security group associations found in launch config')
                _group_ids = __salt__['boto_secgroup.convert_to_group_ids'](
                    launch_config[sg_index]['security_groups'],
                    vpc_id=vpc_id,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile)
                launch_config[sg_index]['security_groups'] = _group_ids

        for d in launch_config:
            args.update(d)
        if not __opts__['test']:
            lc_ret = __states__['boto_lc.present'](**args)
            if lc_ret['result'] is True and lc_ret['changes']:
                if 'launch_config' not in ret['changes']:
                    ret['changes']['launch_config'] = {}
                ret['changes']['launch_config'] = lc_ret['changes']

    asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile)
    termination_policies = _determine_termination_policies(
        termination_policies, termination_policies_from_pillar)
    scaling_policies = _determine_scaling_policies(
        scaling_policies, scaling_policies_from_pillar)
    scheduled_actions = _determine_scheduled_actions(
        scheduled_actions, scheduled_actions_from_pillar)
    if asg is None:
        ret['result'] = False
        ret['comment'] = 'Failed to check autoscale group existence.'
    elif not asg:
        if __opts__['test']:
            msg = 'Autoscale group set to be created.'
            ret['comment'] = msg
            ret['result'] = None
            return ret
        notification_arn, notification_types = _determine_notification_info(
            notification_arn, notification_arn_from_pillar, notification_types,
            notification_types_from_pillar)
        created = __salt__['boto_asg.create'](
            name, launch_config_name, availability_zones, min_size, max_size,
            desired_capacity, load_balancers, default_cooldown,
            health_check_type, health_check_period, placement_group,
            vpc_zone_identifier, tags, termination_policies,
            suspended_processes, scaling_policies, scheduled_actions, region,
            notification_arn, notification_types, key, keyid, profile)
        if created:
            ret['changes']['old'] = None
            asg = __salt__['boto_asg.get_config'](name, region, key, keyid,
                                                  profile)
            ret['changes']['new'] = asg
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to create autoscale group'
    else:
        need_update = False
        # If any of these attributes can't be modified after creation
        # time, we should remove them from the dict.
        if scaling_policies:
            for policy in scaling_policies:
                if 'min_adjustment_step' not in policy:
                    policy['min_adjustment_step'] = None
        if scheduled_actions:
            for s_name, action in six.iteritems(scheduled_actions):
                if 'end_time' not in action:
                    action['end_time'] = None
        config = {
            'launch_config_name': launch_config_name,
            'availability_zones': availability_zones,
            'min_size': min_size,
            'max_size': max_size,
            'desired_capacity': desired_capacity,
            'default_cooldown': default_cooldown,
            'health_check_type': health_check_type,
            'health_check_period': health_check_period,
            'vpc_zone_identifier': vpc_zone_identifier,
            'tags': tags,
            'termination_policies': termination_policies,
            'suspended_processes': suspended_processes,
            'scaling_policies': scaling_policies,
            'scheduled_actions': scheduled_actions
        }
        # ensure that we reset termination_policies to default if none are specified
        if not termination_policies:
            config['termination_policies'] = ['Default']
        if suspended_processes is None:
            config['suspended_processes'] = []
        # ensure that we delete scaling_policies if none are specified
        if scaling_policies is None:
            config['scaling_policies'] = []
        # ensure that we delete scheduled_actions if none are specified
        if scheduled_actions is None:
            config['scheduled_actions'] = {}
        # allow defaults on start_time
        for s_name, action in six.iteritems(scheduled_actions):
            if 'start_time' not in action:
                asg_action = asg['scheduled_actions'].get(s_name, {})
                if 'start_time' in asg_action:
                    del asg_action['start_time']
        proposed = {}
        # note: do not loop using "key, value" - this can modify the value of
        # the aws access key
        for asg_property, value in six.iteritems(config):
            # Only modify values being specified; introspection is difficult
            # otherwise since it's hard to track default values, which will
            # always be returned from AWS.
            if value is None:
                continue
            value = __utils__['boto3.ordered'](value)
            if asg_property in asg:
                _value = __utils__['boto3.ordered'](asg[asg_property])
                if not value == _value:
                    log.debug('%s asg_property differs from %s', value, _value)
                    proposed.setdefault('old',
                                        {}).update({asg_property: _value})
                    proposed.setdefault('new',
                                        {}).update({asg_property: value})
                    need_update = True
        if need_update:
            if __opts__['test']:
                msg = 'Autoscale group set to be updated.'
                ret['comment'] = msg
                ret['result'] = None
                ret['changes'] = proposed
                return ret
            # add in alarms
            notification_arn, notification_types = _determine_notification_info(
                notification_arn, notification_arn_from_pillar,
                notification_types, notification_types_from_pillar)
            updated, msg = __salt__['boto_asg.update'](
                name,
                launch_config_name,
                availability_zones,
                min_size,
                max_size,
                desired_capacity=desired_capacity,
                load_balancers=load_balancers,
                default_cooldown=default_cooldown,
                health_check_type=health_check_type,
                health_check_period=health_check_period,
                placement_group=placement_group,
                vpc_zone_identifier=vpc_zone_identifier,
                tags=tags,
                termination_policies=termination_policies,
                suspended_processes=suspended_processes,
                scaling_policies=scaling_policies,
                scheduled_actions=scheduled_actions,
                region=region,
                notification_arn=notification_arn,
                notification_types=notification_types,
                key=key,
                keyid=keyid,
                profile=profile)
            if asg['launch_config_name'] != launch_config_name:
                # delete the old launch_config_name
                deleted = __salt__['boto_asg.delete_launch_configuration'](
                    asg['launch_config_name'],
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile)
                if deleted:
                    if 'launch_config' not in ret['changes']:
                        ret['changes']['launch_config'] = {}
                    ret['changes']['launch_config']['deleted'] = asg[
                        'launch_config_name']
            if updated:
                ret['changes']['old'] = asg
                asg = __salt__['boto_asg.get_config'](name, region, key, keyid,
                                                      profile)
                ret['changes']['new'] = asg
                ret['comment'] = 'Updated autoscale group.'
            else:
                ret['result'] = False
                ret['comment'] = msg
        else:
            ret['comment'] = 'Autoscale group present.'
    # add in alarms
    _ret = _alarms_present(name, min_size == max_size, alarms,
                           alarms_from_pillar, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret
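
A minimal sketch of how the launch config name suffix described in the docstring is derived (the names and values below are illustrative, not taken from a real configuration):

# The launch_config list is stringified, hashed with MD5, and the hex digest
# is appended to launch_config_name, mirroring the code in present() above.
import hashlib
import salt.utils.stringutils

launch_config = [{'image_name': 'aws2015091-hvm'}, {'instance_type': 'c3.xlarge'}]
launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config))
print('my_asg-' + hashlib.md5(launch_config_bytes).hexdigest())
# -> my_asg-<32-character hex digest>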
Example #50
def present(
        name,
        listeners,
        availability_zones=None,
        subnets=None,
        security_groups=None,
        scheme='internet-facing',
        health_check=None,
        attributes=None,
        cnames=None,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the ELB exists.

    name
        Name of the ELB.

    availability_zones
        A list of availability zones for this ELB.

    listeners
        A list of listener lists; example: [['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert']]

    subnets
        A list of subnet IDs in your VPC to attach to your LoadBalancer.

    security_groups
        The security groups assigned to your LoadBalancer within your VPC.

    scheme
        The type of LoadBalancer: internet-facing or internal. Once set, it cannot be modified.

    health_check
        A dict defining the health check for this ELB.

    attributes
        A dict defining the attributes to set on this ELB.

    cnames
        A list of cname dicts with attributes: name, zone, ttl, and identifier.
        See the boto_route53 state for information about these attributes.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _elb_present(name, availability_zones, listeners, subnets,
                        security_groups, scheme, region, key, keyid, profile)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _attributes_present(name, attributes, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _health_check_present(name, health_check, region, key, keyid,
                                 profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _cnames_present(name, cnames, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    return ret
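
The ELB, security group and IAM role states in this listing all share the same aggregation pattern; a stripped-down sketch of it, using hypothetical _check_foo/_check_bar helpers rather than the real Salt sub-state functions:

from salt.utils.dictupdate import update as dict_update

def _check_foo(name):
    return {'result': True, 'comment': 'foo ok', 'changes': {'foo': {'new': name}}}

def _check_bar(name):
    return {'result': True, 'comment': 'bar ok', 'changes': {'bar': {'new': name}}}

def present(name):
    # Each helper returns a {'result', 'comment', 'changes'} dict; the caller
    # folds the changes together with dictupdate.update() and joins comments.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for helper in (_check_foo, _check_bar):
        _ret = helper(name)
        ret['changes'] = dict_update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']]).strip()
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                # Hard failure: stop early and report what was gathered so far.
                return ret
    return ret

print(present('my_elb'))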
Example #51
def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None,
                  key=None, keyid=None, profile=None):
    '''
    helper function to validate tags are correct
    '''
    ret = {'result': True, 'comment': '', 'changes': {}}
    if tags:
        sg = __salt__['boto_secgroup.get_config'](name, None, region, key,
                                                  keyid, profile, vpc_id, vpc_name)
        if not sg:
            msg = '{0} security group configuration could not be retrieved.'
            ret['comment'] = msg.format(name)
            ret['result'] = False
            return ret
        tags_to_add = tags
        tags_to_update = {}
        tags_to_remove = []
        if sg.get('tags'):
            for existing_tag in sg['tags'].keys():
                if existing_tag not in tags:
                    if existing_tag not in tags_to_remove:
                        tags_to_remove.append(existing_tag)
                else:
                    if tags[existing_tag] != sg['tags'][existing_tag]:
                        tags_to_update[existing_tag] = tags[existing_tag]
                    tags_to_add.pop(existing_tag)
        if tags_to_remove:
            if __opts__['test']:
                msg = 'The following tag{0} set to be removed: {1}.'.format(
                        ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove))
                ret['comment'] = ' '.join([ret['comment'], msg])
                ret['result'] = None
            else:
                temp_ret = __salt__['boto_secgroup.delete_tags'](tags_to_remove,
                                                                 name=name,
                                                                 group_id=None,
                                                                 vpc_name=vpc_name,
                                                                 vpc_id=vpc_id,
                                                                 region=region,
                                                                 key=key,
                                                                 keyid=keyid,
                                                                 profile=profile)
                if not temp_ret:
                    ret['result'] = False
                    msg = 'Error attempting to delete tags {0}.'.format(tags_to_remove)
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    return ret
                if 'old' not in ret['changes']:
                    ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
                for rem_tag in tags_to_remove:
                    ret['changes']['old']['tags'][rem_tag] = sg['tags'][rem_tag]
        if tags_to_add or tags_to_update:
            if __opts__['test']:
                if tags_to_add:
                    msg = 'The following tag{0} set to be added: {1}.'.format(
                            ('s are' if len(tags_to_add.keys()) > 1 else ' is'),
                            ', '.join(tags_to_add.keys()))
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    ret['result'] = None
                if tags_to_update:
                    msg = 'The following tag {0} set to be updated: {1}.'.format(
                            ('values are' if len(tags_to_update.keys()) > 1 else 'value is'),
                            ', '.join(tags_to_update.keys()))
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    ret['result'] = None
            else:
                all_tag_changes = dictupdate.update(tags_to_add, tags_to_update)
                temp_ret = __salt__['boto_secgroup.set_tags'](all_tag_changes,
                                                              name=name,
                                                              group_id=None,
                                                              vpc_name=vpc_name,
                                                              vpc_id=vpc_id,
                                                              region=region,
                                                              key=key,
                                                              keyid=keyid,
                                                              profile=profile)
                if not temp_ret:
                    ret['result'] = False
                    msg = 'Error attempting to set tags.'
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    return ret
                if 'old' not in ret['changes']:
                    ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
                if 'new' not in ret['changes']:
                    ret['changes'] = dictupdate.update(ret['changes'], {'new': {'tags': {}}})
                for tag in all_tag_changes:
                    ret['changes']['new']['tags'][tag] = tags[tag]
                    if 'tags' in sg:
                        if sg['tags']:
                            if tag in sg['tags']:
                                ret['changes']['old']['tags'][tag] = sg['tags'][tag]
        if not tags_to_update and not tags_to_remove and not tags_to_add:
            msg = 'Tags are already set.'
            ret['comment'] = ' '.join([ret['comment'], msg])
    return ret
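
A minimal sketch of the tag diffing performed above, with illustrative tag values (note that the sketch copies the desired tags, whereas _tags_present() mutates the tags argument it receives):

current = {'Name': 'web', 'Env': 'staging', 'Owner': 'ops'}   # tags on the group
desired = {'Name': 'web', 'Env': 'prod', 'Team': 'platform'}  # tags requested

tags_to_add = dict(desired)
tags_to_update = {}
tags_to_remove = []
for existing_tag, existing_val in current.items():
    if existing_tag not in desired:
        tags_to_remove.append(existing_tag)        # present on AWS, not requested
    else:
        if desired[existing_tag] != existing_val:
            tags_to_update[existing_tag] = desired[existing_tag]
        tags_to_add.pop(existing_tag)              # already present, not a new tag

assert tags_to_add == {'Team': 'platform'}
assert tags_to_update == {'Env': 'prod'}
assert tags_to_remove == ['Owner']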
Example #52
def merge_recurse(obj_a, obj_b):
    copied = copy(obj_a)
    return update(copied, obj_b)
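
A minimal usage sketch of merge_recurse(), assuming copy is copy.copy and update is salt.utils.dictupdate.update as in the other examples:

from copy import copy
from salt.utils.dictupdate import update

obj_a = {'ssh': {'port': 22, 'banner': True}}
obj_b = {'ssh': {'port': 2222}}

copied = copy(obj_a)            # shallow copy: nested dicts are still shared
merged = update(copied, obj_b)  # recursive merge of obj_b into the copy
assert merged == {'ssh': {'port': 2222, 'banner': True}}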
Example #53
def present(
    name,
    description,
    vpc_id=None,
    vpc_name=None,
    rules=None,
    rules_egress=None,
    delete_ingress_rules=True,
    delete_egress_rules=True,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    tags=None,
):
    """
    Ensure the security group exists with the specified rules.

    name
        Name of the security group.

    description
        A description of this security group.

    vpc_id
        The ID of the VPC to create the security group in, if any. Exclusive with vpc_name.

    vpc_name
        The name of the VPC to create the security group in, if any. Exclusive with vpc_id.

        .. versionadded:: 2016.3.0

        .. versionadded:: 2015.8.2

    rules
        A list of ingress rule dicts. If not specified, ``rules=None``,
        the ingress rules will be unmanaged. If set to an empty list, ``[]``,
        then all ingress rules will be removed.

    rules_egress
        A list of egress rule dicts. If not specified, ``rules_egress=None``,
        the egress rules will be unmanaged. If set to an empty list, ``[]``,
        then all egress rules will be removed.

    delete_ingress_rules
        Some tools (EMR comes to mind) insist on adding rules on-the-fly, which
        salt will happily remove on the next run.  Set this param to False to
        avoid deleting rules which were added outside of salt.

    delete_egress_rules
        Some tools (EMR comes to mind) insist on adding rules on-the-fly, which
        salt will happily remove on the next run.  Set this param to False to
        avoid deleting rules which were added outside of salt.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key, and keyid.

    tags
        List of key:value pairs of tags to set on the security group

        .. versionadded:: 2016.3.0
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    _ret = _security_group_present(
        name,
        description,
        vpc_id=vpc_id,
        vpc_name=vpc_name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    ret["changes"] = _ret["changes"]
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
        elif ret["result"] is None:
            return ret
    if rules is not None:
        _ret = _rules_present(
            name,
            rules,
            delete_ingress_rules,
            vpc_id=vpc_id,
            vpc_name=vpc_name,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
        ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
        if not _ret["result"]:
            ret["result"] = _ret["result"]
    if rules_egress is not None:
        _ret = _rules_egress_present(
            name,
            rules_egress,
            delete_egress_rules,
            vpc_id=vpc_id,
            vpc_name=vpc_name,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
        ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
        if not _ret["result"]:
            ret["result"] = _ret["result"]
    _ret = _tags_present(
        name=name,
        tags=tags,
        vpc_id=vpc_id,
        vpc_name=vpc_name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
    return ret
Example #54
def present(name=None,
            table_name=None,
            region=None,
            key=None,
            keyid=None,
            profile=None,
            read_capacity_units=None,
            write_capacity_units=None,
            alarms=None,
            alarms_from_pillar="boto_dynamodb_alarms",
            hash_key=None,
            hash_key_data_type=None,
            range_key=None,
            range_key_data_type=None,
            local_indexes=None,
            global_indexes=None,
            backup_configs_from_pillars='boto_dynamodb_backup_configs'):
    '''
    Ensure the DynamoDB table exists.  Note: all properties of the table
    can only be set during table creation.  Adding or changing
    indexes or key schema cannot be done after table creation.

    name
        Name of the DynamoDB table

    table_name
        Name of the DynamoDB table (deprecated)

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    read_capacity_units
        The read throughput for this table

    write_capacity_units
        The write throughput for this table

    hash_key
        The name of the attribute that will be used as the hash key
        for this table

    hash_key_data_type
        The DynamoDB datatype of the hash key

    range_key
        The name of the attribute that will be used as the range key
        for this table

    range_key_data_type
        The DynamoDB datatype of the range key

    local_indexes
        The local indexes you would like to create

    global_indexes
        The global indexes you would like to create

    backup_configs_from_pillars
        Pillars to use to configure DataPipeline backups
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if table_name:
        ret['warnings'] = ['boto_dynamodb.present: `table_name` is deprecated.'
                           ' Please use `name` instead.']
        ret['name'] = table_name
        name = table_name

    comments = []
    changes_old = {}
    changes_new = {}

    # Ensure DynamoDB table exists
    table_exists = __salt__['boto_dynamodb.exists'](
        name,
        region,
        key,
        keyid,
        profile
    )
    if not table_exists:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'DynamoDB table {0} is set to be created.'.format(name)
            return ret

        is_created = __salt__['boto_dynamodb.create_table'](
            name,
            region,
            key,
            keyid,
            profile,
            read_capacity_units,
            write_capacity_units,
            hash_key,
            hash_key_data_type,
            range_key,
            range_key_data_type,
            local_indexes,
            global_indexes
        )
        if not is_created:
            ret['result'] = False
            ret['comment'] = 'Failed to create table {0}'.format(name)
            return ret

        comments.append('DynamoDB table {0} was successfully created'.format(name))
        changes_new['table'] = name
        changes_new['read_capacity_units'] = read_capacity_units
        changes_new['write_capacity_units'] = write_capacity_units
        changes_new['hash_key'] = hash_key
        changes_new['hash_key_data_type'] = hash_key_data_type
        changes_new['range_key'] = range_key
        changes_new['range_key_data_type'] = range_key_data_type
        changes_new['local_indexes'] = local_indexes
        changes_new['global_indexes'] = global_indexes
    else:
        comments.append('DynamoDB table {0} exists'.format(name))

    # Ensure DynamoDB table provisioned throughput matches
    description = __salt__['boto_dynamodb.describe'](
        name,
        region,
        key,
        keyid,
        profile
    )
    provisioned_throughput = description.get('Table', {}).get('ProvisionedThroughput', {})
    current_write_capacity_units = provisioned_throughput.get('WriteCapacityUnits')
    current_read_capacity_units = provisioned_throughput.get('ReadCapacityUnits')
    throughput_matches = (current_write_capacity_units == write_capacity_units and
                          current_read_capacity_units == read_capacity_units)
    if not throughput_matches:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'DynamoDB table {0} is set to be updated.'.format(name)
            return ret

        is_updated = __salt__['boto_dynamodb.update'](
            name,
            throughput={
                'read': read_capacity_units,
                'write': write_capacity_units,
            },
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if not is_updated:
            ret['result'] = False
            ret['comment'] = 'Failed to update table {0}'.format(name)
            return ret

        comments.append('DynamoDB table {0} was successfully updated'.format(name))
        changes_old['read_capacity_units'] = current_read_capacity_units
        changes_old['write_capacity_units'] = current_write_capacity_units
        changes_new['read_capacity_units'] = read_capacity_units
        changes_new['write_capacity_units'] = write_capacity_units
    else:
        comments.append('DynamoDB table {0} throughput matches'.format(name))

    _ret = _alarms_present(name, alarms, alarms_from_pillar,
                           write_capacity_units, read_capacity_units,
                           region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret

    # Ensure backup datapipeline is present
    datapipeline_configs = copy.deepcopy(
        __salt__['pillar.get'](backup_configs_from_pillars, [])
    )
    for config in datapipeline_configs:
        datapipeline_ret = _ensure_backup_datapipeline_present(
            name=name,
            schedule_name=config['name'],
            period=config['period'],
            utc_hour=config['utc_hour'],
            s3_base_location=config['s3_base_location'],
        )
        if datapipeline_ret['result']:
            comments.append(datapipeline_ret['comment'])
            if datapipeline_ret.get('changes'):
                ret['changes']['backup_datapipeline_{0}'.format(config['name'])] = \
                    datapipeline_ret.get('changes')
        else:
            ret['comment'] = datapipeline_ret['comment']
            return ret

    if changes_old:
        ret['changes']['old'] = changes_old
    if changes_new:
        ret['changes']['new'] = changes_new
    ret['comment'] = ',\n'.join(comments)
    return ret
Example #55
def present(
        name,
        policy_document=None,
        path=None,
        policies=None,
        policies_from_pillars=None,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the IAM role exists.

    name
        Name of the IAM role.

    policy_document
        The policy that grants an entity permission to assume the role. (See http://boto.readthedocs.org/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    path
        The path to the instance profile. (See http://boto.readthedocs.org/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    policies
        A dict of IAM role policies.

    policies_from_pillars
        A list of pillars that contain role policy dicts. Policies in the
        pillars will be merged in the order defined in the list and key
        conflicts will be handled by later defined keys overriding earlier
        defined keys. The policies defined here will be merged with the
        policies defined in the policies argument. If keys conflict, the keys
        in the policies argument will override the keys defined in
        policies_from_pillars.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    _ret = _role_present(name, policy_document, path, region, key, keyid,
                         profile)
    if not policies:
        policies = {}
    if not policies_from_pillars:
        policies_from_pillars = []
    _policies = {}
    for policy in policies_from_pillars:
        _policy = __salt__['pillar.get'](policy)
        _policies.update(_policy)
    _policies.update(policies)
    ret['changes'] = _ret['changes']
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_present(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _instance_profile_associated(name, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _policies_present(name, _policies, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
    return ret