Example no. 1
 def test_type_schema(self):
     self.assertEqual(
         utils.type_schema('tester'),
         {'type': 'object',
          'additionalProperties': False,
          'required': ['type'],
          'properties': {
              'type': {'enum': ['tester']}}})
     res = utils.type_schema('tester', inherits=['tested'])
     self.assertIn({'$ref': 'tested'}, res['allOf'])
Example no. 2
 def test_type_schema(self):
     self.assertEqual(
         utils.type_schema("tester"),
         {
             "type": "object",
             "additionalProperties": False,
             "required": ["type"],
             "properties": {"type": {"enum": ["tester"]}},
         },
     )
     res = utils.type_schema("tester", inherits=["tested"])
     self.assertIn({"$ref": "tested"}, res["allOf"])
Example no. 3
 def test_type_schema(self):
     self.assertEqual(
         utils.type_schema('tester'),
         {'type': 'object',
          'additionalProperties': False,
          'required': ['type'],
          'properties': {
              'type': {'enum': ['tester']}}})
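Taken together, these assertions pin down the helper's contract. A minimal sketch of a `type_schema` implementation consistent with them (the real c7n helper supports more options, such as `rinherit` and `aliases`, which this sketch omits):

def type_schema(type_name, inherits=None, required=None, **props):
    """Build a JSON-schema fragment for a typed policy element (sketch)."""
    properties = {'type': {'enum': [type_name]}}
    properties.update(props)
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'required': ['type'] + list(required or ()),
        'properties': properties,
    }
    if inherits:
        # Combine with parent schemas via JSON-schema allOf references.
        schema = {'allOf': [{'$ref': ref} for ref in inherits] + [schema]}
    return schema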
Example no. 4
class IAMSummary(ValueFilter):
    """Return annotated account resource if iam summary filter matches.

    Some use cases include detecting root API keys or MFA usage.

    Example IAM summary with the matchable fields::

      {
            "UsersQuota": 5000,
            "GroupsPerUserQuota": 10,
            "AttachedPoliciesPerGroupQuota": 10,
            "PoliciesQuota": 1000,
            "GroupsQuota": 100,
            "InstanceProfiles": 0,
            "SigningCertificatesPerUserQuota": 2,
            "PolicySizeQuota": 5120,
            "PolicyVersionsInUseQuota": 10000,
            "RolePolicySizeQuota": 10240,
            "AccountSigningCertificatesPresent": 0,
            "Users": 5,
            "ServerCertificatesQuota": 20,
            "ServerCertificates": 0,
            "AssumeRolePolicySizeQuota": 2048,
            "Groups": 1,
            "MFADevicesInUse": 2,
            "RolesQuota": 250,
            "VersionsPerPolicyQuota": 5,
            "AccountAccessKeysPresent": 0,
            "Roles": 4,
            "AccountMFAEnabled": 1,
            "MFADevices": 3,
            "Policies": 3,
            "GroupPolicySizeQuota": 5120,
            "InstanceProfilesQuota": 100,
            "AccessKeysPerUserQuota": 2,
            "AttachedPoliciesPerRoleQuota": 10,
            "PolicyVersionsInUse": 5,
            "Providers": 0,
            "AttachedPoliciesPerUserQuota": 10,
            "UserPolicySizeQuota": 2048
      }

    For example, to determine whether an account either does not have
    root MFA enabled or has root API keys:

    .. code-block:: yaml

      policies:
        - name: root-keys-or-no-mfa
          resource: account
          filters:
            - type: iam-summary
              key: AccountMFAEnabled
              value: true
              op: eq
              value_type: swap
    """
    schema = type_schema('iam-summary', rinherit=ValueFilter.schema)

    permissions = ('iam:GetAccountSummary', )

    def process(self, resources, event=None):
        if not resources[0].get('c7n:iam_summary'):
            client = local_session(self.manager.session_factory).client('iam')
            resources[0]['c7n:iam_summary'] = client.get_account_summary(
            )['SummaryMap']
        if self.match(resources[0]['c7n:iam_summary']):
            return resources
        return []
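For reference, the summary map the filter caches under 'c7n:iam_summary' comes straight from the IAM API; a standalone boto3 sketch of the equivalent root-keys-or-no-mfa check:

import boto3

client = boto3.client('iam')
summary = client.get_account_summary()['SummaryMap']

# Mirrors the policy above: flag the account if root MFA is not
# enabled or root access keys are present.
if not summary.get('AccountMFAEnabled') or summary.get('AccountAccessKeysPresent'):
    print('account has root api keys or lacks root mfa')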
Example no. 5
class ConfigRuleMode(LambdaMode):
    """a lambda policy that executes as a config service rule.

    The policy is invoked on configuration changes to resources.

    See `AWS Config <https://aws.amazon.com/config/>`_ for more details.
    """
    cfg_event = None
    schema = utils.type_schema('config-rule', rinherit=LambdaMode.schema)

    def validate(self):
        super(ConfigRuleMode, self).validate()
        if not self.policy.resource_manager.resource_type.config_type:
            raise PolicyValidationError(
                "policy:%s AWS Config does not support resource-type:%s" % (
                    self.policy.name, self.policy.resource_type))
        if self.policy.data['mode'].get('pattern'):
            raise PolicyValidationError(
                "policy:%s AWS Config does not support event pattern filtering" % (
                    self.policy.name))

    def resolve_resources(self, event):
        source = self.policy.resource_manager.get_source('config')
        return [source.load_resource(self.cfg_event['configurationItem'])]

    def run(self, event, lambda_context):
        self.cfg_event = json.loads(event['invokingEvent'])
        cfg_item = self.cfg_event['configurationItem']
        evaluation = None
        resources = []

        # TODO config resource type matches policy check
        if event.get('eventLeftScope') or cfg_item['configurationItemStatus'] in (
                "ResourceDeleted",
                "ResourceNotRecorded",
                "ResourceDeletedNotRecorded"):
            evaluation = {
                'annotation': 'The rule does not apply.',
                'compliance_type': 'NOT_APPLICABLE'}

        if evaluation is None:
            resources = super(ConfigRuleMode, self).run(event, lambda_context)
            match = self.policy.data['mode'].get('match-compliant', False)
            self.policy.log.info(
                "found resources:%d match-compliant:%s", len(resources or ()), match)
            if (match and resources) or (not match and not resources):
                evaluation = {
                    'compliance_type': 'COMPLIANT',
                    'annotation': 'The resource is compliant with policy:%s.' % (
                        self.policy.name)}
            else:
                evaluation = {
                    'compliance_type': 'NON_COMPLIANT',
                    'annotation': 'Resource is not compliant with policy:%s' % (
                        self.policy.name)
                }

        client = utils.local_session(
            self.policy.session_factory).client('config')
        client.put_evaluations(
            Evaluations=[{
                'ComplianceResourceType': cfg_item['resourceType'],
                'ComplianceResourceId': cfg_item['resourceId'],
                'ComplianceType': evaluation['compliance_type'],
                'Annotation': evaluation['annotation'],
                # TODO ? if not applicable use current timestamp
                'OrderingTimestamp': cfg_item[
                    'configurationItemCaptureTime']}],
            ResultToken=event.get('resultToken', 'No token found.'))
        return resources
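For context, `run` consumes a standard Config rule invocation event; a hand-built event carrying the fields the method reads might look like this (values are illustrative):

import json

event = {
    'invokingEvent': json.dumps({
        'configurationItem': {
            'resourceType': 'AWS::EC2::Instance',
            'resourceId': 'i-0123456789abcdef0',
            'configurationItemStatus': 'OK',
            'configurationItemCaptureTime': '2020-01-01T00:00:00.000Z',
        }
    }),
    'resultToken': 'example-token',
}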
Example no. 6
class TagTrim(Action):
    """Automatically remove tags from an ec2 resource.

    EC2 Resources have a limit of 50 tags, in order to make
    additional tags space on a set of resources, this action can
    be used to remove enough tags to make the desired amount of
    space while preserving a given set of tags.

    .. code-block:: yaml

       policies:
         - name: ec2-tag-trim
           comment: |
             Any instances with 48 or more tags get tags removed until
             they match the target tag count, in this case 47, so that
             we free up a tag slot for another usage.
           resource: ec2
           filters:
                 # Filter down to resources that already have 48 or more
                 # tags, as we need space for 3 more; this also ensures
                 # that metrics reporting is correct for the policy.
               - type: value
                 key: "length(Tags)"
                 op: ge
                 value: 48
           actions:
              - type: tag-trim
                space: 3
                preserve:
                  - OwnerContact
                  - ASV
                  - CMDBEnvironment
                  - downtime
                  - custodian_status
    """
    max_tag_count = 50

    schema = utils.type_schema('tag-trim',
                               space={'type': 'integer'},
                               preserve={
                                   'type': 'array',
                                   'items': {
                                       'type': 'string'
                                   }
                               })
    schema_alias = True

    permissions = ('ec2:DeleteTags', )

    def process(self, resources):
        self.id_key = self.manager.get_model().id

        self.preserve = set(self.data.get('preserve', ()))
        self.space = self.data.get('space', 3)

        client = utils.local_session(self.manager.session_factory).client(
            self.manager.resource_type.service)

        futures = {}
        mid = self.manager.get_model().id

        with self.executor_factory(max_workers=2) as w:
            for r in resources:
                futures[w.submit(self.process_resource, client, r)] = r
            for f in as_completed(futures):
                if f.exception():
                    self.log.warning(
                        "Error processing tag-trim on resource:%s",
                        futures[f][mid])

    def process_resource(self, client, i):
        # Can't really go batch parallel here without some more complex
        # heuristics for matching and grouping resources by common tag
        # populations.
        tag_map = {
            t['Key']: t['Value']
            for t in i.get('Tags', []) if not t['Key'].startswith('aws:')
        }

        # Space == 0 means remove all but specified
        if self.space and len(tag_map) + self.space <= self.max_tag_count:
            return

        keys = set(tag_map)
        preserve = self.preserve.intersection(keys)
        candidates = keys - self.preserve

        if self.space:
            # Free up slots to fit
            remove = len(candidates) - (self.max_tag_count -
                                        (self.space + len(preserve)))
            candidates = list(sorted(candidates))[:remove]

        if not candidates:
            self.log.warning("Could not find any candidates to trim %s" %
                             i[self.id_key])
            return

        self.process_tag_removal(client, i, candidates)

    def process_tag_removal(self, client, resource, tags):
        self.manager.retry(client.delete_tags,
                           Tags=[{
                               'Key': c
                           } for c in tags],
                           Resources=[resource[self.id_key]],
                           DryRun=self.manager.config.dryrun)
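To make the trim arithmetic in `process_resource` concrete, a worked sketch with illustrative numbers:

max_tag_count = 50
space = 3        # slots to free up
total_tags = 49  # non-'aws:' tags currently on the resource
preserved = 5    # of those, tags matched by the preserve list

candidates = total_tags - preserved               # 44 removable tags
may_remain = max_tag_count - (space + preserved)  # 42 of them may stay
remove = candidates - may_remain                  # trim 2 tags
print(remove)  # 2, leaving 47 tags and 3 free slots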
Example no. 7
class PropagateTags(Action):
    """Propagate tags to an asg instances.

    In AWS changing an asg tag does not propagate to instances.

    This action exists to do that, and can also trim older tags
    not present on the asg anymore that are present on instances.


    :example:

        .. code-block:: yaml

            policies:
              - name: asg-propagate-required
                resource: asg
                filters:
                  - "tag:OwnerName": present
                actions:
                  - type: propagate-tags
                    tags:
                      - OwnerName
    """

    schema = type_schema('propagate-tags',
                         tags={
                             'type': 'array',
                             'items': {
                                 'type': 'string'
                             }
                         },
                         trim={'type': 'boolean'})
    permissions = ('ec2:DeleteTags', 'ec2:CreateTags')

    def validate(self):
        if not isinstance(self.data.get('tags', []), (list, tuple)):
            raise ValueError("No tags specified")
        return self

    def process(self, asgs):
        if not asgs:
            return
        if self.data.get('trim', False):
            self.instance_map = self.get_instance_map(asgs)
        with self.executor_factory(max_workers=10) as w:
            instance_count = sum(list(w.map(self.process_asg, asgs)))
            self.log.info("Applied tags to %d instances" % instance_count)

    def process_asg(self, asg):
        client = local_session(self.manager.session_factory).client('ec2')
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        tag_map = {
            t['Key']: t['Value']
            for t in asg.get('Tags', [])
            if t['PropagateAtLaunch'] and not t['Key'].startswith('aws:')
        }

        if self.data.get('tags'):
            tag_map = {
                k: v
                for k, v in tag_map.items() if k in self.data['tags']
            }

        tag_set = set(tag_map)
        if self.data.get('trim', False):
            instances = [self.instance_map[i] for i in instance_ids]
            self.prune_instance_tags(client, asg, tag_set, instances)
        if not self.manager.config.dryrun:
            client.create_tags(Resources=instance_ids,
                               Tags=[{
                                   'Key': k,
                                   'Value': v
                               } for k, v in tag_map.items()])
        return len(instance_ids)

    def prune_instance_tags(self, client, asg, tag_set, instances):
        """Remove tags present on all asg instances which are not present
        on the asg.
        """
        instance_tags = Counter()
        instance_count = len(instances)

        remove_tags = []
        extra_tags = []

        for i in instances:
            instance_tags.update([
                t['Key'] for t in i['Tags'] if not t['Key'].startswith('aws:')
            ])
        for k, v in instance_tags.items():
            if not v >= instance_count:
                extra_tags.append(k)
                continue
            if k not in tag_set:
                remove_tags.append(k)

        if remove_tags:
            log.debug(
                "Pruning asg:%s instances:%d of old tags: %s" %
                (asg['AutoScalingGroupName'], instance_count, remove_tags))
        if extra_tags:
            log.debug("Asg: %s has uneven tags population: %s" %
                      (asg['AutoScalingGroupName'], instance_tags))
        # Remove orphan tags
        remove_tags.extend(extra_tags)

        if not self.manager.config.dryrun:
            client.delete_tags(Resources=[i['InstanceId'] for i in instances],
                               Tags=[{
                                   'Key': t
                               } for t in remove_tags])

    def get_instance_map(self, asgs):
        instance_ids = [
            i['InstanceId'] for i in list(
                itertools.chain(
                    *[g['Instances'] for g in asgs if g['Instances']]))
        ]
        if not instance_ids:
            return {}
        return {
            i['InstanceId']: i
            for i in self.manager.get_resource_manager('ec2').get_resources(
                instance_ids)
        }
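A condensed sketch of the pruning decision in `prune_instance_tags`, on illustrative data:

from collections import Counter

asg_tag_set = {'OwnerName', 'Env'}   # tags propagated by the asg
instances_tag_keys = [               # tag keys seen per instance
    ['OwnerName', 'Env', 'Stale'],
    ['OwnerName', 'Env', 'Stale', 'Partial'],
    ['OwnerName', 'Env', 'Stale'],
]

counts = Counter()
for keys in instances_tag_keys:
    counts.update(keys)

remove_tags, extra_tags = [], []
for key, seen in counts.items():
    if seen < len(instances_tag_keys):
        extra_tags.append(key)   # unevenly populated -> orphan tag
    elif key not in asg_tag_set:
        remove_tags.append(key)  # on every instance, absent from the asg

print(remove_tags, extra_tags)  # ['Stale'] ['Partial']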
Example no. 8
class PullMode(PolicyExecutionMode):
    """Pull mode execution of a policy.

    Queries resources from cloud provider for filtering and actions.
    """

    schema = utils.type_schema('pull')

    def run(self, *args, **kw):
        if not self.policy.is_runnable():
            return []

        with self.policy.ctx:
            self.policy.log.debug(
                "Running policy:%s resource:%s region:%s c7n:%s",
                self.policy.name, self.policy.resource_type,
                self.policy.options.region or 'default',
                version)

            s = time.time()
            try:
                resources = self.policy.resource_manager.resources()
            except ResourceLimitExceeded as e:
                self.policy.log.error(str(e))
                self.policy.ctx.metrics.put_metric(
                    'ResourceLimitExceeded', e.selection_count, "Count")
                raise

            rt = time.time() - s
            self.policy.log.info(
                "policy:%s resource:%s region:%s count:%d time:%0.2f" % (
                    self.policy.name,
                    self.policy.resource_type,
                    self.policy.options.region,
                    len(resources), rt))
            self.policy.ctx.metrics.put_metric(
                "ResourceCount", len(resources), "Count", Scope="Policy")
            self.policy.ctx.metrics.put_metric(
                "ResourceTime", rt, "Seconds", Scope="Policy")
            self.policy._write_file(
                'resources.json', utils.dumps(resources, indent=2))

            if not resources:
                return []

            if self.policy.options.dryrun:
                self.policy.log.debug("dryrun: skipping actions")
                return resources

            at = time.time()
            for a in self.policy.resource_manager.actions:
                s = time.time()
                with self.policy.ctx.tracer.subsegment('action:%s' % a.type):
                    results = a.process(resources)
                self.policy.log.info(
                    "policy:%s action:%s"
                    " resources:%d"
                    " execution_time:%0.2f" % (
                        self.policy.name, a.name,
                        len(resources), time.time() - s))
                if results:
                    self.policy._write_file(
                        "action-%s" % a.name, utils.dumps(results))
            self.policy.ctx.metrics.put_metric(
                "ActionTime", time.time() - at, "Seconds", Scope="Policy")
            return resources
Example no. 9
class Suspend(Action):
    """Action to suspend ASG processes and instances

    AWS ASG suspend/resume and process docs: https://goo.gl/XYtKQ8

    :example:

        .. code-block:: yaml

            policies:
              - name: asg-suspend-processes
                resource: asg
                filters:
                  - "tag:SuspendTag": present
                actions:
                  - type: suspend
    """
    permissions = ("autoscaling:SuspendProcesses", "ec2:StopInstances")

    ASG_PROCESSES = [
        "Launch", "Terminate", "HealthCheck", "ReplaceUnhealthy",
        "AZRebalance", "AlarmNotification", "ScheduledActions",
        "AddToLoadBalancer"
    ]

    schema = type_schema('suspend',
                         exclude={
                             'type': 'array',
                             'title': 'ASG Processes to not suspend',
                             'items': {
                                 'enum': ASG_PROCESSES
                             }
                         })

    ASG_PROCESSES = set(ASG_PROCESSES)

    def process(self, asgs):
        original_count = len(asgs)
        asgs = [a for a in asgs if a['Instances']]
        self.log.debug("Filtered from %d to %d asgs with instances" %
                       (original_count, len(asgs)))
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    def process_asg(self, asg):
        """Multistep process to stop an asg aprori of setup

        - suspend processes
        - stop instances
        """
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        processes = list(
            self.ASG_PROCESSES.difference(self.data.get('exclude', ())))

        try:
            self.manager.retry(
                asg_client.suspend_processes,
                ScalingProcesses=processes,
                AutoScalingGroupName=asg['AutoScalingGroupName'])
        except ClientError as e:
            if e.response['Error']['Code'] == 'ValidationError':
                return
            raise
        ec2_client = session.client('ec2')
        try:
            instance_ids = [i['InstanceId'] for i in asg['Instances']]
            if not instance_ids:
                return
            retry = get_retry(
                ('RequestLimitExceeded', 'Client.RequestLimitExceeded'))
            retry(ec2_client.stop_instances, InstanceIds=instance_ids)
        except ClientError as e:
            if e.response['Error']['Code'] in ('InvalidInstanceID.NotFound',
                                               'IncorrectInstanceState'):
                log.warning("Erroring stopping asg instances %s %s" %
                            (asg['AutoScalingGroupName'], e))
                return
            raise
Example no. 10
class NotEncryptedFilter(Filter, LaunchConfigFilterBase):
    """Check if an ASG is configured to have unencrypted volumes.

    Checks both the ami snapshots and the launch configuration.

    :example:

        .. code-block:: yaml

            policies:
              - name: asg-unencrypted
                resource: asg
                filters:
                  - type: not-encrypted
                    exclude_image: true
    """
    schema = type_schema('not-encrypted', exclude_image={'type': 'boolean'})
    permissions = ('ec2:DescribeImages', 'ec2:DescribeSnapshots',
                   'autoscaling:DescribeLaunchConfigurations')

    images = unencrypted_configs = unencrypted_images = None

    # TODO: resource-manager, notfound err mgr

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(NotEncryptedFilter, self).process(asgs, event)

    def __call__(self, asg):
        cfg = self.configs.get(asg['LaunchConfigurationName'])
        if not cfg:
            self.log.warning("ASG %s instances: %d has missing config: %s",
                             asg['AutoScalingGroupName'],
                             len(asg['Instances']),
                             asg['LaunchConfigurationName'])
            return False
        unencrypted = []
        if (not self.data.get('exclude_image')
                and cfg['ImageId'] in self.unencrypted_images):
            unencrypted.append('Image')
        if cfg['LaunchConfigurationName'] in self.unencrypted_configs:
            unencrypted.append('LaunchConfig')
        if unencrypted:
            asg['Unencrypted'] = unencrypted
        return bool(unencrypted)

    def initialize(self, asgs):
        super(NotEncryptedFilter, self).initialize(asgs)
        ec2 = local_session(self.manager.session_factory).client('ec2')
        self.unencrypted_images = self.get_unencrypted_images(ec2)
        self.unencrypted_configs = self.get_unencrypted_configs(ec2)

    def _fetch_images(self, ec2, image_ids):
        while True:
            try:
                return ec2.describe_images(ImageIds=list(image_ids))
            except ClientError as e:
                if e.response['Error']['Code'] == 'InvalidAMIID.NotFound':
                    msg = e.response['Error']['Message']
                    e_ami_ids = [
                        e_ami_id.strip()
                        for e_ami_id in msg[msg.find("'[") +
                                            2:msg.rfind("]'")].split(',')
                    ]
                    self.log.warning(
                        "asg:not-encrypted filter image not found %s",
                        e_ami_ids)
                    for e_ami_id in e_ami_ids:
                        image_ids.remove(e_ami_id)
                    continue
                raise

    def get_unencrypted_images(self, ec2):
        """retrieve images which have unencrypted snapshots referenced."""
        image_ids = set()
        for cfg in self.configs.values():
            image_ids.add(cfg['ImageId'])

        self.log.debug("querying %d images", len(image_ids))
        results = self._fetch_images(ec2, image_ids)
        self.images = {i['ImageId']: i for i in results['Images']}

        unencrypted_images = set()
        for i in self.images.values():
            for bd in i['BlockDeviceMappings']:
                if 'Ebs' in bd and not bd['Ebs'].get('Encrypted'):
                    unencrypted_images.add(i['ImageId'])
                    break
        return unencrypted_images

    def get_unencrypted_configs(self, ec2):
        """retrieve configs that have unencrypted ebs voluems referenced."""
        unencrypted_configs = set()
        snaps = {}
        for cid, c in self.configs.items():
            image = self.images.get(c['ImageId'])
            # image deregistered/unavailable
            if image is not None:
                image_block_devs = {
                    bd['DeviceName']: bd['Ebs']
                    for bd in image['BlockDeviceMappings'] if 'Ebs' in bd
                }
            else:
                image_block_devs = {}
            for bd in c['BlockDeviceMappings']:
                if 'Ebs' not in bd:
                    continue
                # Launch configs can shadow image devices, images have
                # precedence.
                if bd['DeviceName'] in image_block_devs:
                    continue
                if 'SnapshotId' in bd['Ebs']:
                    snaps.setdefault(bd['Ebs']['SnapshotId'].strip(),
                                     []).append(cid)
                elif not bd['Ebs'].get('Encrypted'):
                    unencrypted_configs.add(cid)
        if not snaps:
            return unencrypted_configs

        self.log.debug("querying %d snapshots", len(snaps))
        for s in self.get_snapshots(ec2, list(snaps.keys())):
            if not s.get('Encrypted'):
                unencrypted_configs.update(snaps[s['SnapshotId']])
        return unencrypted_configs

    def get_snapshots(self, ec2, snap_ids):
        """get snapshots corresponding to id, but tolerant of missing."""
        while True:
            try:
                result = ec2.describe_snapshots(SnapshotIds=snap_ids)
            except ClientError as e:
                if e.response['Error']['Code'] == 'InvalidSnapshot.NotFound':
                    msg = e.response['Error']['Message']
                    e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
                    self.log.warning("Snapshot not found %s" % e_snap_id)
                    snap_ids.remove(e_snap_id)
                    continue
                raise
            else:
                return result.get('Snapshots', ())
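The retry loop in `_fetch_images` depends on slicing the offending AMI ids out of the error message; a standalone sketch of that parsing (the message text is an assumed example of the API's wording):

msg = "The image id '[ami-11111111, ami-22222222]' does not exist"

e_ami_ids = [
    e_ami_id.strip()
    for e_ami_id in msg[msg.find("'[") + 2:msg.rfind("]'")].split(',')
]
print(e_ami_ids)  # ['ami-11111111', 'ami-22222222']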
Example no. 11
class CopyRelatedResourceTag(Tag):
    """
    Copy a related resource tag to its associated resource

    In some scenarios, resource tags from a related resource should be applied
    to its child resource. For example, EBS Volume tags propagating to their
    snapshots. To use this action, specify the resource type that contains the
    tags that are to be copied, which can be found by using the
    `custodian schema` command.

    Then, specify the key on the resource that references the related resource.
    In the case of ebs-snapshot, the VolumeId attribute would be the key that
    identifies the related resource, ebs.

    Finally, specify a list of tag keys to copy from the related resource onto
    the original resource. The special character "*" can be used to signify that
    all tags from the related resource should be copied to the original resource.

    To raise an error when related resources cannot be found, set the
    `skip_missing` option to False. By default it is set to True, and
    missing related resources are skipped.

    :example:

    .. code-block:: yaml

            policies:
                - name: copy-tags-from-ebs-volume-to-snapshot
                  resource: ebs-snapshot
                  actions:
                    - type: copy-related-tag
                      resource: ebs
                      skip_missing: True
                      key: VolumeId
                      tags: '*'
    """

    schema = utils.type_schema(
        'copy-related-tag',
        resource={'type': 'string'},
        skip_missing={'type': 'boolean'},
        key={'type': 'string'},
        tags={'oneOf': [{
            'enum': ['*']
        }, {
            'type': 'array'
        }]},
        required=['tags', 'key', 'resource'])
    schema_alias = True

    def get_permissions(self):
        return self.manager.action_registry.get('tag').permissions

    def validate(self):
        related_resource = self.data['resource']
        if related_resource not in aws_resources.keys():
            raise PolicyValidationError(
                "Error: Invalid resource type selected: %s" % related_resource)
        # ideally should never raise here since we shouldn't be applying this
        # action to a resource if it doesn't have a tag action implemented
        if self.manager.action_registry.get('tag') is None:
            raise PolicyValidationError(
                "Error: Tag action missing on resource")
        return self

    def process(self, resources):
        related_resources = []
        for rrid, r in zip(
                jmespath.search('[].[%s]' % self.data['key'], resources),
                resources):
            related_resources.append((rrid[0], r))
        related_ids = set([r[0] for r in related_resources])
        missing = False
        if None in related_ids:
            missing = True
            related_ids.discard(None)
        related_tag_map = self.get_resource_tag_map(self.data['resource'],
                                                    related_ids)

        missing_related_tags = related_ids.difference(related_tag_map.keys())
        if not self.data.get('skip_missing', True) and (missing_related_tags
                                                        or missing):
            raise PolicyExecutionError(
                "Unable to find all %d %s related resources tags %d missing" %
                (len(related_ids), self.data['resource'],
                 len(missing_related_tags) + int(missing)))

        # rely on resource manager tag action implementation as it can differ between resources
        tag_action = self.manager.action_registry.get('tag')({}, self.manager)
        tag_action.id_key = tag_action.manager.get_model().id
        client = tag_action.get_client()

        stats = Counter()

        for related, r in related_resources:
            if (related is None or related in missing_related_tags
                    or not related_tag_map[related]):
                stats['missing'] += 1
            elif self.process_resource(client, r, related_tag_map[related],
                                       self.data['tags'], tag_action):
                stats['tagged'] += 1
            else:
                stats['unchanged'] += 1

        self.log.info(
            'Tagged %d resources from related, missing-skipped %d unchanged %d',
            stats['tagged'], stats['missing'], stats['unchanged'])

    def process_resource(self, client, r, related_tags, tag_keys, tag_action):
        tags = {}
        resource_tags = {
            t['Key']: t['Value']
            for t in r.get('Tags', []) if not t['Key'].startswith('aws:')
        }

        if tag_keys == '*':
            tags = {
                k: v
                for k, v in related_tags.items() if resource_tags.get(k) != v
            }
        else:
            tags = {
                k: v
                for k, v in related_tags.items()
                if k in tag_keys and resource_tags.get(k) != v
            }
        if not tags:
            return
        if not isinstance(tag_action, UniversalTag):
            tags = [{'Key': k, 'Value': v} for k, v in tags.items()]
        tag_action.process_resource_set(client, resource_set=[r], tags=tags)
        return True

    def get_resource_tag_map(self, r_type, ids):
        """
        Returns a mapping of {resource_id: {tagkey: tagvalue}}
        """
        manager = self.manager.get_resource_manager(r_type)
        r_id = manager.resource_type.id

        return {
            r[r_id]: {t['Key']: t['Value']
                      for t in r.get('Tags', [])}
            for r in manager.get_resources(list(ids))
        }

    @classmethod
    def register_resources(klass, registry, resource_class):
        if not resource_class.action_registry.get('tag'):
            return
        resource_class.action_registry.register('copy-related-tag', klass)
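A small sketch of the key-extraction step at the top of `process`, with illustrative resource data:

import jmespath

resources = [
    {'SnapshotId': 'snap-1', 'VolumeId': 'vol-1'},
    {'SnapshotId': 'snap-2', 'VolumeId': None},
]

# Mirrors: jmespath.search('[].[%s]' % self.data['key'], resources)
pairs = list(zip(jmespath.search('[].[VolumeId]', resources), resources))
related_ids = {rrid[0] for rrid, r in pairs}
print(related_ids)  # {'vol-1', None}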
Example no. 12
class IsSSLFilter(Filter):

    schema = type_schema('is-ssl')

    def process(self, balancers, event=None):
        return [b for b in balancers if is_ssl(b)]
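For context, `is_ssl` is a helper defined elsewhere in the module; a plausible sketch of such a check over an ELB's listener descriptions (this sketch is an assumption, not necessarily the module's actual helper):

def is_ssl(b):
    # An ELB is considered SSL if any listener uses a secure protocol.
    for ld in b.get('ListenerDescriptions', ()):
        if ld['Listener']['Protocol'] in ('HTTPS', 'SSL'):
            return True
    return False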
Example no. 13
class TagDelayedAction(Action):
    """Tag resources for future action.

    The optional 'tz' parameter can be used to adjust the clock to align
    with a given timezone. The default value is 'utc'.

    If neither 'days' nor 'hours' is specified, Cloud Custodian will default
    to marking the resource for action 4 days in the future.

    .. code-block:: yaml

      policies:
        - name: ec2-mark-for-stop-in-future
          resource: ec2
          filters:
            - type: value
              key: Name
              value: instance-to-stop-in-four-days
          actions:
            - type: mark-for-op
              op: stop
    """

    schema = utils.type_schema('mark-for-op',
                               tag={'type': 'string'},
                               msg={'type': 'string'},
                               days={
                                   'type': 'integer',
                                   'minimum': 0,
                                   'exclusiveMinimum': False
                               },
                               hours={
                                   'type': 'integer',
                                   'minimum': 0,
                                   'exclusiveMinimum': False
                               },
                               tz={'type': 'string'},
                               op={'type': 'string'})
    schema_alias = True

    batch_size = 200
    concurrency = 2

    default_template = 'Resource does not meet policy: {op}@{action_date}'

    def get_permissions(self):
        return self.manager.action_registry['tag'].permissions

    def validate(self):
        op = self.data.get('op')
        if self.manager and op not in self.manager.action_registry.keys():
            raise PolicyValidationError(
                "mark-for-op specifies invalid op:%s in %s" %
                (op, self.manager.data))

        self.tz = tzutil.gettz(Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
        if not self.tz:
            raise PolicyValidationError("Invalid timezone specified %s in %s" %
                                        (self.tz, self.manager.data))
        return self

    def generate_timestamp(self, days, hours):
        n = datetime.now(tz=self.tz)
        if days is None and hours is None:
            # maintains the documented default of marking 4 days out
            days = 4
        days, hours = days or 0, hours or 0
        action_date = n + timedelta(days=days, hours=hours)
        if hours > 0:
            action_date_string = action_date.strftime('%Y/%m/%d %H%M %Z')
        else:
            action_date_string = action_date.strftime('%Y/%m/%d')

        return action_date_string

    def get_config_values(self):
        d = {
            'op': self.data.get('op', 'stop'),
            'tag': self.data.get('tag', DEFAULT_TAG),
            'msg': self.data.get('msg', self.default_template),
            'tz': self.data.get('tz', 'utc'),
            'days': self.data.get('days', 0),
            'hours': self.data.get('hours', 0)
        }
        d['action_date'] = self.generate_timestamp(d['days'], d['hours'])
        return d

    def process(self, resources):
        cfg = self.get_config_values()
        self.tz = tzutil.gettz(Time.TZ_ALIASES.get(cfg['tz']))
        self.id_key = self.manager.get_model().id

        msg = cfg['msg'].format(op=cfg['op'], action_date=cfg['action_date'])

        self.log.info("Tagging %d resources for %s on %s" %
                      (len(resources), cfg['op'], cfg['action_date']))

        tags = [{'Key': cfg['tag'], 'Value': msg}]

        # if the tag implementation has a specified batch size, it's typically
        # due to some constraint on the api, so we defer to that.
        batch_size = getattr(self.manager.action_registry.get('tag'),
                             'batch_size', self.batch_size)

        client = self.get_client()
        _common_tag_processer(self.executor_factory, batch_size,
                              self.concurrency, client,
                              self.process_resource_set, self.id_key,
                              resources, tags, self.log)

    def process_resource_set(self, client, resource_set, tags):
        tagger = self.manager.action_registry['tag']({}, self.manager)
        tagger.process_resource_set(client, resource_set, tags)

    def get_client(self):
        return utils.local_session(self.manager.session_factory).client(
            self.manager.resource_type.service)
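A quick sketch of the timestamp formats `generate_timestamp` produces (the '%Z' suffix is dropped here since the sketch uses a naive datetime):

from datetime import datetime, timedelta

n = datetime(2020, 6, 1, 12, 0)
print((n + timedelta(days=4)).strftime('%Y/%m/%d'))        # 2020/06/05
print((n + timedelta(hours=2)).strftime('%Y/%m/%d %H%M'))  # 2020/06/01 1400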
Example no. 14
class NormalizeTag(Action):
    """Transform the value of a tag.

    Set the tag value to uppercase, title, or lowercase, or strip text
    from a tag value.

    .. code-block:: yaml

        policies:
          - name: ec2-service-transform-lower
            resource: ec2
            comment: |
              ec2-service-tag-value-to-lower
            query:
              - instance-state-name: running
            filters:
              - "tag:testing8882": present
            actions:
              - type: normalize-tag
                key: lower_key
                action: lower

          - name: ec2-service-strip
            resource: ec2
            comment: |
              ec2-service-tag-strip-blah
            query:
              - instance-state-name: running
            filters:
              - "tag:testing8882": present
            actions:
              - type: normalize-tag
                key: strip_key
                action: strip
                value: blah

    """

    schema_alias = True
    schema = utils.type_schema(
        'normalize-tag',
        key={'type': 'string'},
        action={
            'type': 'string',
            'enum': ['upper', 'lower', 'title', 'strip', 'replace']
        },
        value={'type': 'string'})

    permissions = ('ec2:CreateTags', )

    def create_tag(self, client, ids, key, value):

        self.manager.retry(client.create_tags,
                           Resources=ids,
                           Tags=[{
                               'Key': key,
                               'Value': value
                           }])

    def process_transform(self, tag_value, resource_set):
        """
        Transform tag value

        - Collect value from tag
        - Transform Tag value
        - Assign new value for key
        """
        self.log.info("Transforming tag value on %s instances" %
                      (len(resource_set)))
        key = self.data.get('key')

        c = utils.local_session(self.manager.session_factory).client('ec2')

        self.create_tag(c, [
            r[self.id_key] for r in resource_set if len(r.get('Tags', [])) < 50
        ], key, tag_value)

    def create_set(self, instances):
        key = self.data.get('key', None)
        resource_set = {}
        for r in instances:
            tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
            if tags[key] not in resource_set:
                resource_set[tags[key]] = []
            resource_set[tags[key]].append(r)
        return resource_set

    def filter_resources(self, resources):
        # Mutating the list while iterating skips elements; build a new
        # list of resources that actually carry the target tag key.
        key = self.data.get('key', None)
        return [
            r for r in resources
            if key in {t['Key'] for t in r.get('Tags', [])}
        ]

    def process(self, resources):
        count = len(resources)
        resources = self.filter_resources(resources)
        self.log.info("Filtered from %s resources to %s" %
                      (count, len(resources)))
        self.id_key = self.manager.get_model().id
        resource_set = self.create_set(resources)
        with self.executor_factory(max_workers=3) as w:
            futures = []
            for r in resource_set:
                action = self.data.get('action')
                value = self.data.get('value')
                new_value = False
                if action == 'lower' and not r.islower():
                    new_value = r.lower()
                elif action == 'upper' and not r.isupper():
                    new_value = r.upper()
                elif action == 'title' and not r.istitle():
                    new_value = r.title()
                elif action == 'strip' and value and value in r:
                    # Note: str.strip treats value as a set of characters
                    # to remove from both ends, not as a substring.
                    new_value = r.strip(value)
                if new_value:
                    futures.append(
                        w.submit(self.process_transform, new_value,
                                 resource_set[r]))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error("Exception transforming tag set \n %s" %
                                   (f.exception()))
        return resources
Example no. 15
class RenameTag(Action):
    """ Create a new tag with identical value & remove old tag
    """

    schema = utils.type_schema('rename-tag',
                               old_key={'type': 'string'},
                               new_key={'type': 'string'})
    schema_alias = True

    permissions = ('ec2:CreateTags', 'ec2:DeleteTags')

    tag_count_max = 50

    def delete_tag(self, client, ids, key, value):
        client.delete_tags(Resources=ids, Tags=[{'Key': key, 'Value': value}])

    def create_tag(self, client, ids, key, value):
        client.create_tags(Resources=ids, Tags=[{'Key': key, 'Value': value}])

    def process_rename(self, client, tag_value, resource_set):
        """
        Move source tag value to destination tag value

        - Collect value from old tag
        - Delete old tag
        - Create new tag & assign stored value
        """
        self.log.info("Renaming tag on %s instances" % (len(resource_set)))
        old_key = self.data.get('old_key')
        new_key = self.data.get('new_key')

        # We prefer to create the new tag first when possible
        resource_ids = [
            r[self.id_key] for r in resource_set
            if len(r.get('Tags', [])) < self.tag_count_max
        ]
        if resource_ids:
            self.create_tag(client, resource_ids, new_key, tag_value)

        self.delete_tag(client, [r[self.id_key] for r in resource_set],
                        old_key, tag_value)

        # For resources with 50 tags, we need to delete first and then create.
        resource_ids = [
            r[self.id_key] for r in resource_set
            if len(r.get('Tags', [])) > self.tag_count_max - 1
        ]
        if resource_ids:
            self.create_tag(client, resource_ids, new_key, tag_value)

    def create_set(self, instances):
        old_key = self.data.get('old_key', None)
        resource_set = {}
        for r in instances:
            tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
            if tags[old_key] not in resource_set:
                resource_set[tags[old_key]] = []
            resource_set[tags[old_key]].append(r)
        return resource_set

    def filter_resources(self, resources):
        # Mutating the list while iterating skips elements; build a new
        # list of resources that actually carry the old tag key.
        old_key = self.data.get('old_key', None)
        return [
            r for r in resources
            if old_key in {t['Key'] for t in r.get('Tags', [])}
        ]

    def process(self, resources):
        count = len(resources)
        resources = self.filter_resources(resources)
        self.log.info("Filtered from %s resources to %s" %
                      (count, len(resources)))
        self.id_key = self.manager.get_model().id
        resource_set = self.create_set(resources)

        client = self.get_client()
        with self.executor_factory(max_workers=3) as w:
            futures = []
            for r in resource_set:
                futures.append(
                    w.submit(self.process_rename, client, r, resource_set[r]))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error("Exception renaming tag set \n %s" %
                                   (f.exception()))
        return resources

    def get_client(self):
        return utils.local_session(self.manager.session_factory).client(
            self.manager.resource_type.service)
Example no. 16
class Tag(Action):
    """Tag an ec2 resource.
    """

    batch_size = 25
    concurrency = 2

    schema = utils.type_schema(
        'tag',
        aliases=('mark', ),
        tags={'type': 'object'},
        key={'type': 'string'},
        value={'type': 'string'},
        tag={'type': 'string'},
    )
    schema_alias = True
    permissions = ('ec2:CreateTags', )
    id_key = None

    def validate(self):
        if self.data.get('key') and self.data.get('tag'):
            raise PolicyValidationError(
                "Can't specify both key and tag, choose one in %s" %
                (self.manager.data, ))
        return self

    def process(self, resources):
        # Legacy
        msg = self.data.get('msg')
        msg = self.data.get('value') or msg

        tag = self.data.get('tag', DEFAULT_TAG)
        tag = self.data.get('key') or tag

        # Support setting multiple tags in a single go with a mapping
        tags = self.data.get('tags')

        if tags is None:
            tags = []
        else:
            tags = [{'Key': k, 'Value': v} for k, v in tags.items()]

        if msg:
            tags.append({'Key': tag, 'Value': msg})

        self.interpolate_values(tags)

        batch_size = self.data.get('batch_size', self.batch_size)

        client = self.get_client()
        _common_tag_processer(self.executor_factory, batch_size,
                              self.concurrency, client,
                              self.process_resource_set, self.id_key,
                              resources, tags, self.log)

    def process_resource_set(self, client, resource_set, tags):
        mid = self.manager.get_model().id
        self.manager.retry(client.create_tags,
                           Resources=[v[mid] for v in resource_set],
                           Tags=tags,
                           DryRun=self.manager.config.dryrun)

    def interpolate_values(self, tags):
        params = {
            'account_id': self.manager.config.account_id,
            'now': utils.FormatDate.utcnow(),
            'region': self.manager.config.region
        }
        for t in tags:
            t['Value'] = t['Value'].format(**params)

    def get_client(self):
        return utils.local_session(self.manager.session_factory).client(
            self.manager.resource_type.service)
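The placeholders `interpolate_values` supports are account_id, now, and region; a quick sketch with illustrative values:

tags = [{'Key': 'owner', 'Value': 'acct-{account_id}-{region}'}]
params = {
    'account_id': '123456789012',
    'now': '2020-06-01',
    'region': 'us-east-1',
}
for t in tags:
    t['Value'] = t['Value'].format(**params)
print(tags)  # [{'Key': 'owner', 'Value': 'acct-123456789012-us-east-1'}]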
Example no. 17
class TagActionFilter(Filter):
    """Filter resources for tag specified future action

    Filters resources by a 'custodian_status' tag which specifies a future
    date for an action.

    The filter parses the tag values looking for an 'op@date'
    string. The date is parsed and compared to today's date; the
    filter succeeds if today's date is greater than or equal to the
    target date.

    The optional 'skew' parameter provides for incrementing today's
    date a number of days into the future. An example use case might
    be sending a final notice email a few days before terminating an
    instance, or snapshotting a volume prior to deletion.

    The optional 'skew_hours' parameter provides for incrementing the current
    time a number of hours into the future.

    Optionally, the 'tz' parameter can be used to specify the timezone
    in which to interpret the clock (the default value is 'utc').

    .. code-block:: yaml

      policies:
        - name: ec2-stop-marked
          resource: ec2
          filters:
            - type: marked-for-op
              # The default tag used is custodian_status
              # but that is configurable
              tag: custodian_status
              op: stop
              # Another optional parameter is skew
              tz: utc
          actions:
            - type: stop

    """
    schema = utils.type_schema('marked-for-op',
                               tag={'type': 'string'},
                               tz={'type': 'string'},
                               skew={
                                   'type': 'number',
                                   'minimum': 0
                               },
                               skew_hours={
                                   'type': 'number',
                                   'minimum': 0
                               },
                               op={'type': 'string'})
    schema_alias = True

    current_date = None

    def validate(self):
        op = self.data.get('op')
        if self.manager and op not in self.manager.action_registry.keys():
            raise PolicyValidationError("Invalid marked-for-op op:%s in %s" %
                                        (op, self.manager.data))

        tz = tzutil.gettz(Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
        if not tz:
            raise PolicyValidationError(
                "Invalid timezone specified '%s' in %s" %
                (self.data.get('tz'), self.manager.data))
        return self

    def __call__(self, i):
        tag = self.data.get('tag', DEFAULT_TAG)
        op = self.data.get('op', 'stop')
        skew = self.data.get('skew', 0)
        skew_hours = self.data.get('skew_hours', 0)
        tz = tzutil.gettz(Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))

        v = None
        for n in i.get('Tags', ()):
            if n['Key'] == tag:
                v = n['Value']
                break

        if v is None:
            return False
        if ':' not in v or '@' not in v:
            return False

        msg, tgt = v.rsplit(':', 1)
        action, action_date_str = tgt.strip().split('@', 1)

        if action != op:
            return False

        try:
            action_date = parse(action_date_str)
        except Exception:
            self.log.warning("could not parse tag:%s value:%s on %s" %
                             (tag, v, i['InstanceId']))
            return False

        if self.current_date is None:
            self.current_date = datetime.now()

        if action_date.tzinfo:
            # if action_date is timezone aware, set to timezone provided
            action_date = action_date.astimezone(tz)
            self.current_date = datetime.now(tz=tz)

        return self.current_date >= (action_date -
                                     timedelta(days=skew, hours=skew_hours))
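A standalone sketch of the tag-value parsing in `__call__`, using the default message template from the mark-for-op action:

v = 'Resource does not meet policy: stop@2020/06/01'

msg, tgt = v.rsplit(':', 1)
action, action_date_str = tgt.strip().split('@', 1)
print(action, action_date_str)  # stop 2020/06/01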
Example no. 18
class ServiceLimit(Filter):
    """Check if account's service limits are past a given threshold.

    Supported limits are those reported by Trusted Advisor, which vary
    based on usage in the account and the support level enabled on the
    account.

      - service: AutoScaling limit: Auto Scaling groups
      - service: AutoScaling limit: Launch configurations
      - service: EBS limit: Active snapshots
      - service: EBS limit: Active volumes
      - service: EBS limit: General Purpose (SSD) volume storage (GiB)
      - service: EBS limit: Magnetic volume storage (GiB)
      - service: EBS limit: Provisioned IOPS
      - service: EBS limit: Provisioned IOPS (SSD) storage (GiB)
      - service: EC2 limit: Elastic IP addresses (EIPs)

      # Note this is extant for each active instance type in the account,
      # however the total value is against the sum of all instance types.
      # see issue https://github.com/capitalone/cloud-custodian/issues/516

      - service: EC2 limit: On-Demand instances - m3.medium

      - service: EC2 limit: Reserved Instances - purchase limit (monthly)
      - service: ELB limit: Active load balancers
      - service: IAM limit: Groups
      - service: IAM limit: Instance profiles
      - service: IAM limit: Roles
      - service: IAM limit: Server certificates
      - service: IAM limit: Users
      - service: RDS limit: DB instances
      - service: RDS limit: DB parameter groups
      - service: RDS limit: DB security groups
      - service: RDS limit: DB snapshots per user
      - service: RDS limit: Storage quota (GB)
      - service: RDS limit: Internet gateways
      - service: SES limit: Daily sending quota
      - service: VPC limit: VPCs
      - service: VPC limit: VPC Elastic IP addresses (EIPs)

    :example:

        .. code-block:: yaml

            policies:
              - name: account-service-limits
                resource: account
                filters:
                  - type: service-limit
                    services:
                      - IAM
                    threshold: 1.0
    """

    schema = type_schema('service-limit',
                         threshold={'type': 'number'},
                         refresh_period={'type': 'integer'},
                         limits={
                             'type': 'array',
                             'items': {
                                 'type': 'string'
                             }
                         },
                         services={
                             'type': 'array',
                             'items': {
                                 'enum': [
                                     'EC2', 'ELB', 'VPC', 'AutoScaling', 'RDS',
                                     'EBS', 'SES', 'IAM'
                                 ]
                             }
                         })

    permissions = ('support:DescribeTrustedAdvisorCheckResult', )
    check_id = 'eW7HH0l7J9'
    check_limit = ('region', 'service', 'check', 'limit', 'extant', 'color')

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('support')
        checks = client.describe_trusted_advisor_check_result(
            checkId=self.check_id, language='en')['result']

        resources[0]['c7n:ServiceLimits'] = checks
        delta = timedelta(self.data.get('refresh_period', 1))
        check_date = parse_date(checks['timestamp'])
        if datetime.now(tz=tzutc()) - delta > check_date:
            client.refresh_trusted_advisor_check(checkId=self.check_id)
        threshold = self.data.get('threshold')

        services = self.data.get('services')
        limits = self.data.get('limits')
        exceeded = []

        for resource in checks['flaggedResources']:
            if threshold is None and resource['status'] == 'ok':
                continue
            limit = dict(zip(self.check_limit, resource['metadata']))
            if services and limit['service'] not in services:
                continue
            if limits and limit['check'] not in limits:
                continue
            limit['status'] = resource['status']
            limit['percentage'] = float(limit['extant'] or 0) / float(
                limit['limit']) * 100
            if threshold and limit['percentage'] < threshold:
                continue
            exceeded.append(limit)
        if exceeded:
            resources[0]['c7n:ServiceLimitsExceeded'] = exceeded
            return resources
        return []
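A worked sketch of the percentage computation against a flagged resource's metadata (numbers are illustrative):

check_limit = ('region', 'service', 'check', 'limit', 'extant', 'color')
metadata = ['us-east-1', 'IAM', 'Roles', '250', '245', 'Yellow']

limit = dict(zip(check_limit, metadata))
limit['percentage'] = float(limit['extant'] or 0) / float(limit['limit']) * 100
print(limit['percentage'])  # 98.0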
Example no. 19
class AutoscalerSet(MethodAction):
    """
    `Patches <https://cloud.google.com/compute/docs/reference/rest/v1/autoscalers/patch>`_
    configuration parameters for the autoscaling algorithm.

    The `coolDownPeriodSec` specifies the number of seconds that the autoscaler
    should wait before it starts collecting information from a new instance.

    The `cpuUtilization.utilizationTarget` specifies the target CPU utilization that the
    autoscaler should maintain.

    The `loadBalancingUtilization.utilizationTarget` specifies fraction of backend capacity
    utilization (set in HTTP(S) load balancing configuration) that autoscaler should maintain.

    The `minNumReplicas` specifies the minimum number of replicas that the autoscaler can
    scale down to.

    The `maxNumReplicas` specifies the maximum number of instances that the autoscaler can
    scale up to.

    :Example:

    .. code-block:: yaml

        policies:
          - name: gcp-autoscaler-set
            resource: gcp.autoscaler
            filters:
              - type: value
                key: name
                value: instance-group-2
            actions:
              - type: set
                coolDownPeriodSec: 20
                cpuUtilization:
                  utilizationTarget: 0.7
                loadBalancingUtilization:
                  utilizationTarget: 0.7
                minNumReplicas: 1
                maxNumReplicas: 4
    """
    schema = type_schema(
        'set', **{
            'coolDownPeriodSec': {
                'type': 'integer',
                'minimum': 15
            },
            'cpuUtilization': {
                'type': 'object',
                'required': ['utilizationTarget'],
                'properties': {
                    'utilizationTarget': {
                        'type': 'number',
                        'exclusiveMinimum': 0,
                        'maximum': 1
                    }
                },
            },
            'loadBalancingUtilization': {
                'type': 'object',
                'required': ['utilizationTarget'],
                'properties': {
                    'utilizationTarget': {
                        'type': 'number',
                        'exclusiveMinimum': 0,
                        'maximum': 1
                    }
                }
            },
            'maxNumReplicas': {
                'type': 'integer',
                'exclusiveMinimum': 0
            },
            'minNumReplicas': {
                'type': 'integer',
                'exclusiveMinimum': 0
            }
        })
    method_spec = {'op': 'patch'}
    path_param_re = re.compile(
        '.*?/projects/(.*?)/zones/(.*?)/autoscalers/(.*)')
    method_perm = 'update'

    def get_resource_params(self, model, resource):
        project, zone, autoscaler = self.path_param_re.match(
            resource['selfLink']).groups()
        body = {}

        if 'coolDownPeriodSec' in self.data:
            body['coolDownPeriodSec'] = self.data['coolDownPeriodSec']

        if 'cpuUtilization' in self.data:
            body['cpuUtilization'] = self.data['cpuUtilization']

        if 'loadBalancingUtilization' in self.data:
            body['loadBalancingUtilization'] = self.data[
                'loadBalancingUtilization']

        if 'maxNumReplicas' in self.data:
            body['maxNumReplicas'] = self.data['maxNumReplicas']

        if 'minNumReplicas' in self.data:
            body['minNumReplicas'] = self.data['minNumReplicas']

        result = {
            'project': project,
            'zone': zone,
            'autoscaler': autoscaler,
            'body': {
                'autoscalingPolicy': body
            }
        }

        return result
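
For reference, a sketch of what `get_resource_params` would produce for the
sample policy above; the selfLink, project, and zone values are hypothetical:

import re

path_param_re = re.compile(
    '.*?/projects/(.*?)/zones/(.*?)/autoscalers/(.*)')

self_link = ('https://www.googleapis.com/compute/v1/projects/my-project'
             '/zones/us-central1-a/autoscalers/instance-group-2')
project, zone, autoscaler = path_param_re.match(self_link).groups()

# Only the keys present in the policy data end up in the patch body.
body = {'coolDownPeriodSec': 20, 'minNumReplicas': 1, 'maxNumReplicas': 4}
request = {
    'project': project,        # 'my-project'
    'zone': zone,              # 'us-central1-a'
    'autoscaler': autoscaler,  # 'instance-group-2'
    'body': {'autoscalingPolicy': body},
}
print(request)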
Exemplo n.º 20
0
class CloudTrailEnabled(Filter):
    """Verify cloud trail enabled for this account per specifications.

    Returns an annotated account resource if trail is not enabled.

    :example:

        .. code-block:: yaml

            policies:
              - name: account-cloudtrail-enabled
                resource: account
                region: us-east-1
                filters:
                  - type: check-cloudtrail
                    global-events: true
                    multi-region: true
                    running: true
    """
    schema = type_schema(
        'check-cloudtrail', **{
            'multi-region': {
                'type': 'boolean'
            },
            'global-events': {
                'type': 'boolean'
            },
            'running': {
                'type': 'boolean'
            },
            'notifies': {
                'type': 'boolean'
            },
            'file-digest': {
                'type': 'boolean'
            },
            'kms': {
                'type': 'boolean'
            },
            'kms-key': {
                'type': 'string'
            }
        })

    permissions = ('cloudtrail:DescribeTrails', 'cloudtrail:GetTrailStatus')

    def process(self, resources, event=None):
        client = local_session(
            self.manager.session_factory).client('cloudtrail')
        trails = client.describe_trails()['trailList']
        resources[0]['c7n:cloudtrails'] = trails
        if self.data.get('global-events'):
            trails = [t for t in trails if t.get('IncludeGlobalServiceEvents')]
        if self.data.get('kms'):
            trails = [t for t in trails if t.get('KmsKeyId')]
        if self.data.get('kms-key'):
            trails = [
                t for t in trails
                if t.get('KmsKeyId', '') == self.data['kms-key']
            ]
        if self.data.get('file-digest'):
            trails = [t for t in trails if t.get('LogFileValidationEnabled')]
        if self.data.get('multi-region'):
            trails = [t for t in trails if t.get('IsMultiRegionTrail')]
        if self.data.get('notifies'):
            trails = [t for t in trails if t.get('SNSTopicArn')]
        if self.data.get('running', True):
            running = []
            for t in list(trails):
                t['Status'] = status = client.get_trail_status(
                    Name=t['TrailARN'])
                if status['IsLogging'] and not status.get(
                        'LatestDeliveryError'):
                    running.append(t)
            trails = running
        if trails:
            return []
        return resources
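
Note the inverted return convention above: each enabled option narrows
`trails` down to the compliant ones, and the account is returned (i.e.
flagged) only when no trail survives every check. A toy illustration with
fabricated trail records:

trails = [
    {'Name': 'a', 'IsMultiRegionTrail': False, 'IncludeGlobalServiceEvents': True},
    {'Name': 'b', 'IsMultiRegionTrail': True, 'IncludeGlobalServiceEvents': True},
]
data = {'multi-region': True, 'global-events': True}

if data.get('global-events'):
    trails = [t for t in trails if t.get('IncludeGlobalServiceEvents')]
if data.get('multi-region'):
    trails = [t for t in trails if t.get('IsMultiRegionTrail')]

# A non-empty list means at least one compliant trail exists, so the
# account is NOT returned; an empty list flags the account.
print(bool(trails))  # True -> the account passes the filter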
Exemplo n.º 21
0
class Start(InstanceAction):

    schema = type_schema('start')
    method_spec = {'op': 'start'}
    attr_filter = ('status', ('TERMINATED', ))
Exemplo n.º 22
0
class Resume(Action):
    """Resume a suspended autoscale group and its instances

    The 'delay' parameter is the amount of time (in seconds) to wait after
    restarting the group's instances before resuming the ASG's suspended
    processes (default value: 30)

    :example:

        .. code-block:: yaml

            policies:
              - name: asg-resume-processes
                resource: asg
                filters:
                  - "tag:Resume": present
                actions:
                  - type: resume
                    delay: 300
    """
    schema = type_schema('resume', delay={'type': 'number'})
    permissions = ("autoscaling:ResumeProcesses", "ec2:StartInstances")

    def process(self, asgs):
        original_count = len(asgs)
        asgs = [a for a in asgs if a['SuspendedProcesses']]
        self.delay = self.data.get('delay', 30)
        self.log.debug("Filtered from %d to %d suspended asgs", original_count,
                       len(asgs))

        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg_instances, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Traceback resume asg:%s instances error:%s" %
                        (futures[f]['AutoScalingGroupName'], f.exception()))
                    continue

        log.debug("Sleeping for asg health check grace")
        time.sleep(self.delay)

        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Traceback resume asg:%s error:%s" %
                        (futures[f]['AutoScalingGroupName'], f.exception()))

    def resume_asg_instances(self, asg):
        """Resume asg instances.
        """
        session = local_session(self.manager.session_factory)
        ec2_client = session.client('ec2')
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        if not instance_ids:
            return
        ec2_client.start_instances(InstanceIds=instance_ids)

    def resume_asg(self, asg):
        """Resume asg processes.
        """
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        self.manager.retry(asg_client.resume_processes,
                           AutoScalingGroupName=asg['AutoScalingGroupName'])
Exemplo n.º 23
0
class Stop(InstanceAction):

    schema = type_schema('stop')
    method_spec = {'op': 'stop'}
    attr_filter = ('status', ('RUNNING', ))
Exemplo n.º 24
0
class Tag(Action):
    """Action to add a tag to an ASG

    The *propagate* parameter can be used to specify that the tag being added
    will need to be propagated down to each ASG instance associated or simply
    to the ASG itself.

    :example:

        .. code-block:: yaml

            policies:
              - name: asg-add-owner-tag
                resource: asg
                filters:
                  - "tag:OwnerName": absent
                actions:
                  - type: tag
                    key: OwnerName
                    value: OwnerName
                    propagate: true
    """

    schema = type_schema(
        'tag',
        key={'type': 'string'},
        value={'type': 'string'},
        # Backwards compatibility
        tag={'type': 'string'},
        msg={'type': 'string'},
        propagate={'type': 'boolean'},
        aliases=('mark', ))
    permissions = ('autoscaling:CreateOrUpdateTags', )
    batch_size = 1

    def process(self, asgs):
        key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
        value = self.data.get(
            'value',
            self.data.get('msg',
                          'AutoScaleGroup does not meet policy guidelines'))
        return self.tag(asgs, key, value)

    def tag(self, asgs, key, value):
        error = None
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for asg_set in chunks(asgs, self.batch_size):
                futures[w.submit(self.process_asg_set, asg_set, key,
                                 value)] = asg_set
            for f in as_completed(futures):
                asg_set = futures[f]
                if f.exception():
                    error = f.exception()
                    self.log.exception(
                        "Exception tagging tag:%s error:%s asg:%s" %
                        (self.data.get(
                            'key', DEFAULT_TAG), f.exception(), ", ".join(
                                [a['AutoScalingGroupName'] for a in asg_set])))
        if error:
            raise error

    def process_asg_set(self, asgs, key, value):
        session = local_session(self.manager.session_factory)
        client = session.client('autoscaling')
        propagate = self.data.get('propagate', True)
        tags = [
            dict(Key=key,
                 ResourceType='auto-scaling-group',
                 Value=value,
                 PropagateAtLaunch=propagate,
                 ResourceId=a['AutoScalingGroupName']) for a in asgs
        ]
        self.manager.retry(client.create_or_update_tags, Tags=tags)
Exemplo n.º 25
0
class Webhook(EventAction):
    """Calls a webhook with optional parameters and body
       populated from JMESPath queries.

        .. code-block:: yaml

          policies:
            - name: call-webhook
              resource: ec2
              description: |
                Call webhook with list of resource groups
              actions:
               - type: webhook
                 url: http://foo.com
                 query-params:
                    resource_name: resource.name
                    policy_name: policy.name
    """

    schema_alias = True
    schema = utils.type_schema(
        'webhook',
        required=['url'],
        **{
            'url': {
                'type': 'string'
            },
            'body': {
                'type': 'string'
            },
            'batch': {
                'type': 'boolean'
            },
            'batch-size': {
                'type': 'number'
            },
            'method': {
                'type': 'string',
                'enum': ['PUT', 'POST', 'GET', 'PATCH', 'DELETE']
            },
            'query-params': {
                "type": "object",
                "additionalProperties": {
                    "type": "string",
                    "description": "query string values"
                }
            },
            'headers': {
                "type": "object",
                "additionalProperties": {
                    "type": "string",
                    "description": "header values"
                }
            }
        })

    def __init__(self, data=None, manager=None, log_dir=None):
        super(Webhook, self).__init__(data, manager, log_dir)
        self.http = None
        self.url = self.data.get('url')
        self.body = self.data.get('body')
        self.batch = self.data.get('batch', False)
        self.batch_size = self.data.get('batch-size', 500)
        self.query_params = self.data.get('query-params', {})
        self.headers = self.data.get('headers', {})
        self.method = self.data.get('method', 'POST')
        self.lookup_data = None

    def process(self, resources, event=None):
        self.lookup_data = {
            'account_id': self.manager.config.account_id,
            'region': self.manager.config.region,
            'execution_id': self.manager.ctx.execution_id,
            'execution_start': self.manager.ctx.start_time,
            'policy': self.manager.data
        }

        self.http = self._build_http_manager()

        if self.batch:
            for chunk in utils.chunks(resources, self.batch_size):
                resource_data = self.lookup_data
                resource_data['resources'] = chunk
                self._process_call(resource_data)
        else:
            for r in resources:
                resource_data = self.lookup_data
                resource_data['resource'] = r
                self._process_call(resource_data)

    def _process_call(self, resource):
        prepared_url = self._build_url(resource)
        prepared_body = self._build_body(resource)
        prepared_headers = self._build_headers(resource)

        if prepared_body:
            prepared_headers['Content-Type'] = 'application/json'

        try:
            res = self.http.request(method=self.method,
                                    url=prepared_url,
                                    body=prepared_body,
                                    headers=prepared_headers)

            self.log.info("%s got response %s with URL %s" %
                          (self.method, res.status, prepared_url))
        except urllib3.exceptions.HTTPError as e:
            self.log.error("Error calling %s. Code: %s" %
                           (prepared_url, e.reason))

    def _build_http_manager(self):
        pool_kwargs = {
            'cert_reqs': 'CERT_REQUIRED',
            'ca_certs': certifi and certifi.where() or None
        }

        proxy_url = utils.get_proxy_url(self.url)
        if proxy_url:
            return urllib3.ProxyManager(proxy_url, **pool_kwargs)
        else:
            return urllib3.PoolManager(**pool_kwargs)

    def _build_headers(self, resource):
        return {
            k: jmespath.search(v, resource)
            for k, v in self.headers.items()
        }

    def _build_url(self, resource):
        """
        Compose URL with query string parameters.

        Existing static parameters in the URL string are preserved,
        but duplicate parameter entries are not supported.
        """

        if not self.query_params:
            return self.url

        evaluated_params = {
            k: jmespath.search(v, resource)
            for k, v in self.query_params.items()
        }

        url_parts = list(parse.urlparse(self.url))
        query = dict(parse.parse_qsl(url_parts[4]))
        query.update(evaluated_params)
        url_parts[4] = parse.urlencode(query)

        return parse.urlunparse(url_parts)

    def _build_body(self, resource):
        """Create a JSON body and dump it to encoded bytes."""

        if not self.body:
            return None

        return json.dumps(jmespath.search(self.body, resource)).encode('utf-8')
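
A worked example of `_build_url`'s merge behavior, assuming the third-party
jmespath package the action already relies on; the URL and resource data are
made up:

import jmespath
from urllib import parse

url = 'http://foo.com/hook?source=c7n'      # static param already present
query_params = {'policy_name': 'policy.name'}
resource = {'policy': {'name': 'call-webhook'}}

# Evaluate each JMESPath expression against the resource payload.
evaluated = {k: jmespath.search(v, resource) for k, v in query_params.items()}

url_parts = list(parse.urlparse(url))
query = dict(parse.parse_qsl(url_parts[4]))  # keep existing static params
query.update(evaluated)
url_parts[4] = parse.urlencode(query)
print(parse.urlunparse(url_parts))
# http://foo.com/hook?source=c7n&policy_name=call-webhook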
Exemplo n.º 26
0
class RenameTag(Action):
    """Rename a tag on an AutoScaleGroup.

    :example:

        .. code-block:: yaml

            policies:
              - name: asg-rename-owner-tag
                resource: asg
                filters:
                  - "tag:OwnerNames": present
                actions:
                  - type: rename-tag
                    propagate: true
                    source: OwnerNames
                    dest: OwnerName
    """

    schema = type_schema('rename-tag',
                         required=['source', 'dest'],
                         propagate={'type': 'boolean'},
                         source={'type': 'string'},
                         dest={'type': 'string'})

    def get_permissions(self):
        permissions = ('autoscaling:CreateOrUpdateTags',
                       'autoscaling:DeleteTags')
        if self.data.get('propagate', True):
            permissions += ('ec2:CreateTags', 'ec2:DeleteTags')
        return permissions

    def process(self, asgs):
        source = self.data.get('source')
        dest = self.data.get('dest')
        count = len(asgs)

        filtered = []
        for a in asgs:
            for t in a.get('Tags'):
                if t['Key'] == source:
                    filtered.append(a)
                    break
        asgs = filtered
        self.log.info("Filtered from %d asgs to %d", count, len(asgs))
        self.log.info("Renaming %s to %s on %d asgs", source, dest,
                      len(filtered))
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    def process_asg(self, asg):
        """Move source tag to destination tag.

        Check tag count on asg
        Create new tag
        Delete old tag
        Check tag count on instance
        Create new tag
        Delete old tag
        """
        source_tag = self.data.get('source')
        tag_map = {t['Key']: t for t in asg.get('Tags', [])}
        source = tag_map[source_tag]
        destination_tag = self.data.get('dest')
        propagate = self.data.get('propagate', True)
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        # Deleting the old tag first is technically less safe than
        # creating the new one first, but creating first can run into
        # the max-tags-per-resource constraint.
        client.delete_tags(Tags=[{
            'ResourceId': asg['AutoScalingGroupName'],
            'ResourceType': 'auto-scaling-group',
            'Key': source_tag,
            'Value': source['Value']
        }])
        client.create_or_update_tags(
            Tags=[{
                'ResourceId': asg['AutoScalingGroupName'],
                'ResourceType': 'auto-scaling-group',
                'PropagateAtLaunch': propagate,
                'Key': destination_tag,
                'Value': source['Value']
            }])
        if propagate:
            self.propagate_instance_tag(source, destination_tag, asg)

    def propagate_instance_tag(self, source, destination_tag, asg):
        client = local_session(self.manager.session_factory).client('ec2')
        client.delete_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{
                "Key": source['Key']
            }])
        client.create_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{
                'Key': source['Key'],
                'Value': source['Value']
            }])
Exemplo n.º 27
0
class CrossAccountAccessFilter(Filter):
    """Check a resource's embedded iam policy for cross account access.
    """

    schema = type_schema(
        'cross-account',
        # only consider policies that grant one of the given actions.
        actions={
            'type': 'array',
            'items': {
                'type': 'string'
            }
        },
        # only consider policies which grant to *
        everyone_only={'type': 'boolean'},
        # disregard statements using these conditions.
        whitelist_conditions={
            'type': 'array',
            'items': {
                'type': 'string'
            }
        },
        # white list accounts
        whitelist_from=ValuesFrom.schema,
        whitelist={
            'type': 'array',
            'items': {
                'type': 'string'
            }
        },
        whitelist_vpce_from=ValuesFrom.schema,
        whitelist_vpce={
            'type': 'array',
            'items': {
                'type': 'string'
            }
        },
        whitelist_vpc_from=ValuesFrom.schema,
        whitelist_vpc={
            'type': 'array',
            'items': {
                'type': 'string'
            }
        })

    policy_attribute = 'Policy'
    annotation_key = 'CrossAccountViolations'

    checker_factory = PolicyChecker

    def process(self, resources, event=None):
        self.everyone_only = self.data.get('everyone_only', False)
        self.conditions = set(
            self.data.get('whitelist_conditions',
                          ("aws:userid", "aws:username")))
        self.actions = self.data.get('actions', ())
        self.accounts = self.get_accounts()
        self.vpcs = self.get_vpcs()
        self.vpces = self.get_vpces()
        self.checker = self.checker_factory({
            'allowed_accounts': self.accounts,
            'allowed_vpc': self.vpcs,
            'allowed_vpce': self.vpces,
            'check_actions': self.actions,
            'everyone_only': self.everyone_only,
            'whitelist_conditions': self.conditions})
        return super(CrossAccountAccessFilter, self).process(resources, event)

    def get_accounts(self):
        owner_id = self.manager.config.account_id
        accounts = set(self.data.get('whitelist', ()))
        if 'whitelist_from' in self.data:
            values = ValuesFrom(self.data['whitelist_from'], self.manager)
            accounts = accounts.union(values.get_values())
        accounts.add(owner_id)
        return accounts

    def get_vpcs(self):
        vpc = set(self.data.get('whitelist_vpc', ()))
        if 'whitelist_vpc_from' in self.data:
            values = ValuesFrom(self.data['whitelist_vpc_from'], self.manager)
            vpc = vpc.union(values.get_values())
        return vpc

    def get_vpces(self):
        vpce = set(self.data.get('whitelist_vpce', ()))
        if 'whitelist_vpce_from' in self.data:
            values = ValuesFrom(self.data['whitelist_vpce_from'], self.manager)
            vpce = vpce.union(values.get_values())
        return vpce

    def get_resource_policy(self, r):
        return r.get(self.policy_attribute, None)

    def __call__(self, r):
        p = self.get_resource_policy(r)
        if p is None:
            return False
        violations = self.checker.check(p)
        if violations:
            r[self.annotation_key] = violations
            return True
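
A small sketch of how `get_accounts` assembles the account whitelist:
explicit `whitelist` entries and `ValuesFrom`-sourced values are unioned,
and the owning account is always allowed. The account ids below are
fabricated, and a plain list stands in for `ValuesFrom.get_values()`:

owner_id = '111111111111'
data = {'whitelist': ['222222222222']}
values_from = ['333333333333']  # stand-in for ValuesFrom.get_values()

accounts = set(data.get('whitelist', ()))
accounts = accounts.union(values_from)
accounts.add(owner_id)  # the resource owner is always permitted

print(sorted(accounts))
# ['111111111111', '222222222222', '333333333333']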
Exemplo n.º 28
0
class ConfigPollRuleMode(LambdaMode, PullMode):
    """This mode represents a periodic/scheduled AWS config evaluation.

    The primary benefit this mode offers is support for additional resources
    beyond what config supports natively, as it can post evaluations for
    any resource which has a cloudformation type. If a resource is natively
    supported by config, it's highly recommended to use the `config-rule`
    mode instead.

    This mode effectively receives no data from config; instead it is
    periodically executed by config, and it polls and evaluates all
    resources. It is equivalent to a periodic policy, except it also
    pushes resource evaluations to config.
    """
    schema = utils.type_schema(
        'config-poll-rule',
        schedule={'enum': [
            "One_Hour",
            "Three_Hours",
            "Six_Hours",
            "Twelve_Hours",
            "TwentyFour_Hours"]},
        rinherit=LambdaMode.schema)

    def validate(self):
        super().validate()
        if not self.policy.data['mode'].get('schedule'):
            raise PolicyValidationError(
                "policy:%s config-poll-rule schedule required" % (
                    self.policy.name))
        if self.policy.resource_manager.resource_type.config_type:
            raise PolicyValidationError(
                "resource:%s fully supported by config and should use mode: config-rule" % (
                    self.policy.resource_type))
        if self.policy.data['mode'].get('pattern'):
            raise PolicyValidationError(
                "policy:%s AWS Config does not support event pattern filtering" % (
                    self.policy.name))
        if not self.policy.resource_manager.resource_type.cfn_type:
            raise PolicyValidationError(
                'policy:%s resource:%s does not have a cloudformation type'
                ' and is therefore not supported by config-poll-rule' % (
                    self.policy.name, self.policy.resource_type))

    def _get_client(self):
        return utils.local_session(
            self.policy.session_factory).client('config')

    def put_evaluations(self, client, token, evaluations):
        for eval_set in utils.chunks(evaluations, 100):
            self.policy.resource_manager.retry(
                client.put_evaluations,
                Evaluations=eval_set,
                ResultToken=token)

    def run(self, event, lambda_context):
        cfg_event = json.loads(event['invokingEvent'])
        resource_type = self.policy.resource_manager.resource_type.cfn_type
        resource_id = self.policy.resource_manager.resource_type.id
        client = self._get_client()
        token = event.get('resultToken')

        matched_resources = set()
        for r in PullMode.run(self):
            matched_resources.add(r[resource_id])
        unmatched_resources = set()
        for r in self.policy.resource_manager.get_resource_manager(
                self.policy.resource_type).resources():
            if r[resource_id] not in matched_resources:
                unmatched_resources.add(r[resource_id])

        evaluations = [dict(
            ComplianceResourceType=resource_type,
            ComplianceResourceId=r,
            ComplianceType='NON_COMPLIANT',
            OrderingTimestamp=cfg_event['notificationCreationTime'],
            Annotation='The resource is not compliant with policy:%s.' % (
                self.policy.name))
            for r in matched_resources]
        if evaluations and token:
            self.put_evaluations(client, token, evaluations)

        evaluations = [dict(
            ComplianceResourceType=resource_type,
            ComplianceResourceId=r,
            ComplianceType='COMPLIANT',
            OrderingTimestamp=cfg_event['notificationCreationTime'],
            Annotation='The resource is compliant with policy:%s.' % (
                self.policy.name))
            for r in unmatched_resources]
        if evaluations and token:
            self.put_evaluations(client, token, evaluations)
        return list(matched_resources)
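
`put_evaluations` chunks its input because the Config PutEvaluations API
accepts at most 100 evaluations per call. An illustrative chunker, roughly
equivalent to what `utils.chunks` is doing here (a sketch, not c7n's
implementation):

def chunks(iterable, size):
    """Yield successive batches of at most `size` items."""
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch

evaluations = [{'ComplianceResourceId': str(i)} for i in range(250)]
print([len(batch) for batch in chunks(evaluations, 100)])  # [100, 100, 50]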
Exemplo n.º 29
0
class ConfigEnabled(Filter):
    """Is config service enabled for this account

    :example:

        .. code-block:: yaml

            policies:
              - name: account-check-config-services
                resource: account
                region: us-east-1
                filters:
                  - type: check-config
                    all-resources: true
                    global-resources: true
                    running: true
    """

    schema = type_schema(
        'check-config', **{
            'all-resources': {
                'type': 'boolean'
            },
            'running': {
                'type': 'boolean'
            },
            'global-resources': {
                'type': 'boolean'
            }
        })

    permissions = ('config:DescribeDeliveryChannels',
                   'config:DescribeConfigurationRecorders',
                   'config:DescribeConfigurationRecorderStatus')

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('config')
        channels = client.describe_delivery_channels()['DeliveryChannels']
        recorders = client.describe_configuration_recorders(
        )['ConfigurationRecorders']
        resources[0]['c7n:config_recorders'] = recorders
        resources[0]['c7n:config_channels'] = channels
        if self.data.get('global-resources'):
            recorders = [
                r for r in recorders
                if r['recordingGroup'].get('includeGlobalResourceTypes')
            ]
        if self.data.get('all-resources'):
            recorders = [
                r for r in recorders if r['recordingGroup'].get('allSupported')
            ]
        if self.data.get('running', True) and recorders:
            status = {
                s['name']: s
                for s in client.describe_configuration_recorder_status()
                ['ConfigurationRecordersStatus']
            }
            resources[0]['c7n:config_status'] = status
            recorders = [
                r for r in recorders
                if status[r['name']]['recording'] and status[r['name']]
                ['lastStatus'].lower() in ('pending', 'success')
            ]
        if channels and recorders:
            return []
        return resources
Exemplo n.º 30
0
class Delete(InstanceAction):

    schema = type_schema('delete')
    method_spec = {'op': 'delete'}
Exemplo n.º 31
0
class EnableTrail(BaseAction):
    """Enables logging on the trail(s) named in the policy

    :Example:

    .. code-block:: yaml

        policies:
          - name: trail-test
            description: Ensure CloudTrail logging is enabled
            resource: account
            actions:
              - type: enable-cloudtrail
                trail: mytrail
                bucket: trails
    """

    permissions = (
        'cloudtrail:CreateTrail',
        'cloudtrail:DescribeTrails',
        'cloudtrail:GetTrailStatus',
        'cloudtrail:StartLogging',
        'cloudtrail:UpdateTrail',
        's3:CreateBucket',
        's3:GetBucketPolicy',
        's3:PutBucketPolicy',
    )
    schema = type_schema(
        'enable-cloudtrail',
        **{
            'trail': {'type': 'string'},
            'bucket': {'type': 'string'},
            'bucket-region': {'type': 'string'},
            'multi-region': {'type': 'boolean'},
            'global-events': {'type': 'boolean'},
            'notify': {'type': 'string'},
            'file-digest': {'type': 'boolean'},
            'kms': {'type': 'boolean'},
            'kms-key': {'type': 'string'},
            'required': ('bucket',),
        }
    )

    def process(self, accounts):
        """Create or enable CloudTrail"""
        session = local_session(self.manager.session_factory)
        client = session.client('cloudtrail')
        bucket_name = self.data['bucket']
        bucket_region = self.data.get('bucket-region', 'us-east-1')
        trail_name = self.data.get('trail', 'default-trail')
        multi_region = self.data.get('multi-region', True)
        global_events = self.data.get('global-events', True)
        notify = self.data.get('notify', '')
        file_digest = self.data.get('file-digest', False)
        kms = self.data.get('kms', False)
        kms_key = self.data.get('kms-key', '')

        s3client = session.client('s3')
        try:
            # us-east-1 is the S3 default location and rejects an explicit
            # LocationConstraint, so only pass one for other regions.
            if bucket_region == 'us-east-1':
                s3client.create_bucket(Bucket=bucket_name)
            else:
                s3client.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={'LocationConstraint': bucket_region}
                )
        except ClientError as ce:
            if not ('Error' in ce.response and
                    ce.response['Error']['Code'] == 'BucketAlreadyOwnedByYou'):
                raise ce

        try:
            current_policy = s3client.get_bucket_policy(Bucket=bucket_name)
        except ClientError:
            current_policy = None

        policy_json = cloudtrail_policy(
            current_policy, bucket_name, self.manager.config.account_id)

        s3client.put_bucket_policy(Bucket=bucket_name, Policy=policy_json)
        trails = client.describe_trails().get('trailList', ())
        if trail_name not in [t.get('Name') for t in trails]:
            new_trail = client.create_trail(
                Name=trail_name,
                S3BucketName=bucket_name,
            )
            if new_trail:
                trails.append(new_trail)
                # the loop below will configure the new trail
        for trail in trails:
            if trail.get('Name') != trail_name:
                continue
            # enable
            arn = trail['TrailARN']
            status = client.get_trail_status(Name=arn)
            if not status['IsLogging']:
                client.start_logging(Name=arn)
            # apply configuration changes (if any)
            update_args = {}
            if multi_region != trail.get('IsMultiRegionTrail'):
                update_args['IsMultiRegionTrail'] = multi_region
            if global_events != trail.get('IncludeGlobalServiceEvents'):
                update_args['IncludeGlobalServiceEvents'] = global_events
            if notify != trail.get('SNSTopicArn'):
                update_args['SnsTopicName'] = notify
            if file_digest != trail.get('LogFileValidationEnabled'):
                update_args['EnableLogFileValidation'] = file_digest
            if kms_key != trail.get('KmsKeyId'):
                if not kms and 'KmsKeyId' in trail:
                    kms_key = ''
                update_args['KmsKeyId'] = kms_key
            if update_args:
                update_args['Name'] = trail_name
                client.update_trail(**update_args)
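
The trail-configuration loop above follows a delta-update idiom: compare
each desired setting against the trail's current state and send
update_trail only the fields that actually differ. A generalized sketch
with a fabricated trail record:

trail = {'IsMultiRegionTrail': False, 'IncludeGlobalServiceEvents': True}
desired = {'IsMultiRegionTrail': True, 'IncludeGlobalServiceEvents': True}

update_args = {}
for key, value in desired.items():
    if value != trail.get(key):
        update_args[key] = value

print(update_args)  # {'IsMultiRegionTrail': True} -- only the changed field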
Exemplo n.º 32
0
class SetSslListenerPolicy(BaseAction):
    """Action to set the ELB SSL listener policy

    :example:

    .. code-block:: yaml

            policies:
              - name: elb-set-listener-policy
                resource: elb
                actions:
                  - type: set-ssl-listener-policy
                    name: SSLNegotiation-Policy-01
                    attributes:
                      - Protocol-SSLv3
                      - Protocol-TLSv1.1
                      - DHE-RSA-AES256-SHA256
    """

    schema = type_schema('set-ssl-listener-policy',
                         name={'type': 'string'},
                         attributes={
                             'type': 'array',
                             'items': {
                                 'type': 'string'
                             }
                         },
                         required=['name', 'attributes'])

    permissions = ('elasticloadbalancing:CreateLoadBalancerPolicy',
                   'elasticloadbalancing:SetLoadBalancerPoliciesOfListener')

    def process(self, load_balancers):
        client = local_session(self.manager.session_factory).client('elb')
        rid = self.manager.resource_type.id
        error = None

        with self.executor_factory(max_workers=2) as w:
            futures = {}
            for lb in load_balancers:
                futures[w.submit(self.process_elb, client, lb)] = lb

            for f in as_completed(futures):
                if f.exception():
                    log.error(
                        "set-ssl-listener-policy error on lb:%s error:%s",
                        futures[f][rid], f.exception())
                    error = f.exception()

        if error is not None:
            raise error

    def process_elb(self, client, elb):
        if not is_ssl(elb):
            return

        # Create a custom policy name suffixed with the epoch timestamp
        # in milliseconds, to make it unique within the set of policies
        # for this load balancer.
        policy_name = self.data.get('name') + '-' + \
            str(int(datetime.now(tz=tzutc()).timestamp() * 1000))
        lb_name = elb['LoadBalancerName']
        attrs = self.data.get('attributes')
        policy_attributes = [{
            'AttributeName': attr,
            'AttributeValue': 'true'
        } for attr in attrs]

        try:
            client.create_load_balancer_policy(
                LoadBalancerName=lb_name,
                PolicyName=policy_name,
                PolicyTypeName='SSLNegotiationPolicyType',
                PolicyAttributes=policy_attributes)
        except ClientError as e:
            if e.response['Error']['Code'] not in (
                    'DuplicatePolicyName', 'DuplicatePolicyNameException',
                    'DuplicationPolicyNameException'):
                raise

        # Apply it to all SSL listeners.
        ssl_policies = ()
        if 'c7n.ssl-policies' in elb:
            ssl_policies = elb['c7n.ssl-policies']

        for ld in elb['ListenerDescriptions']:
            if ld['Listener']['Protocol'] in ('HTTPS', 'SSL'):
                policy_names = [policy_name]
                # Preserve extant non-ssl listener policies
                policy_names.extend(ld.get('PolicyNames', ()))
                # Remove extant ssl listener policy
                if ssl_policies:
                    policy_names = list(
                        set(policy_names).difference(ssl_policies))
                client.set_load_balancer_policies_of_listener(
                    LoadBalancerName=lb_name,
                    LoadBalancerPort=ld['Listener']['LoadBalancerPort'],
                    PolicyNames=policy_names)
Exemplo n.º 33
0
class FirehoseEncryptS3Destination(Action):
    """Action to set encryption key a Firehose S3 destination

    :example:

    .. code-block:: yaml

            policies:
              - name: encrypt-s3-destination
                resource: firehose
                filters:
                  - KmsMasterKeyId: absent
                actions:
                  - type: encrypt-s3-destination
                    key_arn: <arn of KMS key/alias>
    """
    schema = type_schema(
        'encrypt-s3-destination',
        key_arn={'type': 'string'}, required=('key_arn',))

    permissions = ("firehose:UpdateDestination",)

    DEST_MD = {
        'SplunkDestinationDescription': {
            'update': 'SplunkDestinationUpdate',
            'clear': ['S3BackupMode'],
            'encrypt_path': 'S3DestinationDescription.EncryptionConfiguration',
            'remap': [('S3DestinationDescription', 'S3Update')]
        },
        'ElasticsearchDestinationDescription': {
            'update': 'ElasticsearchDestinationUpdate',
            'clear': ['S3BackupMode'],
            'encrypt_path': 'S3DestinationDescription.EncryptionConfiguration',
            'remap': [('S3DestinationDescription', 'S3Update')],
        },
        'ExtendedS3DestinationDescription': {
            'update': 'ExtendedS3DestinationUpdate',
            'clear': ['S3BackupMode'],
            'encrypt_path': 'EncryptionConfiguration',
            'remap': []
        },
        'RedshiftDestinationDescription': {
            'update': 'RedshiftDestinationUpdate',
            'clear': ['S3BackupMode', "ClusterJDBCURL", "CopyCommand", "Username"],
            'encrypt_path': 'S3DestinationDescription.EncryptionConfiguration',
            'remap': [('S3DestinationDescription', 'S3Update')]
        },
    }

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('firehose')
        key = self.data.get('key_arn')
        for r in resources:
            if not r['DeliveryStreamStatus'] == 'ACTIVE':
                continue
            version = r['VersionId']
            name = r['DeliveryStreamName']
            d = r['Destinations'][0]
            destination_id = d['DestinationId']

            for dtype, dmetadata in self.DEST_MD.items():
                if dtype not in d:
                    continue
                dinfo = d[dtype]
                for k in dmetadata['clear']:
                    dinfo.pop(k, None)
                if dmetadata['encrypt_path']:
                    encrypt_info = jmespath.search(dmetadata['encrypt_path'], dinfo)
                else:
                    encrypt_info = dinfo
                encrypt_info.pop('NoEncryptionConfig', None)
                encrypt_info['KMSEncryptionConfig'] = {'AWSKMSKeyARN': key}

                for old_k, new_k in dmetadata['remap']:
                    if old_k in dinfo:
                        dinfo[new_k] = dinfo.pop(old_k)
                params = dict(DeliveryStreamName=name,
                              DestinationId=destination_id,
                              CurrentDeliveryStreamVersionId=version)
                params[dmetadata['update']] = dinfo
                client.update_destination(**params)
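
A worked example of the DEST_MD transformation for a fabricated Splunk
destination description, assuming the third-party jmespath package the
action already uses; all field values are illustrative:

import jmespath

dinfo = {
    'S3BackupMode': 'FailedEventsOnly',
    'S3DestinationDescription': {
        'EncryptionConfiguration': {'NoEncryptionConfig': 'NoEncryption'}},
}
dmetadata = {
    'update': 'SplunkDestinationUpdate',
    'clear': ['S3BackupMode'],
    'encrypt_path': 'S3DestinationDescription.EncryptionConfiguration',
    'remap': [('S3DestinationDescription', 'S3Update')],
}
key = 'arn:aws:kms:us-east-1:111111111111:key/example'  # fabricated

# Drop fields the update call must not echo back.
for k in dmetadata['clear']:
    dinfo.pop(k, None)
# jmespath returns a reference to the nested dict, so mutating it
# mutates dinfo in place (the action relies on the same behavior).
encrypt_info = jmespath.search(dmetadata['encrypt_path'], dinfo)
encrypt_info.pop('NoEncryptionConfig', None)
encrypt_info['KMSEncryptionConfig'] = {'AWSKMSKeyARN': key}
# Rename description keys to their *Update request equivalents.
for old_k, new_k in dmetadata['remap']:
    if old_k in dinfo:
        dinfo[new_k] = dinfo.pop(old_k)

print(dinfo)
# {'S3Update': {'EncryptionConfiguration':
#     {'KMSEncryptionConfig': {'AWSKMSKeyARN': 'arn:aws:kms:...'}}}}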