Beispiel #1
0
    def process(self, resources, event=None):
        """Annotate resources with AWS Health events for lost EBS volumes.

        Delegates to the base filter unless AWS_EBS_VOLUME_LOST is among
        the configured event types; otherwise queries the health API and
        resolves affected entities via the config service.
        """
        if 'AWS_EBS_VOLUME_LOST' not in self.data['types']:
            return super(HealthFilter, self).process(resources, event)
        if not resources:
            return resources

        # The AWS Health API endpoint lives in us-east-1 only.
        client = local_session(self.manager.session_factory).client(
            'health', region_name='us-east-1')
        f = self.get_filter_parameters()
        resource_map = {}

        paginator = client.get_paginator('describe_events')
        events = list(itertools.chain(
            *[p['events']for p in paginator.paginate(filter=f)]))
        entities = self.process_event(events)

        event_map = {e['arn']: e for e in events}
        config = local_session(self.manager.session_factory).client('config')
        for e in entities:
            rid = e['entityValue']
            # Load each affected resource at most once, then attach every
            # health event that references it.
            if not resource_map.get(rid):
                resource_map[rid] = self.load_resource(config, rid)
            resource_map[rid].setdefault(
                'c7n:HealthEvent', []).append(event_map[e['eventArn']])
        return list(resource_map.values())
Beispiel #2
0
    def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
        """Tear down api-call recording when the stats context exits."""
        if isinstance(self.ctx.session_factory, credentials.SessionFactory):
            self.ctx.session_factory.set_subscribers(())

        # With cached sessions, we need to unregister any events subscribers
        # on extant sessions to allow for the next registration.
        utils.local_session(self.ctx.session_factory).events.unregister(
            'after-call.*.*', self._record, unique_id='c7n-api-stats')

        # Publish the aggregate api call count before restoring the snapshot.
        self.ctx.metrics.put_metric(
            "ApiCalls", sum(self.api_calls.values()), "Count")
        self.pop_snapshot()
    def test_local_session_agent_update(self):
        """User agent should reflect policy_name changes after factory.update."""
        factory = SessionFactory('us-east-1')
        factory.policy_name = "check-ebs"
        client = local_session(factory).client('ec2')
        self.assertTrue(
            'check-ebs' in client._client_config.user_agent)

        # Updating the cached session propagates the new policy name.
        factory.policy_name = "check-ec2"
        factory.update(local_session(factory))
        client = local_session(factory).client('ec2')
        self.assertTrue(
            'check-ec2' in client._client_config.user_agent)
Beispiel #4
0
 def process(self, resources):
     """Enable KMS encryption on all ACTIVE Kinesis streams.

     Resolves the configured key alias to a KeyId once, then calls
     start_stream_encryption per stream. Streams not in ACTIVE state
     are skipped.
     """
     # Resolve the alias to a concrete KeyId (cached on self for reuse).
     key = "alias/" + self.data.get('key')
     self.key_id = local_session(self.manager.session_factory).client(
         'kms').describe_key(KeyId=key)['KeyMetadata']['KeyId']
     client = local_session(self.manager.session_factory).client('kinesis')
     for r in resources:
         # `x != y` rather than the double-negative `not x == y`.
         if r['StreamStatus'] != 'ACTIVE':
             continue
         client.start_stream_encryption(
             StreamName=r['StreamName'],
             EncryptionType='KMS',
             KeyId=self.key_id
         )
Beispiel #5
0
    def get_elb_bucket_locations(self):
        """Yield access-log target locations for all classic ELBs.

        Uses the resource cache when warm, otherwise paginates the elb
        API; attribute lookups are fanned out over a small thread pool.
        """
        session = local_session(self.manager.session_factory)
        client = session.client('elb')

        # Try to use the cache if it exists
        elbs = self.manager._cache.get(
            {'region': self.manager.config.region, 'resource': 'elb'})

        # Sigh, post query refactor reuse, we can't save our cache here
        # as that resource manager does extra lookups on tags. Not
        # worth paginating, since with cache usage we have full set in
        # mem.
        if elbs is None:
            p = client.get_paginator('describe_load_balancers')
            results = p.paginate()
            elbs = results.build_full_result().get(
                'LoadBalancerDescriptions', ())
            self.log.info("Queried %d elbs", len(elbs))
        else:
            self.log.info("Using %d cached elbs", len(elbs))

        get_elb_attrs = functools.partial(
            _query_elb_attrs, self.manager.session_factory)

        with self.executor_factory(max_workers=2) as w:
            futures = []
            for elb_set in chunks(elbs, 100):
                futures.append(w.submit(get_elb_attrs, elb_set))
            for f in as_completed(futures):
                # Best-effort: log and skip failed batches, keep yielding
                # targets from the batches that succeeded.
                if f.exception():
                    log.error("Error while scanning elb log targets: %s" % (
                        f.exception()))
                    continue
                for tgt in f.result():
                    yield tgt
Beispiel #6
0
    def process_elb(self, elb):
        """Create a custom SSL negotiation policy and attach it to every
        SSL/HTTPS listener on the load balancer.

        No-op for load balancers without SSL listeners.
        """
        if not is_ssl(elb):
            return

        client = local_session(self.manager.session_factory).client('elb')

        # Create a custom policy.
        # NOTE(review): assumes 'attributes' is present in the policy data;
        # a missing key would raise TypeError below — confirm the schema
        # enforces it.
        attrs = self.data.get('attributes')
        # This name must be unique within the
        # set of policies for this load balancer.
        policy_name = self.data.get('name')
        lb_name = elb['LoadBalancerName']
        policy_attributes = [{'AttributeName': attr, 'AttributeValue': 'true'}
            for attr in attrs]
        client.create_load_balancer_policy(
            LoadBalancerName=lb_name,
            PolicyName=policy_name,
            PolicyTypeName='SSLNegotiationPolicyType',
            PolicyAttributes=policy_attributes)

        # Apply it to all SSL listeners.
        for ld in elb['ListenerDescriptions']:
            if ld['Listener']['Protocol'] in ('HTTPS', 'SSL'):
                client.set_load_balancer_policies_of_listener(
                    LoadBalancerName=lb_name,
                    LoadBalancerPort=ld['Listener']['LoadBalancerPort'],
                    PolicyNames=[policy_name])
Beispiel #7
0
 def process(self, resources):
     """Apply the computed security-group set to each network interface."""
     client = local_session(self.manager.session_factory).client('ec2')
     groups = super(InterfaceRemoveGroups, self).get_groups(resources)
     # get_groups returns one group-set per resource, in order.
     for resource, group_set in zip(resources, groups):
         client.modify_network_interface_attribute(
             NetworkInterfaceId=resource['NetworkInterfaceId'],
             Groups=group_set)
Beispiel #8
0
    def process_elb_policy_set(self, elb_policy_set):
        """Resolve enabled SSL negotiation attributes per load balancer.

        :param elb_policy_set: iterable of (elb, policy_names) pairs
        :return: list of (elb, [enabled SSL policy attribute names])
        """
        results = []
        client = local_session(self.manager.session_factory).client('elb')

        for (elb, policy_names) in elb_policy_set:
            elb_name = elb['LoadBalancerName']
            try:
                policies = client.describe_load_balancer_policies(
                    LoadBalancerName=elb_name,
                    PolicyNames=policy_names)['PolicyDescriptions']
            except ClientError as e:
                # The elb may have been deleted since it was listed.
                if e.response['Error']['Code'] == "LoadBalancerNotFound":
                    continue
                raise
            active_lb_policies = []
            for p in policies:
                if p['PolicyTypeName'] != 'SSLNegotiationPolicyType':
                    continue
                # Collect the attribute names toggled on ('true').
                active_lb_policies.extend(
                    [policy_description['AttributeName']
                     for policy_description in
                     p['PolicyAttributeDescriptions']
                     if policy_description['AttributeValue'] == 'true']
                )
            results.append((elb, active_lb_policies))

        return results
 def process(self, resources):
     """Delete each fully-created, not-yet-deleted CloudSearch domain."""
     session = local_session(self.manager.session_factory)
     client = session.client('cloudsearch')
     for domain in resources:
         # Only act on domains that finished creation and are still live.
         if domain['Created'] is True and domain['Deleted'] is not True:
             client.delete_domain(DomainName=domain['DomainName'])
Beispiel #10
0
 def _process_attributes(elb):
     """Lazily populate elb['Attributes'] from the elb API.

     NOTE(review): references `self` without taking it as a parameter —
     presumably defined as a closure inside a method that has `self` in
     scope; confirm in the enclosing context.
     """
     if 'Attributes' not in elb:
         client = local_session(
             self.manager.session_factory).client('elb')
         results = client.describe_load_balancer_attributes(
             LoadBalancerName=elb['LoadBalancerName'])
         elb['Attributes'] = results['LoadBalancerAttributes']
Beispiel #11
0
    def process_delete_enablement(self, b):
        """Prep a bucket for deletion.

        Clear out any pending multi-part uploads.

        Disable versioning on the bucket, so deletes don't
        generate fresh deletion markers.
        """
        client = local_session(self.manager.session_factory).client('s3')

        # Suspend versioning, so we don't get new delete markers
        # as we walk and delete versions
        if (self.get_bucket_style(b) == 'versioned'
            and b['Versioning']['Status'] == 'Enabled'
                and self.data.get('remove-contents', True)):
            client.put_bucket_versioning(
                Bucket=b['Name'],
                VersioningConfiguration={'Status': 'Suspended'})
        # Clear our multi-part uploads
        uploads = client.get_paginator('list_multipart_uploads')
        for p in uploads.paginate(Bucket=b['Name']):
            for u in p.get('Uploads', ()):
                # Abandon in-flight uploads so they can't block deletion.
                client.abort_multipart_upload(
                    Bucket=b['Name'],
                    Key=u['Key'],
                    UploadId=u['UploadId'])
Beispiel #12
0
 def __init__(self, session_factory, map_queue, reduce_queue):
     """Coordinator over SQS map/reduce queues.

     :param session_factory: boto session factory for the sqs client
     :param map_queue: queue receiving map work  # presumably a queue url — confirm
     :param reduce_queue: queue receiving reduce results  # presumably a queue url — confirm
     """
     self.session_factory = session_factory
     self.map_queue = map_queue
     self.reduce_queue = reduce_queue
     self.sqs = utils.local_session(self.session_factory).client('sqs')
     # Random starting sequence number to correlate operations in a run.
     self.op_sequence = self.op_sequence_start = int(random.random() * 1000000)
     self.futures = {}
Beispiel #13
0
 def process(self, resources, event=None):
     """Return the subset of IAM policies that grant allow-all access."""
     client = local_session(self.manager.session_factory).client('iam')
     matched = [
         policy for policy in resources
         if self.has_allow_all_policy(client, policy)]
     self.log.info(
         "%d of %d iam policies have allow all.",
         len(matched), len(resources))
     return matched
Beispiel #14
0
    def process(self, resources):
        """Disable or delete IAM access keys, optionally filtered by age.

        When 'age' is set, only keys created before now minus that many
        days are affected; 'disable' deactivates instead of deleting.
        """
        client = local_session(self.manager.session_factory).client('iam')

        age = self.data.get('age')
        disable = self.data.get('disable')

        if age:
            # Keys created before this UTC cutoff are considered too old.
            threshold_date = datetime.datetime.now(tz=tzutc()) - timedelta(age)

        for r in resources:
            if 'AccessKeys' not in r:
                # Lazily fetch keys if an earlier filter didn't annotate them.
                r['AccessKeys'] = client.list_access_keys(
                    UserName=r['UserName'])['AccessKeyMetadata']
            keys = r['AccessKeys']
            for k in keys:
                if age:
                    if not parse(k['CreateDate']) < threshold_date:
                        continue
                if disable:
                    client.update_access_key(
                        UserName=r['UserName'],
                        AccessKeyId=k['AccessKeyId'],
                        Status='Inactive')
                else:
                    client.delete_access_key(
                        UserName=r['UserName'],
                        AccessKeyId=k['AccessKeyId'])
Beispiel #15
0
    def process_instance_volumes(self, instance, volumes):
        """Copy eligible instance tags onto each attached EBS volume.

        Volumes whose tag copy would exceed the tag limit are skipped
        with a warning rather than partially tagged.
        """
        client = local_session(self.manager.session_factory).client('ec2')

        for v in volumes:
            copy_tags = self.get_volume_tags(v, instance, v['Attachments'][0])
            if not copy_tags:
                continue
            # Can't add more tags than the resource supports could try
            # to delete extant ones inline, else trim-tags action.
            if len(copy_tags) > 10:
                log.warning(
                    "action:%s volume:%s instance:%s too many tags to copy" % (
                        self.__class__.__name__.lower(),
                        v['VolumeId'], instance['InstanceId']))
                continue

            try:
                client.create_tags(
                    Resources=[v['VolumeId']],
                    Tags=copy_tags,
                    DryRun=self.manager.config.dryrun)
            except ClientError as e:
                # Volume may have been deleted since it was listed.
                if e.response['Error']['Code'] == "InvalidVolume.NotFound":
                    continue
                raise
Beispiel #16
0
    def process(self, volumes):
        """Process unencrypted volumes grouped by their attached instance.

        Filters the volume set, groups by instance id, snapshots current
        instance state, then processes each instance's volumes in parallel.
        """
        original_count = len(volumes)
        # NOTE(review): the filter admits volumes with no attachments, but
        # the grouping below indexes Attachments[0] — confirm upstream
        # guarantees attachments, else unattached volumes would raise.
        volumes = [v for v in volumes
                   if not v['Encrypted'] or not v['Attachments']]
        log.debug(
            "EncryptVolumes filtered from %d to %d "
            " unencrypted attached volumes" % (
                original_count, len(volumes)))

        # Group volumes by instance id
        instance_vol_map = {}
        for v in volumes:
            instance_id = v['Attachments'][0]['InstanceId']
            instance_vol_map.setdefault(instance_id, []).append(v)

        # Query instances to find current instance state. Materialize the
        # key view as a list: botocore parameter validation rejects dict
        # view objects on python3.
        self.instance_map = {
            i['InstanceId']: i for i in query_instances(
                local_session(self.manager.session_factory),
                InstanceIds=list(instance_vol_map.keys()))}

        with self.executor_factory(max_workers=10) as w:
            futures = {}
            for instance_id, vol_set in instance_vol_map.items():
                futures[w.submit(
                    self.process_volume, instance_id, vol_set)] = instance_id

            # Best-effort: log per-instance failures, continue with the rest.
            for f in as_completed(futures):
                if f.exception():
                    instance_id = futures[f]
                    log.error(
                        "Exception processing instance:%s volset: %s \n %s" % (
                            instance_id, instance_vol_map[instance_id],
                            f.exception()))
Beispiel #17
0
 def process_rds_snapshot(self, resource):
     """Create a manual snapshot of the given RDS instance."""
     client = local_session(self.manager.session_factory).client('rds')
     snapshot_id = "Backup-%s-%s" % (
         resource['DBInstanceIdentifier'], resource['Engine'])
     client.create_db_snapshot(
         DBSnapshotIdentifier=snapshot_id,
         DBInstanceIdentifier=resource['DBInstanceIdentifier'])
Beispiel #18
0
    def resources(self, query=None):
        """Enumerate resources of this manager's type via AWS Config.

        Lists discovered resource identifiers, then fans out batched
        detail fetches across a thread pool.

        :param query: unused, retained for interface compatibility
        :return: list of resource dicts
        """
        client = local_session(self.manager.session_factory).client('config')
        paginator = client.get_paginator('list_discovered_resources')
        pages = paginator.paginate(
            resourceType=self.manager.get_model().config_type)
        results = []

        with self.manager.executor_factory(max_workers=5) as w:
            ridents = pager(pages, self.retry)
            resource_ids = [
                r['resourceId'] for r in ridents.get('resourceIdentifiers', ())]
            self.manager.log.debug(
                "querying %d %s resources",
                len(resource_ids),
                self.manager.__class__.__name__.lower())

            for resource_set in chunks(resource_ids, 50):
                futures = []
                futures.append(w.submit(self.get_resources, resource_set))
                for f in as_completed(futures):
                    if f.exception():
                        self.manager.log.error(
                            "Exception getting resources from config \n %s" % (
                                f.exception()))
                        # Skip the failed batch: calling f.result() here
                        # would re-raise the exception just logged.
                        continue
                    results.extend(f.result())
        return results
 def process_cluster_snapshot(self, cluster):
     """Create a manual snapshot of the given RDS cluster."""
     client = local_session(self.manager.session_factory).client('rds')
     cluster_id = cluster['DBClusterIdentifier']
     client.create_db_cluster_snapshot(
         DBClusterSnapshotIdentifier=snapshot_identifier(
             'Backup', cluster_id),
         DBClusterIdentifier=cluster_id)
Beispiel #20
0
 def process_resource_set(self, resources, tags):
     """Add the given tags to each RDS instance in the set."""
     client = local_session(self.manager.session_factory).client('rds')
     arn_format = "arn:aws:rds:%s:%s:db:%s"
     for resource in resources:
         resource_arn = arn_format % (
             self.manager.config.region, self.manager.account_id,
             resource['DBInstanceIdentifier'])
         client.add_tags_to_resource(ResourceName=resource_arn, Tags=tags)
    def process(self, clusters):
        """Delete RDS clusters, optionally with members and final snapshot.

        'skip-snapshot' suppresses the final snapshot; 'delete-instances'
        (default True) removes member instances first.
        """
        skip = self.data.get('skip-snapshot', False)
        delete_instances = self.data.get('delete-instances', True)
        client = local_session(self.manager.session_factory).client('rds')

        for cluster in clusters:
            if delete_instances:
                # Member instances block cluster deletion; remove them first.
                for instance in cluster.get('DBClusterMembers', []):
                    client.delete_db_instance(
                        DBInstanceIdentifier=instance['DBInstanceIdentifier'],
                        SkipFinalSnapshot=True)
                    self.log.info(
                        'Deleted RDS instance: %s',
                        instance['DBInstanceIdentifier'])

            params = {'DBClusterIdentifier': cluster['DBClusterIdentifier']}
            if skip:
                params['SkipFinalSnapshot'] = True
            else:
                params['FinalDBSnapshotIdentifier'] = snapshot_identifier(
                    'Final', cluster['DBClusterIdentifier'])
            try:
                client.delete_db_cluster(**params)
            except ClientError as e:
                # Cluster not in a deletable state; log and move on.
                if e.response['Error']['Code'] == 'InvalidDBClusterStateFault':
                    self.log.info(
                        'RDS cluster in invalid state: %s',
                        cluster['DBClusterIdentifier'])
                    continue
                raise

            self.log.info(
                'Deleted RDS cluster: %s',
                cluster['DBClusterIdentifier'])
 def set_retention_window(self, cluster, retention):
     """Set the backup retention period on an RDS cluster while
     preserving its existing backup and maintenance windows."""
     client = local_session(self.manager.session_factory).client('rds')
     client.modify_db_cluster(
         DBClusterIdentifier=cluster['DBClusterIdentifier'],
         BackupRetentionPeriod=retention,
         PreferredBackupWindow=cluster['PreferredBackupWindow'],
         PreferredMaintenanceWindow=cluster['PreferredMaintenanceWindow'])
Beispiel #23
0
 def process(self, resources):
     """Delete each EKS cluster, ignoring clusters already gone."""
     client = local_session(self.manager.session_factory).client('eks')
     for cluster in resources:
         try:
             client.delete_cluster(name=cluster['name'])
         except client.exceptions.ResourceNotFoundException:
             # Deleted out-of-band; nothing further to do.
             pass
Beispiel #24
0
 def process(self, resources, event=None):
     """Account filter: match when config recording is NOT fully enabled.

     Annotates the account resource with recorders/channels/status, then
     returns [] (no match) only when delivery channels and qualifying,
     running recorders both exist.
     """
     client = local_session(
         self.manager.session_factory).client('config')
     channels = client.describe_delivery_channels()[
         'DeliveryChannels']
     recorders = client.describe_configuration_recorders()[
         'ConfigurationRecorders']
     resources[0]['config_recorders'] = recorders
     resources[0]['config_channels'] = channels
     # Progressively narrow recorders by the configured criteria.
     if self.data.get('global-resources'):
         recorders = [r for r in recorders
                      if r['recordingGroup'].get('includeGlobalResources')]
     if self.data.get('all-resources'):
         recorders = [r for r in recorders
                      if r['recordingGroup'].get('allSupported')]
     if self.data.get('running', True) and recorders:
         status = {s['name']: s for
                   s in client.describe_configuration_recorder_status(
                   )['ConfigurationRecordersStatus']}
         resources[0]['config_status'] = status
         recorders = [r for r in recorders
                      if status[r['name']]['recording']
                      and status[r['name']]['lastStatus'].lower() in (
                          'pending', 'success')]
     if channels and recorders:
         return []
     return resources
Beispiel #25
0
 def resources(self):
     """Return the (single) account pseudo-resource after filtering."""
     session = local_session(self.session_factory)
     client = session.client('iam')
     aliases = client.list_account_aliases().get('AccountAliases', ('',))
     account = {
         'account_id': get_account_id(session),
         'account_name': aliases[0]}
     return self.filter_resources([account])
Beispiel #26
0
 def process(self, resources):
     """Delete each Kinesis Analytics application."""
     client = local_session(
         self.manager.session_factory).client('kinesisanalytics')
     for app in resources:
         client.delete_application(
             ApplicationName=app['ApplicationName'],
             CreateTimestamp=app['CreateTimestamp'])
Beispiel #27
0
def _scalar_augment(manager, model, detail_spec, resource_set):
    """Augment each resource with the result of a per-resource detail call.

    :param detail_spec: (detail_op, param_name, param_key, detail_path) tuple
    :return: list of augmented resource dicts
    """
    detail_op, param_name, param_key, detail_path = detail_spec
    client = local_session(manager.session_factory).client(
        model.service, region_name=manager.config.region)
    op = getattr(client, detail_op)
    if manager.retry:
        # Route through the retry wrapper; the real op becomes the
        # wrapper's first positional argument.
        args = (op,)
        op = manager.retry
    else:
        args = ()
    results = []
    for r in resource_set:
        # With param_key set, pass that field; otherwise the resource itself.
        kw = {param_name: param_key and r[param_key] or r}
        response = op(*args, **kw)
        if detail_path:
            response = response[detail_path]
        else:
            response.pop('ResponseMetadata')
        if param_key is None:
            # Detail response becomes the resource, keyed by the raw id.
            response[model.id] = r
            r = response
        else:
            r.update(response)
        results.append(r)
    return results
Beispiel #28
0
    def process_asg(self, asg):
        """Move source tag to destination tag.

        Check tag count on asg
        Create new tag tag
        Delete old tag
        Check tag count on instance
        Create new tag
        Delete old tag
        """
        source_tag = self.data.get('source')
        tag_map = {t['Key']: t for t in asg.get('Tags', [])}
        source = tag_map[source_tag]
        destination_tag = self.data.get('dest')
        propagate = self.data.get('propagate', True)
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        # technically safer to create first, but running into
        # max tags constraints, otherwise.
        #
        # delete_first = len([t for t in tag_map if not t.startswith('aws:')])
        # Use the asg's own name as ResourceId; a hard-coded placeholder
        # would target (or fail on) the wrong group.
        client.delete_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'Key': source_tag,
             'Value': source['Value']}])
        client.create_or_update_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'PropagateAtLaunch': propagate,
             'Key': destination_tag,
             'Value': source['Value']}])
        self.propogate_instance_tag(source, destination_tag, asg)
Beispiel #29
0
 def process(self, resources, event=None):
     """Account filter: match when no trail satisfies all configured checks.

     Annotates the account with its trail list, progressively narrows the
     trails by each configured criterion, and matches (returns resources)
     only when no compliant trail remains.
     """
     client = local_session(
         self.manager.session_factory).client('cloudtrail')
     trails = client.describe_trails()['trailList']
     resources[0]['cloudtrails'] = trails
     if self.data.get('global-events'):
         trails = [t for t in trails if t.get('IncludeGlobalServiceEvents')]
     if self.data.get('kms'):
         trails = [t for t in trails if t.get('KmsKeyId')]
     if self.data.get('kms-key'):
         trails = [t for t in trails
                   if t.get('KmsKeyId', '') == self.data['kms-key']]
     if self.data.get('file-digest'):
         trails = [t for t in trails
                   if t.get('LogFileValidationEnabled')]
     if self.data.get('multi-region'):
         trails = [t for t in trails if t.get('IsMultiRegionTrail')]
     if self.data.get('notifies'):
         trails = [t for t in trails if t.get('SNSTopicArn')]
     if self.data.get('running', True):
         running = []
         # Keep only trails currently logging with no delivery errors.
         for t in list(trails):
             t['Status'] = status = client.get_trail_status(
                 Name=t['TrailARN'])
             if status['IsLogging'] and not status.get(
                     'LatestDeliveryError'):
                 running.append(t)
         trails = running
     if trails:
         return []
     return resources
Beispiel #30
0
 def get_cloud_trail_locations(self, buckets):
     """Yield (bucket, key-prefix) pairs for trails logging into buckets."""
     session = local_session(self.manager.session_factory)
     client = session.client('cloudtrail')
     bucket_names = {b['Name'] for b in buckets}
     for trail in client.describe_trails().get('trailList', ()):
         bucket = trail.get('S3BucketName')
         if bucket in bucket_names:
             yield (bucket, trail.get('S3KeyPrefix', ''))
Beispiel #31
0
 def process(self, repositories):
     """Process each CodeCommit repository with a shared client."""
     client = local_session(
         self.manager.session_factory).client('codecommit')
     for repository in repositories:
         self.process_repository(client, repository)
Beispiel #32
0
 def process(self, resources):
     """Apply the configured patch operations to each REST API."""
     client = utils.local_session(
         self.manager.session_factory).client('apigateway')
     patch = self.data['patch']
     for rest_api in resources:
         client.update_rest_api(
             restApiId=rest_api['id'], patchOperations=patch)
Beispiel #33
0
 def initialize(self, asgs):
     """Prime the filter with unencrypted image/config lookups for asgs."""
     super(NotEncryptedFilter, self).initialize(asgs)
     ec2 = local_session(self.manager.session_factory).client('ec2')
     self.unencrypted_images = self.get_unencrypted_images(ec2)
     self.unencrypted_configs = self.get_unencrypted_configs(ec2)
Beispiel #34
0
 def process(self, distributions):
     """Process each distribution with a client for the model's service."""
     service = self.manager.get_model().service
     client = local_session(self.manager.session_factory).client(service)
     for distribution in distributions:
         self.process_distribution(client, distribution)
Beispiel #35
0
 def process_elb(self, elb):
     """Delete a single classic load balancer."""
     elb_name = elb['LoadBalancerName']
     client = local_session(self.manager.session_factory).client('elb')
     client.delete_load_balancer(LoadBalancerName=elb_name)
Beispiel #36
0
 def process_resource_set(self, resource_set, tags):
     """Add tags to a batch of classic load balancers."""
     client = local_session(self.manager.session_factory).client('elb')
     names = [elb['LoadBalancerName'] for elb in resource_set]
     client.add_tags(LoadBalancerNames=names, Tags=tags)
Beispiel #37
0
 def _get_client(self):
     """Return a config-service client for the policy's session."""
     session = utils.local_session(self.policy.session_factory)
     return session.client('config')
Beispiel #38
0
 def _get_key_vault_client_string(self):
     """Return the dotted module + class path of the vaults client."""
     vaults = local_session(Session).client(
         'azure.mgmt.keyvault.KeyVaultManagementClient').vaults
     return '.'.join((vaults.__module__, vaults.__class__.__name__))
Beispiel #39
0
 def process(self, load_balancers):
     """Delete each classic ELB via the manager's retry wrapper."""
     client = local_session(self.manager.session_factory).client('elb')
     for balancer in load_balancers:
         self.manager.retry(
             client.delete_load_balancer,
             LoadBalancerName=balancer['LoadBalancerName'])
Beispiel #40
0
def get_account(session_factory):
    """Return {'account_id', 'account_name'} for the session's account."""
    session = local_session(session_factory)
    iam = session.client('iam')
    aliases = iam.list_account_aliases().get('AccountAliases', ('',))
    # First alias if any; empty string when the account has no alias.
    account_name = aliases[0] if aliases else ""
    return {'account_id': get_account_id(session),
            'account_name': account_name}
Beispiel #41
0
 def process_resource_set(self, resource_set, tag_keys):
     """Remove the given tag keys from a batch of classic ELBs.

     :param resource_set: elb resource dicts to untag
     :param tag_keys: iterable of tag key names to remove
     """
     client = local_session(self.manager.session_factory).client('elb')
     client.remove_tags(
         LoadBalancerNames=[r['LoadBalancerName'] for r in resource_set],
         # One {'Key': ...} entry per tag key. The previous dict
         # comprehension collapsed every key into a single entry
         # holding only the last key, silently dropping the rest.
         Tags=[{'Key': k} for k in tag_keys])
Beispiel #42
0
    def test_account_password_policy_update(self):
        """Policy should enforce min length / symbols / numbers and the
        set-password-policy action should apply them (replayed flight data)."""
        factory = self.replay_flight_data("test_account_password_policy_update")
        p = self.load_policy(
            {
                "name": "set-password-policy",
                "resource": "account",
                "filters": [
                    {
                        "or": [
                            {
                                "not": [
                                    {
                                        "type": "password-policy",
                                        "key": "MinimumPasswordLength",
                                        "value": 12,
                                        "op": "ge"
                                    },
                                    {
                                        "type": "password-policy",
                                        "key": "RequireSymbols",
                                        "value": True
                                    },
                                    {
                                        "type": "password-policy",
                                        "key": "RequireNumbers",
                                        "value": True
                                    }
                                ]
                            }
                        ]
                    }
                ],
                "actions": [
                    {
                        "type": "set-password-policy",
                        "policy": {
                            "MinimumPasswordLength": 12,
                            "RequireSymbols": True,
                            "RequireNumbers": True
                        }
                    }
                ]
            },
            session_factory=factory,
        )

        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Verify the policy actually landed on the account.
        client = local_session(factory).client('iam')
        policy = client.get_account_password_policy().get('PasswordPolicy')
        self.assertEqual(
            [
                policy['MinimumPasswordLength'],
                policy['RequireSymbols'],
                policy['RequireNumbers'],
            ],
            [
                12,
                True,
                True,
            ]
        )
Beispiel #43
0
 def get_client(self):
     """Return a resourcegroupstaggingapi client for the manager session."""
     session = utils.local_session(self.manager.session_factory)
     return session.client('resourcegroupstaggingapi')
Beispiel #44
0
 def get_client(self):
     """Return a client bound to this resource type's service endpoint."""
     rtype = self.resource_type
     return local_session(self.session_factory).client(
         rtype.service, rtype.version, rtype.component)
Beispiel #45
0
 def process_snapshot_set(self, snapshots_set):
     """Delete a batch of Redshift cluster snapshots."""
     client = local_session(self.manager.session_factory).client('redshift')
     for snapshot in snapshots_set:
         client.delete_cluster_snapshot(
             SnapshotIdentifier=snapshot['SnapshotIdentifier'],
             SnapshotClusterIdentifier=snapshot['ClusterIdentifier'])
Beispiel #46
0
 def get_client(self):
     """Return a client for this manager's resource service."""
     service = self.manager.resource_type.service
     return utils.local_session(self.manager.session_factory).client(service)
Beispiel #47
0
 def set_retention_window(self, cluster, retention):
     """Set the automated snapshot retention period on a Redshift cluster."""
     client = local_session(self.manager.session_factory).client('redshift')
     client.modify_cluster(
         ClusterIdentifier=cluster['ClusterIdentifier'],
         AutomatedSnapshotRetentionPeriod=retention)
Beispiel #48
0
 def set_access(self, c):
     """Toggle public accessibility on a Redshift cluster per policy data."""
     publicly_accessible = self.data.get('state', False)
     client = local_session(self.manager.session_factory).client('redshift')
     client.modify_cluster(
         ClusterIdentifier=c['ClusterIdentifier'],
         PubliclyAccessible=publicly_accessible)
Beispiel #49
0
 def process(self, resources):
     """Process each ECS container instance with a shared client."""
     client = local_session(self.manager.session_factory).client('ecs')
     for instance in resources:
         self.process_instance(
             client, instance.get('c7n:cluster'),
             instance.get('containerInstanceArn'))
Beispiel #50
0
 def get_session(self):
     """Return a session, lazily created on first use and cached."""
     if self._session is None:
         self._session = local_session(self.session_factory)
     return self._session
Beispiel #51
0
 def _user_mfa_devices(resource):
     """Annotate an IAM user resource with its MFA devices.

     NOTE(review): uses `self` without taking it as a parameter —
     presumably a closure inside a method; confirm in context.
     """
     client = local_session(self.manager.session_factory).client('iam')
     resource['MFADevices'] = client.list_mfa_devices(
         UserName=resource['UserName'])['MFADevices']
Beispiel #52
0
 def process(self, resources, events=None):
     """Filter IAM resources by presence/absence of inline policies."""
     client = local_session(self.manager.session_factory).client('iam')
     want_inline = self.data.get('value', True)
     if want_inline:
         return [r for r in resources if self._inline_policies(client, r) > 0]
     return [r for r in resources if self._inline_policies(client, r) == 0]
Beispiel #53
0
 def _user_policies(resource):
     """Annotate an IAM user resource with its attached managed policies.

     NOTE(review): uses `self` without taking it as a parameter —
     presumably a closure inside a method; confirm in context.
     """
     client = local_session(self.manager.session_factory).client('iam')
     resource['AttachedPolicies'] = client.list_attached_user_policies(
         UserName=resource['UserName'])['AttachedPolicies']
Beispiel #54
0
 def _user_keys(resource):
     """Annotate an IAM user resource with its access key metadata.

     NOTE(review): uses `self` without taking it as a parameter —
     presumably a closure inside a method; confirm in context.
     """
     client = local_session(self.manager.session_factory).client('iam')
     resource['AccessKeys'] = client.list_access_keys(
         UserName=resource['UserName'])['AccessKeyMetadata']
Beispiel #55
0
 def process(self, resources):
     """Unmount and delete each EFS filesystem, retrying while in use."""
     client = local_session(self.manager.session_factory).client('efs')
     # Mount targets must be removed before the filesystem can be deleted.
     self.unmount_filesystems(resources)
     delete_with_retry = get_retry(('FileSystemInUse',), 12)
     for filesystem in resources:
         delete_with_retry(
             client.delete_file_system,
             FileSystemId=filesystem['FileSystemId'])
 def _augment(r):
     """Merge sagemaker tags for a notebook instance into the resource.

     NOTE(review): uses `self` without taking it as a parameter —
     presumably a closure inside a method; confirm in context.
     """
     client = local_session(self.session_factory).client('sagemaker')
     tags = client.list_tags(
         ResourceArn=r['NotebookInstanceArn'])['Tags']
     r.setdefault('Tags', []).extend(tags)
     return r
Beispiel #57
0
 def process(self, resources, event=None):
     """Bind a secretsmanager client, then run the cross-account check."""
     factory = self.manager.session_factory
     self.client = local_session(factory).client('secretsmanager')
     return super(CrossAccountAccessFilter, self).process(resources)
Beispiel #58
0
 def get_image_mapping(self, resources):
     """Map image id -> image detail for all images used by resources."""
     client = utils.local_session(self.manager.session_factory).client('ec2')
     image_ids = {r['ImageId'] for r in resources}
     described = client.describe_images(ImageIds=list(image_ids))
     return {image['ImageId']: image for image in described['Images']}
Beispiel #59
0
 def __init__(self, session_factory, region):
     """Bind a cloudfunctions client for the given region.

     :param session_factory: session factory used for the api client
     :param region: location used for function operations  # presumably a GCP region — confirm
     """
     self.session_factory = session_factory
     self.session = local_session(session_factory)
     self.client = self.session.client('cloudfunctions', 'v1',
                                       'projects.locations.functions')
     self.region = region
Beispiel #60
0
 def process(self, resources):
     """Apply the configured patch operations to the API Gateway account."""
     session = utils.local_session(self.manager.session_factory)
     gateway = session.client('apigateway')
     gateway.update_account(patchOperations=self.data['patch'])