def get_doc_policies(resources):
    """Collect all unique example policies from a resource list.

    A duplicate policy is one that reuses an existing policy name with a
    different body (different set of actions and/or filters).

    Returns a (name -> policy) mapping and the set of duplicate names.
    """
    seen = {}
    dupes = set()
    for ptext, resource_name, el_name in get_doc_examples(resources):
        for policy in yaml_load(ptext).get('policies', []):
            existing = seen.get(policy['name'])
            if existing is None:
                seen[policy['name']] = policy
            elif existing != policy:
                # Same name, different definition -> flag it.
                dupes.add(policy['name'])

    if dupes:
        print('If you see this error, there are some policies with the same name but different '
              'set of filters and/or actions.\n'
              'Please make sure you\'re using unique names for different policies.\n')
        print('Duplicate policy names:')
        for name in dupes:
            print('\t{0}'.format(name))

    return seen, dupes
def load(options, path, format='yaml', validate=True):
    """Load a policy file from *path* into a PolicyCollection.

    json input is assumed to be a generated artifact, so schema
    validation is skipped for it.  Returns None for an empty policy
    file; raises IOError when the path does not exist, and Exception
    when schema validation fails.
    """
    # should we do os.path.expanduser here?
    if not os.path.exists(path):
        raise IOError("Invalid path for config %r" % path)

    load_resources()
    with open(path) as fh:
        contents = fh.read()

    if format == 'yaml':
        data = utils.yaml_load(contents)
    elif format == 'json':
        data = utils.loads(contents)
        validate = False

    # Test for empty policy file
    if not data or data.get('policies') is None:
        return None

    if validate:
        from c7n.schema import validate
        errors = validate(data)
        if errors:
            raise Exception(
                "Failed to validate on policy %s \n %s" % (errors[1], errors[0]))

    return PolicyCollection.from_data(data, options)
def test_putmetrics_schema(self):
    """PutMetric's action schema should accept the example EC2 policy."""
    import jsonschema
    from c7n.actions import PutMetric

    example = yaml_load(self.EXAMPLE_EC2_POLICY)
    put_metric_action = example['policies'][0]['actions'][0]
    # jsonschema.validate raises on failure and returns None on success.
    res = jsonschema.validate(put_metric_action, PutMetric.schema)
    self.assertIsNone(res, "PutMetric.schema failed to validate.")
def test_schema(self):
    """Exercise `custodian schema` with a range of selectors and options."""
    # no options
    stdout, stderr = self.run_and_expect_success(["custodian", "schema"])
    data = yaml_load(stdout)
    assert data['resources']

    # each of these variants should also exit successfully
    variants = [
        ["--summary"],              # summary option
        ["--json"],                 # json option
        ["aws"],                    # with just a cloud
        ["ec2"],                    # with just a resource
        ["mode"],                   # with just a mode
        ["mode.phd"],               # mode.type
        ["ec2.actions"],            # resource.actions
        ["ec2.filters"],            # resource.filters
        ["ec2.filters.tag-count"],  # specific item
    ]
    for extra in variants:
        self.run_and_expect_success(["custodian", "schema"] + extra)
def _get_test_policy(self, name, yaml_doc, record=False):
    """Load a single-policy YAML doc with recorded or replayed flight data.

    :param name: suffix for the flight-data directory name
    :param yaml_doc: YAML text whose 'policies' list holds one policy
    :param record: when True, record live traffic instead of replaying
    :return: the loaded policy
    """
    if record:
        # Logger.warn is deprecated since Python 3.3; use warning().
        logger.warning("TestPutMetrics is RECORDING")
        session_factory = self.record_flight_data('test_cw_put_metrics_' + name)
    else:
        logger.debug("TestPutMetrics is replaying")
        session_factory = self.replay_flight_data('test_cw_put_metrics_' + name)
    policy = self.load_policy(yaml_load(yaml_doc)['policies'][0],
                              session_factory=session_factory)
    return policy
def test_copy_related_resource_tag_multi_ref(self):
    """copy-related-tag should tag only snapshots of the tagged volume."""
    session_factory = self.replay_flight_data(
        'test_copy_related_resource_tag_multi_ref')
    client = session_factory().client('ec2')

    # Exactly one volume exists in the flight data, already tagged.
    volumes = client.describe_volumes()['Volumes']
    self.assertEqual(len(volumes), 1)
    vol = volumes[0]
    self.assertEqual(vol['Tags'], [{'Key': 'test', 'Value': 'test'}])

    policy = """
    name: copy-tags-from-ebs-volume-to-snapshot
    resource: ebs-snapshot
    filters:
      - type: value
        key: Tags
        value: empty
    actions:
      - type: copy-related-tag
        resource: ebs
        skip_missing: True
        key: VolumeId
        tags: '*'
    """
    p = self.load_policy(yaml_load(policy), session_factory=session_factory)
    resources = p.run()
    self.assertEqual(len(resources), 3)

    if self.recording:
        time.sleep(10)

    all_snaps = client.describe_snapshots(OwnerIds=['self'])['Snapshots']
    self.assertEqual(len(all_snaps), 3)

    # Partition snapshots by whether they belong to the tagged volume.
    tagged_snaps, untagged_snaps = [], []
    for snap in all_snaps:
        if snap['VolumeId'] == vol['VolumeId']:
            tagged_snaps.append(snap)
        else:
            untagged_snaps.append(snap)

    self.assertEqual(len(tagged_snaps), 2)
    self.assertEqual(tagged_snaps[0]['Tags'], vol['Tags'])
    self.assertEqual(tagged_snaps[1]['Tags'], vol['Tags'])

    self.assertEqual(len(untagged_snaps), 1)
    self.assertTrue('Tags' not in untagged_snaps[0].keys())
def _get_test_policy(self, name, yaml_doc, record=False):
    """Load a single-policy YAML doc with recorded or replayed flight data.

    :param name: suffix for the flight-data directory name
    :param yaml_doc: YAML text whose 'policies' list holds one policy
    :param record: when True, record live traffic instead of replaying
    :return: the loaded policy
    """
    if record:
        # Logger.warn is deprecated since Python 3.3; use warning().
        logger.warning("TestPutMetrics is RECORDING")
        session_factory = self.record_flight_data("test_cw_put_metrics_" + name)
    else:
        logger.debug("TestPutMetrics is replaying")
        session_factory = self.replay_flight_data("test_cw_put_metrics_" + name)
    policy = self.load_policy(
        yaml_load(yaml_doc)["policies"][0], session_factory=session_factory
    )
    return policy
def test_usage_metric_filter(self):
    """usage-metric filter should narrow quotas past the given limit."""
    session_factory = self.replay_flight_data('test_service_quota')
    policy = yaml_load("""
        name: service-quota-usage-metric
        resource: aws.service-quota
        filters:
          - UsageMetric: present
          - type: usage-metric
            limit: 20
        """)
    matched = self.load_policy(policy, session_factory=session_factory).run()
    self.assertEqual(len(matched), 1)
def run(config, tag, bucket, account, not_bucket, not_account, debug, region):
    """Run across a set of accounts and buckets.

    Reads an accounts config file, filters accounts by the tag/account
    selectors, overlays top-level inventory/visitors/object-reporting
    settings onto accounts that don't override them, then dispatches
    each selected account to the worker.
    """
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(level=logging.WARNING)
    if debug:
        # Replace worker.invoke with a direct call so work runs inline
        # and failures surface here (see post-mortem handling below).
        def invoke(f, *args, **kw):
            # if f.func_name == 'process_keyset':
            #     key_count = len(args[-1])
            #     print("debug skip keyset %d" % key_count)
            #     return
            return f(*args, **kw)
        worker.invoke = invoke

    with open(config) as fh:
        data = utils.yaml_load(fh.read())

    for account_info in data.get('accounts', ()):
        # Selector filters: include by tag/account, exclude by not_account.
        if tag and tag not in account_info.get('tags', ()):
            continue
        if account and account_info['name'] not in account:
            continue
        if not_account and account_info['name'] in not_account:
            continue
        # Fall back to top-level settings when the account doesn't
        # provide its own.
        if 'inventory' in data and 'inventory' not in account_info:
            account_info['inventory'] = data['inventory']
        if 'visitors' in data and 'visitors' not in account_info:
            account_info['visitors'] = data['visitors']
        if 'object-reporting' in data and 'object-reporting' not in account_info:
            account_info['object-reporting'] = data['object-reporting']
            # Partition report records by the current UTC date.
            account_info['object-reporting'][
                'record-prefix'] = datetime.utcnow().strftime('%Y/%m/%d')
        # CLI bucket/region selections override the config file.
        if bucket:
            account_info['buckets'] = bucket
        if not_bucket:
            account_info['not-buckets'] = not_bucket
        if region:
            account_info['regions'] = region
        try:
            worker.invoke(worker.process_account, account_info)
        except Exception:
            if not debug:
                raise
            # Debug mode: print the traceback and drop into pdb
            # post-mortem before re-raising.
            import pdb, traceback, sys
            traceback.print_exc()
            pdb.post_mortem(sys.exc_info()[-1])
            raise
def test_service_quota_request_history_filter(self):
    """request-history filter should match quotas with closed requests."""
    session_factory = self.replay_flight_data('test_service_quota')
    policy = yaml_load("""
        name: service-quota-history-filter
        resource: aws.service-quota
        filters:
          - type: request-history
            key: "[].Status"
            value: CASE_CLOSED
            op: in
            value_type: swap
        """)
    p = self.load_policy(policy, session_factory=session_factory)
    self.assertTrue(p.run())
def load(options, path, format='yaml', validate=True):
    """Load a policy file from *path* into a PolicyCollection.

    :param options: execution options passed through to the collection
    :param path: filesystem path of the policy file
    :param format: 'yaml' or 'json'; json input skips schema validation
    :param validate: run schema validation on the loaded data
    :raises ValueError: when the path does not exist
    :return: PolicyCollection, or None for an empty policy file
    """
    if not os.path.exists(path):
        raise ValueError("Invalid path for config %r" % path)

    with open(path) as fh:
        if format == 'yaml':
            data = utils.yaml_load(fh.read())
        elif format == 'json':
            data = utils.loads(fh.read())
            # json policies are generated artifacts; skip validation.
            validate = False

    # Consistent with the other loaders in this codebase: an empty
    # policy file yields None rather than a collection wrapping None.
    if not data or data.get('policies') is None:
        return None

    if validate:
        from c7n.schema import validate
        errors = validate(data)
        if errors:
            raise errors[0]

    return PolicyCollection(data, options)
def test_sns_delete(self):
    """The sns delete action should remove the matched topic."""
    session_factory = self.replay_flight_data('test_sns_delete_topic')
    policy = """
    name: delete-sns
    resource: aws.sns
    filters:
      - TopicArn: arn:aws:sns:us-west-1:644160558196:test
    actions:
      - type: delete
    """
    p = self.load_policy(yaml_load(policy), session_factory=session_factory)
    matched = p.run()
    self.assertEqual(len(matched), 1)

    # The topic should be gone afterwards.
    remaining = session_factory().client('sns').list_topics()['Topics']
    self.assertEqual(len(remaining), 0)
def test_service_quota_request_increase(self):
    """request-increase should file a quota change for the matched quota."""
    session_factory = self.replay_flight_data('test_service_quota')
    policy = yaml_load("""
        name: service-quota-request-increase
        resource: aws.service-quota
        filters:
          - QuotaCode: L-355B2B67
        actions:
          - type: request-increase
            multiplier: 1.2
        """)
    p = self.load_policy(policy, session_factory=session_factory)
    resources = p.run()
    self.assertEqual(len(resources), 1)

    # The change request should show up in the quota's history.
    client = local_session(session_factory).client('service-quotas')
    history = client.list_requested_service_quota_change_history_by_quota(
        ServiceCode=resources[0]['ServiceCode'],
        QuotaCode=resources[0]['QuotaCode'])['RequestedQuotas']
    self.assertTrue(history)
def load(options, path, format='yaml', validate=True):
    """Load a policy file from *path* into a PolicyCollection.

    Returns None for an empty policy file.  Raises IOError when the
    path does not exist, and Exception on schema validation failure.
    """
    # should we do os.path.expanduser here?
    if not os.path.exists(path):
        raise IOError("Invalid path for config %r" % path)

    load_resources()
    with open(path) as fh:
        contents = fh.read()

    if format == 'yaml':
        data = utils.yaml_load(contents)
    elif format == 'json':
        data = utils.loads(contents)
        validate = False

    # Test for empty policy file
    if not data or data.get('policies') is None:
        return None

    if validate:
        from c7n.schema import validate
        errors = validate(data)
        if errors:
            raise Exception(
                "Failed to validate on policy %s \n %s" % (errors[1], errors[0]))

    return PolicyCollection(data, options)
def validate(config): """Validate a configuration file.""" with open(config) as fh: data = utils.yaml_load(fh.read()) jsonschema.validate(data, CONFIG_SCHEMA)