def run(event, context):
    # policies file should always be valid in functions so do loading naively
    with open(context['config_file']) as f:
        policy_config = json.load(f)

    if not policy_config or not policy_config.get('policies'):
        log.error('Invalid policy config')
        return False

    options_overrides = \
        policy_config['policies'][0].get('mode', {}).get('execution-options', {})

    # setup our auth file location on disk
    options_overrides['authorization_file'] = context['auth_file']

    # if output_dir specified use that, otherwise make a temp directory
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = get_tmp_output_dir()

    # merge all our options in
    options = Config.empty(**options_overrides)

    load_resources()

    options = Azure().initialize(options)

    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            try:
                p.push(event, context)
            except (CloudError, AzureHttpError) as error:
                log.error("Unable to process policy: %s :: %s" % (p.name, error))

    return True
def main():
    parser = setup_parser()
    options = parser.parse_args()

    config = Config.empty()
    resources.load_resources()

    collection = policy_load(
        config, options.config_file).filter(options.policy_filter)

    sam = {
        'AWSTemplateFormatVersion': '2010-09-09',
        'Transform': 'AWS::Serverless-2016-10-31',
        'Resources': {}}

    for p in collection:
        if p.provider_name != 'aws':
            continue
        exec_mode_type = p.data.get('mode', {'type': 'pull'}).get('type')
        if exec_mode_type == 'pull':
            continue
        sam_func = render(p)
        if sam_func:
            sam['Resources'][resource_name(p.name)] = sam_func
            sam_func['Properties']['CodeUri'] = './%s.zip' % p.name
        else:
            print("unable to render sam for policy:%s" % p.name)
            continue
        archive = mu.PolicyLambda(p).get_archive()
        with open(os.path.join(options.output_dir, "%s.zip" % p.name), 'wb') as fh:
            fh.write(archive.get_bytes())

    with open(os.path.join(options.output_dir, 'deploy.yml'), 'w') as fh:
        fh.write(yaml.safe_dump(sam, default_flow_style=False))
def init(config, use, debug, verbose, accounts, tags, policies,
         resource=None, policy_tags=()):
    level = verbose and logging.DEBUG or logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")

    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('s3transfer').setLevel(logging.WARNING)
    logging.getLogger('custodian.s3').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    # Filter out custodian log messages on console output if not
    # at warning level or higher, see LogFilter docs and #2674
    for h in logging.getLogger().handlers:
        if isinstance(h, logging.StreamHandler):
            h.addFilter(LogFilter())

    with open(config) as fh:
        accounts_config = yaml.safe_load(fh.read())
        jsonschema.validate(accounts_config, CONFIG_SCHEMA)

    if use:
        with open(use) as fh:
            custodian_config = yaml.safe_load(fh.read())
    else:
        custodian_config = {}

    accounts_config['accounts'] = list(accounts_iterator(accounts_config))
    filter_policies(custodian_config, policy_tags, policies, resource)
    filter_accounts(accounts_config, tags, accounts)

    load_resources()
    MainThreadExecutor.c7n_async = False
    executor = debug and MainThreadExecutor or ProcessPoolExecutor
    return accounts_config, custodian_config, executor
def init(config, use, debug, verbose, accounts, tags, policies,
         resource=None, policy_tags=()):
    level = verbose and logging.DEBUG or logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")

    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('custodian').setLevel(logging.WARNING)
    logging.getLogger('custodian.s3').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    with open(config) as fh:
        accounts_config = yaml.safe_load(fh.read())
        jsonschema.validate(accounts_config, CONFIG_SCHEMA)

    if use:
        with open(use) as fh:
            custodian_config = yaml.safe_load(fh.read())
    else:
        custodian_config = {}

    accounts_config['accounts'] = list(accounts_iterator(accounts_config))
    filter_policies(custodian_config, policy_tags, policies, resource)
    filter_accounts(accounts_config, tags, accounts)

    load_resources()
    MainThreadExecutor.c7n_async = False
    executor = debug and MainThreadExecutor or ProcessPoolExecutor
    return accounts_config, custodian_config, executor
def schema_completer(prefix):
    """
    For tab-completion via argcomplete, return completion options.

    For the given prefix so far, return the possible options.  Note that
    filtering via startswith happens after this list is returned.
    """
    load_resources()
    components = prefix.split('.')

    # Completions for resource
    if len(components) == 1:
        choices = [r for r in resources.keys() if r.startswith(prefix)]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices

    if components[0] not in resources.keys():
        return []

    # Completions for category
    if len(components) == 2:
        choices = ['{}.{}'.format(components[0], x)
                   for x in ('actions', 'filters') if x.startswith(components[1])]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices

    # Completions for item
    elif len(components) == 3:
        resource_mapping = schema.resource_vocabulary()
        return ['{}.{}.{}'.format(components[0], components[1], x)
                for x in resource_mapping[components[0]][components[1]]]

    return []
def load(options, path, format='yaml', validate=True, vars=None):
    # should we do os.path.expanduser here?
    if not os.path.exists(path):
        raise IOError("Invalid path for config %r" % path)

    load_resources()
    data = utils.load_file(path, format=format, vars=vars)

    if format == 'json':
        validate = False

    if isinstance(data, list):
        log.warning('yaml in invalid format. The "policies:" line is probably missing.')
        return None

    # Test for empty policy file
    if not data or data.get('policies') is None:
        return None

    if validate:
        from c7n.schema import validate
        errors = validate(data)
        if errors:
            raise Exception(
                "Failed to validate on policy %s \n %s" % (
                    errors[1], errors[0]))

    collection = PolicyCollection.from_data(data, options)
    if validate:
        # non schema validation of policies
        [p.validate() for p in collection]
    return collection
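# Illustrative usage sketch for the load() helper above; not part of the
# original module. The 'policies.yml' path and option values are hypothetical.
def example_load_policies():
    from c7n.config import Config

    options = Config.empty(region='us-east-1', output_dir='/tmp/custodian')
    collection = load(options, 'policies.yml', format='yaml', validate=True)
    if collection is None:
        # empty or malformed policy file
        return []
    # iterating a PolicyCollection yields Policy instances
    return [p.name for p in collection]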
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger('botocore').setLevel(logging.WARNING)

    parser = setup_parser()
    options = parser.parse_args()
    options.log_group = None
    options.cache = None

    factory = SessionFactory(
        options.region, options.profile, options.assume_role)
    session = factory()
    client = session.client('cloudwatch')

    load_resources()
    policies = load(options, options.config)

    if options.start and options.end:
        start = options.start
        end = options.end
    elif options.days:
        end = datetime.utcnow()
        start = end - timedelta(options.days)

    data = {}
    for p in policies:
        logging.info('Getting %s metrics', p)
        data[p.name] = p.get_metrics(start, end, options.period)
    print(dumps(data, indent=2))
def dispatch_event(event, context):
    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return

    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))

    # policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)

    if not policy_config or not policy_config.get('policies'):
        return False

    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config[
        'policies'][0].get('mode', {}).get('execution-options', {})
    options = Config.empty(**options_overrides)

    load_resources()
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
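# Illustrative only: a minimal shape for the config.json consumed by
# dispatch_event() above. The policy name and mode type are hypothetical; the
# real file is generated when the lambda package is built.
EXAMPLE_POLICY_CONFIG = {
    "policies": [{
        "name": "ec2-tag-compliance",   # hypothetical policy name
        "resource": "ec2",
        "mode": {
            "type": "cloudtrail",
            "execution-options": {"output_dir": "/tmp/custodian"},
        },
    }]
}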
def json_dump(resource=None):
    load_resources()
    try:
        print(json.dumps(generate(resource), indent=2))
    except:
        import traceback, pdb, sys
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
def index_resources(
        config, policies, date=None, concurrency=5,
        accounts=None, tag=None, verbose=False):
    """index policy resources"""
    logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
    logging.getLogger('botocore').setLevel(logging.WARNING)
    logging.getLogger('elasticsearch').setLevel(logging.WARNING)
    logging.getLogger('urllib3').setLevel(logging.WARNING)
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('c7n.worker').setLevel(logging.INFO)

    # validating the config and policy files.
    with open(config) as fh:
        config = yaml.safe_load(fh.read())
    jsonschema.validate(config, CONFIG_SCHEMA)

    with open(policies) as fh:
        policies = yaml.safe_load(fh.read())
    load_resources()
    schema.validate(policies)

    date = valid_date(date, delta=1)

    with ProcessPoolExecutor(max_workers=concurrency) as w:
        futures = {}
        jobs = []

        for account in config.get('accounts'):
            if accounts and account['name'] not in accounts:
                continue
            if tag:
                found = False
                for t in account['tags'].values():
                    if tag == t:
                        found = True
                        break
                if not found:
                    continue
            for region in account.get('regions'):
                for policy in policies.get('policies'):
                    p = (config, account, region, policy, date)
                    jobs.append(p)

        for j in jobs:
            log.debug("submit account:{} region:{} policy:{} date:{}".format(
                j[1]['name'], j[2], j[3]['name'], j[4]))
            futures[w.submit(index_account_resources, *j)] = j

        # Process completed
        for f in as_completed(futures):
            config, account, region, policy, date = futures[f]
            if f.exception():
                log.warning(
                    "error account:{} region:{} policy:{} error:{}".format(
                        account['name'], region, policy['name'], f.exception()))
                continue
            log.info("complete account:{} region:{} policy:{}".format(
                account['name'], region, policy['name']))
def validate(options):
    load_resources()
    if len(options.configs) < 1:
        log.error('no config files specified')
        sys.exit(1)

    used_policy_names = set()
    schm = schema.generate()
    errors = []

    for config_file in options.configs:
        config_file = os.path.expanduser(config_file)
        if not os.path.exists(config_file):
            raise ValueError("Invalid path for config %r" % config_file)

        options.dryrun = True
        fmt = config_file.rsplit('.', 1)[-1]

        with open(config_file) as fh:
            if fmt in ('yml', 'yaml'):
                data = yaml.safe_load(fh.read())
            elif fmt in ('json',):
                data = json.load(fh)
            else:
                log.error("The config file must end in .json, .yml or .yaml.")
                raise ValueError("The config file must end in .json, .yml or .yaml.")

        errors += schema.validate(data, schm)
        conf_policy_names = {
            p.get('name', 'unknown') for p in data.get('policies', ())}
        dupes = conf_policy_names.intersection(used_policy_names)
        if len(dupes) >= 1:
            errors.append(ValueError(
                "Only one policy with a given name allowed, duplicates: %s" % (
                    ", ".join(dupes))))
        used_policy_names = used_policy_names.union(conf_policy_names)

        if not errors:
            null_config = Config.empty(dryrun=True, account_id='na', region='na')
            for p in data.get('policies', ()):
                try:
                    policy = Policy(p, null_config, Bag())
                    policy.validate()
                except Exception as e:
                    msg = "Policy: %s is invalid: %s" % (
                        p.get('name', 'unknown'), e)
                    errors.append(msg)

        if not errors:
            log.info("Configuration valid: {}".format(config_file))
            continue

        log.error("Configuration invalid: {}".format(config_file))
        for e in errors:
            log.error("%s" % e)

    if errors:
        sys.exit(1)
def __init__(self, ctx, management_group_id, env=[]):
    super(ManagementGroupDeployment, self).__init__(
        ctx, default_environment={e[0]: e[1] for e in env})
    self.management_group_id = management_group_id
    load_resources()
    self.session = local_session(Session)
    self.run()
def init(provider):
    global INITIALIZED
    if INITIALIZED:
        return
    load_resources()
    CustodianDirective.vocabulary = resource_vocabulary()
    CustodianDirective.definitions = generate_schema()['definitions']
    CustodianDirective.env = env = get_environment(provider)
    INITIALIZED = True
    return env
def stream(repo_uri, stream_uri, verbose, assume, sort, before=None, after=None):
    """Stream git history policy changes to destination.

    Default stream destination is a summary of the policy changes to stdout,
    one per line. Also supported for stdout streaming is `jsonline`.

    AWS Kinesis and SQS destinations are specified by providing the ARN.

    Database destinations are supported by providing a sqlalchemy DSN. Note
    SQLAlchemy and db drivers must be installed separately as they are an
    optional dependency.

    When using database destinations, streaming defaults to incremental.
    """
    logging.basicConfig(
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
        level=(verbose and logging.DEBUG or logging.INFO))
    logging.getLogger('botocore').setLevel(logging.WARNING)

    if before:
        before = parse(before)
    if after:
        after = parse(after)
    if sort:
        sort = six.moves.reduce(operator.or_, [SORT_TYPE[s] for s in sort])

    with contextlib.closing(TempDir().open()) as temp_dir:
        if repo_uri is None:
            repo_uri = pygit2.discover_repository(os.getcwd())
            log.debug("Using repository %s", repo_uri)
        if repo_uri.startswith('http') or repo_uri.startswith('git@'):
            log.info("Cloning repository: %s", repo_uri)
            repo = pygit2.clone_repository(repo_uri, temp_dir.path)
        else:
            repo = pygit2.Repository(repo_uri)

        load_resources()
        policy_repo = PolicyRepo(repo_uri, repo)
        change_count = 0

        with contextlib.closing(transport(stream_uri, assume)) as t:
            if after is None and isinstance(t, IndexedTransport):
                after = t.last()
            for change in policy_repo.delta_stream(after=after, before=before):
                change_count += 1
                t.send(change)

        log.info("Streamed %d policy repo changes", change_count)
    return change_count
def validate(options):
    load_resources()
    if options.config is not None:
        # support the old -c option
        options.configs.append(options.config)
    if len(options.configs) < 1:
        # no configs to test
        # We don't have the parser object, so fake ArgumentParser.error
        print('custodian validate: error: no config files specified',
              file=sys.stderr)
        sys.exit(2)

    used_policy_names = set()
    schm = schema.generate()
    errors = []

    for config_file in options.configs:
        config_file = os.path.expanduser(config_file)
        if not os.path.exists(config_file):
            raise ValueError("Invalid path for config %r" % config_file)

        options.dryrun = True
        format = config_file.rsplit('.', 1)[-1]

        with open(config_file) as fh:
            if format in ('yml', 'yaml'):
                data = yaml.safe_load(fh.read())
            if format in ('json',):
                data = json.load(fh)

        errors = schema.validate(data, schm)
        conf_policy_names = {p['name'] for p in data.get('policies', ())}
        dupes = conf_policy_names.intersection(used_policy_names)
        if len(dupes) >= 1:
            errors.append(ValueError(
                "Only one policy with a given name allowed, duplicates: %s" % (
                    ", ".join(dupes))))
        used_policy_names = used_policy_names.union(conf_policy_names)

        if not errors:
            null_config = Bag(
                dryrun=True, log_group=None, cache=None, assume_role="na")
            for p in data.get('policies', ()):
                try:
                    Policy(p, null_config, Bag())
                except Exception as e:
                    msg = "Policy: %s is invalid: %s" % (
                        p.get('name', 'unknown'), e)
                    errors.append(msg)

        if not errors:
            log.info("Configuration valid: {}".format(config_file))
            continue

        log.error("Configuration invalid: {}".format(config_file))
        for e in errors:
            log.error(" %s" % e)

    if errors:
        sys.exit(1)
def test_check_permissions(self):
    load_resources(('gcp.*',))
    missing = []
    invalid = []
    iam_path = os.path.join(
        os.path.dirname(__file__), 'data', 'iam-permissions.json')
    with open(iam_path) as fh:
        valid_perms = set(json.load(fh).get('permissions'))
    cfg = Config.empty()

    for k, v in resources.items():
        policy = Bag({
            'name': 'permcheck',
            'resource': 'gcp.%s' % k,
            'provider_name': 'gcp'})
        ctx = self.get_context(config=cfg, policy=policy)
        mgr = v(ctx, policy)
        perms = mgr.get_permissions()
        if not perms:
            missing.append(k)
        for p in perms:
            if p not in valid_perms:
                invalid.append((k, p))

        for n, a in list(v.action_registry.items()):
            if n in ALLOWED_NOPERM:
                continue
            policy['actions'] = [n]
            perms = a({}, mgr).get_permissions()
            if not perms:
                missing.append('%s.actions.%s' % (k, n))
            for p in perms:
                if p not in valid_perms:
                    invalid.append(('%s.actions.%s' % (k, n), p))

        for n, f in list(v.filter_registry.items()):
            if n in ALLOWED_NOPERM:
                continue
            policy['filters'] = [n]
            perms = f({}, mgr).get_permissions()
            if not perms:
                missing.append('%s.filters.%s' % (k, n))
            for p in perms:
                if p not in valid_perms:
                    invalid.append(('%s.filters.%s' % (k, n), p))

    if missing:
        self.fail('missing permissions %d on \n\t%s' % (
            len(missing), '\n\t'.join(sorted(missing))))
    if invalid:
        self.fail('invalid permissions %d on \n\t%s' % (
            len(invalid), '\n\t'.join(map(str, sorted(invalid)))))
def __init__(self, event_queue_id, event_queue_name, policy_storage,
             log_group=None, metrics=None, output_dir=None):
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    log.info("Running Azure Cloud Custodian Self-Host")

    load_resources()
    self.session = local_session(Session)

    # Load configuration
    self.options = Host.build_options(output_dir, log_group, metrics)
    self.policy_storage_uri = policy_storage
    self.event_queue_name = event_queue_name
    self.event_queue_id = event_queue_id

    # Prepare storage bits
    self.policy_blob_client = None
    self.blob_cache = {}
    self.queue_storage_account = self.prepare_queue_storage(
        self.event_queue_id, self.event_queue_name)
    self.queue_service = None

    # Track required event subscription updates
    self.require_event_update = False

    # Policy cache and dictionary
    self.policy_cache = tempfile.mkdtemp()
    self.policies = {}

    # Configure scheduler
    self.scheduler = BlockingScheduler()
    logging.getLogger('apscheduler.executors.default').setLevel(logging.ERROR)

    # Schedule recurring policy updates
    self.scheduler.add_job(self.update_policies,
                           'interval',
                           seconds=policy_update_seconds,
                           id="update_policies",
                           next_run_time=datetime.now())

    # Schedule recurring queue polling
    self.scheduler.add_job(self.poll_queue,
                           'interval',
                           seconds=queue_poll_seconds,
                           id="poll_queue")

    self.scheduler.start()
def __init__(self, ctx):
    self.dry_run = ctx.parent.params.get('dry_run')
    self.deployment_name = ctx.parent.params.get('deployment_name')
    self.deployment_namespace = ctx.parent.params.get('deployment_namespace')
    self.helm_values_file = ctx.parent.params.get('helm_values_file')
    self.helm_set_values = ctx.parent.params.get('helm_set', [])
    self.subscription_hosts = []

    load_resources()
    self.session = local_session(Session)
def test_get_resource_class(self):
    with self.assertRaises(KeyError) as ectx:
        get_resource_class('aws.xyz')
    self.assertIn("resource: xyz", str(ectx.exception))
    with self.assertRaises(KeyError) as ectx:
        get_resource_class('xyz.foo')
    self.assertIn("provider: xyz", str(ectx.exception))
    load_resources(('aws.ec2',))
    ec2 = get_resource_class('aws.ec2')
    self.assertEqual(ec2.type, 'ec2')
def test_bool_operator_child_validation(self):
    data = {'policies': [
        {'name': 'test',
         'resource': 'ec2',
         'filters': [
             {'or': [
                 {'type': 'imagex', 'key': 'tag:Foo', 'value': 'a'}]}]}]}
    load_resources(('aws.ec2',))
    validator = self.policy_loader.validator.gen_schema(('aws.ec2',))
    errors = list(validator.iter_errors(data))
    self.assertTrue(errors)
def test_report_metadata(self):
    load_resources(('gcp.*',))
    missing = set()
    for k, v in GoogleCloud.resources.items():
        if (not v.resource_type.id or
                not v.resource_type.name or
                not v.resource_type.default_report_fields):
            missing.add("%s~%s" % (k, v))
    if missing:
        raise AssertionError("Missing report metadata on \n %s" % (
            ' \n'.join(sorted(missing))))
def _load_policies(options):
    load_resources()
    policies = []
    all_policies = []
    errors = 0

    for file in options.configs:
        try:
            collection = policy_load(options, file)
        except IOError:
            eprint('Error: policy file does not exist ({})'.format(file))
            errors += 1
            continue
        except ValueError as e:
            eprint('Error: problem loading policy file ({})'.format(e.message))
            errors += 1
            continue

        if collection is None:
            log.debug('Loaded file {}. Contained no policies.'.format(file))
        else:
            log.debug(
                'Loaded file {}. Contains {} policies (after filtering)'.format(
                    file, len(collection)))
            policies.extend(collection.policies)
            all_policies.extend(collection.unfiltered_policies)

    if errors > 0:
        eprint('Found {} errors. Exiting.'.format(errors))
        sys.exit(1)

    if len(policies) == 0:
        _print_no_policies_warning(options, all_policies)
        # If we filtered out all the policies we want to exit with a
        # non-zero status. But if the policy file is empty then continue
        # on to the specific command to determine the exit status.
        if len(all_policies) > 0:
            sys.exit(1)

    # Do not allow multiple policies in a region with the same name,
    # even across files
    policies_by_region = defaultdict(list)
    for p in policies:
        policies_by_region[p.options.region].append(p)
    for region in policies_by_region.keys():
        counts = Counter([p.name for p in policies_by_region[region]])
        for policy, count in counts.iteritems():
            if count > 1:
                eprint("Error: duplicate policy name '{}'".format(policy))
                sys.exit(1)

    return f(options, policies)
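# Context sketch, hedged: the _load_policies variants in this collection end
# with `return f(options, policies)` because in the source tree they are the
# bodies of a decorator, where `f` is the wrapped command. A minimal
# reconstruction of that wrapping (the decorator name here is assumed):
def policy_command(f):
    import functools

    @functools.wraps(f)
    def _load_policies(options):
        policies = []  # ... load, filter, and de-duplicate as in the bodies above ...
        return f(options, policies)

    return _load_policies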
def test_metadata(self):
    data = {
        "policies": [{
            "name": "test",
            "resource": "ec2",
            "metadata": {
                "createdBy": "Totoro"
            }
        }],
    }
    load_resources(('aws.ec2',))
    validator = self.get_validator(data)
    self.assertEqual(list(validator.iter_errors(data)), [])
def main():
    parser = setup_parser()
    options = parser.parse_args()
    options.policy_filter = None
    options.log_group = None
    options.cache_period = 0
    options.cache = None

    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(logging.ERROR)

    resources.load_resources()
    resources_gc_prefix(options)
def schema_completer(prefix):
    """
    For tab-completion via argcomplete, return completion options.

    For the given prefix so far, return the possible options.  Note that
    filtering via startswith happens after this list is returned.
    """
    from c7n import schema
    load_resources()
    components = prefix.split('.')

    if components[0] in provider.clouds.keys():
        cloud_provider = components.pop(0)
        provider_resources = provider.resources(cloud_provider)
    else:
        cloud_provider = 'aws'
        provider_resources = provider.resources('aws')
        components[0] = "aws.%s" % components[0]

    # Completions for resource
    if len(components) == 1:
        choices = [
            r for r in provider.resources().keys()
            if r.startswith(components[0])
        ]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices

    if components[0] not in provider_resources.keys():
        return []

    # Completions for category
    if len(components) == 2:
        choices = [
            '{}.{}'.format(components[0], x)
            for x in ('actions', 'filters')
            if x.startswith(components[1])
        ]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices

    # Completions for item
    elif len(components) == 3:
        resource_mapping = schema.resource_vocabulary(cloud_provider)
        return [
            '{}.{}.{}'.format(components[0], components[1], x)
            for x in resource_mapping[components[0]][components[1]]
        ]

    return []
def local(reload, port):
    """run local app server, assumes into the account
    """
    import logging
    from bottle import run
    from app import controller, app
    from c7n.resources import load_resources

    load_resources()
    print("Loaded resources definitions")
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('botocore').setLevel(logging.WARNING)

    if controller.db.provision():
        print("Table Created")
    run(app, reloader=reload, port=port)
def dispatch_event(event, context):
    global account_id
    if account_id is None:
        session = boto3.Session()
        account_id = get_account_id_from_sts(session)

    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return

    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))

    # policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)

    if not policy_config or not policy_config.get('policies'):
        return False

    # Initialize output directory, we've seen occasional perm issues with
    # lambda on temp directory and changing unix execution users, so
    # use a per execution temp space.
    output_dir = os.environ.get(
        'C7N_OUTPUT_DIR', '/tmp/' + str(uuid.uuid4()))
    if not os.path.exists(output_dir):
        try:
            os.mkdir(output_dir)
        except OSError as error:
            log.warning("Unable to make output directory: {}".format(error))

    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config[
        'policies'][0].get('mode', {}).get('execution-options', {})
    options_overrides['account_id'] = account_id
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = output_dir
    options = Config.empty(**options_overrides)

    load_resources()
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def test_metadata(self):
    data = {
        "policies": [{
            "name": "object_test",
            "resource": "ec2",
            "metadata": {
                "createdBy": "Totoro",
                "version": 1988,
                "relatedTo": ['Ghibli', 'Classic', 'Miyazaki']
            }
        }],
    }
    load_resources(('aws.ec2',))
    validator = self.get_validator(data)
    self.assertEqual(list(validator.iter_errors(data)), [])
def test_semantic_error_policy_scope(self):
    data = {
        'policies': [
            {'actions': [{
                'key': 'AES3000',
                'type': 'encryption',
                'value': 'This resource should have AES3000 encryption'}],
             'description': 'Identify resources which lack our outrageous cipher',
             'name': 'bogus-policy',
             'resource': 'aws.waf'}]}
    load_resources(('aws.waf',))
    validator = self.policy_loader.validator.gen_schema(('aws.waf',))
    errors = list(validator.iter_errors(data))
    self.assertEqual(len(errors), 1)
    error = policy_error_scope(specific_error(errors[0]), data)
    self.assertTrue("policy:bogus-policy" in error.message)
def test_vars_and_tags(self):
    data = {
        "vars": {
            "alpha": 1,
            "beta": 2
        },
        "policies": [{
            "name": "test",
            "resource": "ec2",
            "tags": ["controls"]
        }],
    }
    load_resources(('aws.ec2',))
    validator = self.get_validator(data)
    self.assertEqual(list(validator.iter_errors(data)), [])
def tag_org(config, db, region, creator_tag, user_suffix, dryrun,
            accounts, tags, debug, verbose, type):
    """Tag an orgs resources
    """
    accounts_config, custodian_config, executor = org_init(
        config, use=None, debug=debug, verbose=verbose,
        accounts=accounts or None, tags=tags, policies=None,
        resource=None, policy_tags=None)

    load_resources()

    stats = {}
    total = 0
    start_exec = time.time()
    with executor(max_workers=WORKER_COUNT) as w:
        futures = {}
        for a in accounts_config['accounts']:
            for r in resolve_regions(region or a.get('regions', ())):
                futures[w.submit(
                    tag_org_account, a, r, db, creator_tag,
                    user_suffix, dryrun, type)] = (a, r)

        for f in as_completed(futures):
            a, region = futures[f]
            if f.exception():
                log.warning(
                    "error account:%s id:%s region:%s error:%s" % (
                        a['name'], a['account_id'], region, f.exception()))
                continue
            result = f.result()
            if result:
                stats[(a['name'], region)] = (a, result)
                print(("auto tag complete account:%s id:%s region:%s \n %s" % (
                    a['name'], a['account_id'], region,
                    "\n ".join([
                        " {}: {}".format(k, v)
                        for k, v in result.items()
                        if v and not k.endswith('not-found')]))).strip())
                total += sum(
                    [v for k, v in result.items()
                     if not k.endswith('not-found')])

    print("Total resources tagged: %d in %0.2f" % (
        total, time.time() - start_exec))
    return stats
def main():
    parser = setup_parser()
    options = parser.parse_args()
    options.policy_filter = None
    options.log_group = None
    options.cache_period = 0
    options.cache = None

    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('c7n.cache').setLevel(logging.WARNING)

    resources.load_resources()
    policies = load_policies(options)
    resources_gc_prefix(options, policies)
def init(config, use, debug, verbose, accounts, tags, policies, resource=None):
    level = verbose and logging.DEBUG or logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('custodian').setLevel(logging.WARNING)
    logging.getLogger('custodian.s3').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    with open(config) as fh:
        accounts_config = yaml.safe_load(fh.read())
        jsonschema.validate(accounts_config, CONFIG_SCHEMA)

    if use:
        with open(use) as fh:
            custodian_config = yaml.safe_load(fh.read())
    else:
        custodian_config = {}

    filtered_policies = []
    for p in custodian_config.get('policies', ()):
        if policies and p['name'] not in policies:
            continue
        if resource and p['resource'] != resource:
            continue
        filtered_policies.append(p)
    custodian_config['policies'] = filtered_policies

    filtered_accounts = []
    for a in accounts_config.get('accounts', ()):
        if accounts and a['name'] not in accounts:
            continue
        if tags:
            found = set()
            for t in tags:
                if t in a.get('tags', ()):
                    found.add(t)
            if not found == set(tags):
                continue
        filtered_accounts.append(a)
    accounts_config['accounts'] = filtered_accounts

    load_resources()
    MainThreadExecutor.async = False
    executor = debug and MainThreadExecutor or ProcessPoolExecutor
    return accounts_config, custodian_config, executor
def dispatch_event(event, context):
    global account_id
    if account_id is None:
        session = boto3.Session()
        account_id = get_account_id_from_sts(session)

    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return

    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))

    # policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)

    if not policy_config or not policy_config.get('policies'):
        return False

    # Initialize output directory, we've seen occasional perm issues with
    # lambda on temp directory and changing unix execution users, so
    # use a per execution temp space.
    output_dir = os.environ.get('C7N_OUTPUT_DIR', '/tmp/' + str(uuid.uuid4()))
    if not os.path.exists(output_dir):
        try:
            os.mkdir(output_dir)
        except OSError as error:
            log.warning("Unable to make output directory: {}".format(error))

    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config['policies'][0].get('mode', {}).get(
        'execution-options', {})
    options_overrides['account_id'] = account_id
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = output_dir
    options = Config.empty(**options_overrides)

    load_resources()
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def test_semantic_error_with_nested_resource_key(self):
    data = {
        'policies': [{
            'name': 'team-tag-ebs-snapshot-audit',
            'resource': 'ebs-snapshot',
            'actions': [
                {'type': 'copy-related-tag',
                 'resource': 'ebs',
                 'skip_missing': True,
                 'key': 'VolumeId',
                 'tags': 'Team'}]}]}
    load_resources(('aws.ebs',))
    validator = self.get_validator(data)
    errors = list(validator.iter_errors(data))
    self.assertEqual(len(errors), 1)
    error = specific_error(errors[0])
    self.assertTrue('Team' in error.message)
def diff(repo_uri, source, target, output, verbose):
    """Policy diff between two arbitrary revisions.

    Revision specifiers for source and target can use fancy git refspec
    syntax for symbolics, dates, etc.

    See: https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection

    Default revision selection is dependent on current working tree
    branch. The intent is for two use cases, if on a non-master branch
    then show the diff to master. If on master show the diff to
    previous commit on master. For repositories not using the `master`
    convention, please specify explicit source and target.
    """
    logging.basicConfig(
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
        level=(verbose and logging.DEBUG or logging.INFO))
    logging.getLogger('botocore').setLevel(logging.WARNING)

    if repo_uri is None:
        repo_uri = pygit2.discover_repository(os.getcwd())

    repo = pygit2.Repository(repo_uri)
    load_resources()

    # If on master show diff between last commit to current head
    if repo.head.shorthand == 'master':
        if source is None:
            source = 'master@{1}'
        if target is None:
            target = 'master'
    # Else show difference between master and current head
    elif target is None:
        target = repo.head.shorthand
    if source is None:
        source = 'master'

    policy_repo = PolicyRepo(repo_uri, repo)
    changes = list(
        policy_repo.delta_commits(repo.revparse_single(source),
                                  repo.revparse_single(target)))
    output.write(
        yaml.safe_dump({
            'policies': [c.policy.data for c in changes
                         if c.kind != ChangeType.REMOVE]
        }).encode('utf8'))
def test_gcp_resource_metadata_asset_type():
    load_resources(('gcp.*',))
    # asset inventory doesn't support these
    whitelist = set((
        'app-engine-domain',
        'app-engine-certificate',
        'app-engine-firewall-ingress-rule',
        'app-engine-domain-mapping',
        'bq-job',
        'bq-project',
        'build',
        'dataflow-job',
        'dm-deployment',
        'function',
        'loadbalancer-ssl-policy',
        'log-exclusion',
        'ml-job',
        'ml-model',
        'sourcerepo',
        'sql-backup-run',
        'sql-ssl-cert',
        'sql-user',
        'pubsub-snapshot'))
    missing = set()
    for k, v in GoogleCloud.resources.items():
        if v.resource_type.asset_type is None:
            missing.add(k)
    remainder = missing.difference(whitelist)
    if remainder:
        raise ValueError(str(remainder))
def load(options, path, format='yaml', validate=True):
    if not os.path.exists(path):
        raise ValueError("Invalid path for config %r" % path)

    load_resources()

    with open(path) as fh:
        if format == 'yaml':
            data = utils.yaml_load(fh.read())
        elif format == 'json':
            data = utils.loads(fh.read())
            validate = False

    if validate:
        from c7n.schema import validate
        errors = validate(data)
        if errors:
            raise errors[0]

    return PolicyCollection(data, options)
def init(config, use, debug, verbose, accounts, tags, policies, resource=None):
    level = verbose and logging.DEBUG or logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('custodian').setLevel(logging.WARNING)
    logging.getLogger('custodian.s3').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    with open(config) as fh:
        accounts_config = yaml.safe_load(fh.read())
        jsonschema.validate(accounts_config, CONFIG_SCHEMA)

    if use:
        with open(use) as fh:
            custodian_config = yaml.safe_load(fh.read())
    else:
        custodian_config = {}

    filtered_policies = []
    for p in custodian_config.get('policies', ()):
        if policies and p['name'] not in policies:
            continue
        if resource and p['resource'] != resource:
            continue
        filtered_policies.append(p)
    custodian_config['policies'] = filtered_policies

    filtered_accounts = []
    for a in accounts_config.get('accounts', ()):
        if accounts and a['name'] not in accounts:
            continue
        if tags:
            found = set()
            for t in tags:
                if t in a.get('tags', ()):
                    found.add(t)
            if not found == set(tags):
                continue
        filtered_accounts.append(a)
    accounts_config['accounts'] = filtered_accounts

    load_resources()
    MainThreadExecutor.async = False
    executor = debug and MainThreadExecutor or ProcessPoolExecutor
    return accounts_config, custodian_config, executor
def test_doc_examples(provider_name):
    load_resources()
    loader = PolicyLoader(Config.empty())
    provider = clouds.get(provider_name)
    policies = get_doc_policies(provider.resources)

    for p in policies.values():
        loader.load_data({'policies': [p]}, 'memory://')

    for p in policies.values():
        # Note max name size here is 54 if it is a lambda policy, given
        # our default prefix custodian- to stay under the 64 char limit on
        # lambda function names. This applies to AWS and GCP, and
        # afaict Azure.
        if len(p['name']) >= 54 and 'mode' in p:
            raise ValueError(
                "doc policy exceeds name limit policy:%s" % (p['name']))
def main():
    parser = setup_parser()
    options = parser.parse_args()

    level = options.verbose and logging.DEBUG or logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(logging.ERROR)

    try:
        resources.load_resources()
        options.command(options)
    except Exception:
        if not options.debug:
            raise
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
def _load_policies(options):
    load_resources()
    policies = []
    all_policies = []
    errors = 0

    for file in options.configs:
        try:
            collection = policy_load(options, file)
        except IOError:
            eprint('Error: policy file does not exist ({})'.format(file))
            errors += 1
            continue
        except ValueError as e:
            eprint('Error: problem loading policy file ({})'.format(e.message))
            errors += 1
            continue

        if collection is None:
            log.debug('Loaded file {}. Contained no policies.'.format(file))
        else:
            log.debug('Loaded file {}. Contains {} policies (after filtering)'.format(
                file, len(collection)))
            policies.extend(collection.policies)
            all_policies.extend(collection.unfiltered_policies)

    if errors > 0:
        eprint('Found {} errors. Exiting.'.format(errors))
        sys.exit(1)

    if len(policies) == 0:
        _print_no_policies_warning(options, all_policies)
        # If we filtered out all the policies we want to exit with a
        # non-zero status. But if the policy file is empty then continue
        # on to the specific command to determine the exit status.
        if len(all_policies) > 0:
            sys.exit(1)

    # Do not allow multiple policies with the same name, even across files
    counts = Counter([p.name for p in policies])
    for policy, count in counts.iteritems():
        if count > 1:
            eprint("Error: duplicate policy name '{}'".format(policy))
            sys.exit(1)

    return f(options, policies)
def gen_index(index, schema_dir):
    index_path = Path(index)
    schema_dir = Path(schema_dir)

    from c7n.resources import load_resources
    load_resources(("aws.*",))
    from c7n.resources.aws import AWS

    index_data = {"resources": {}, "augment": {}}
    all_services = boto3.Session().get_available_services()

    cfn_c7n_map = {}
    for rname, rtype in AWS.resources.items():
        if not rtype.resource_type.cfn_type:
            continue
        cfn_c7n_map[rtype.resource_type.cfn_type] = rtype

    for path in sorted(schema_dir.rglob("*.json")):
        if path.name == "index.json":
            continue
        service = path.stem.split("_")[1]
        rdata = json.loads(path.read_text())

        raugment = index_data["augment"].setdefault(rdata["typeName"], {})
        if service not in all_services:
            service = ServiceMap.get(service)
        raugment["service"] = service

        rname = path.stem.split("_", 1)[-1]
        raugment["type"] = rname

        c7n_resource = cfn_c7n_map.get(rdata["typeName"])
        if c7n_resource:
            extract_custodian(rdata, c7n_resource, raugment)

        class_name = "".join([s.title() for s in path.stem.split("_")[1:]])
        index_data["resources"]["awscc.%s" % rname] = "c7n_awscc.resources.%s.%s" % (
            path.stem.split("_", 1)[-1],
            class_name,
        )

    index_path.write_text(json.dumps(index_data, indent=2))
def schema_completer(prefix):
    """
    For tab-completion via argcomplete, return completion options.

    For the given prefix so far, return the possible options.  Note that
    filtering via startswith happens after this list is returned.
    """
    from c7n import schema
    load_resources()
    components = prefix.split('.')

    if components[0] in provider.clouds.keys():
        cloud_provider = components.pop(0)
        provider_resources = provider.resources(cloud_provider)
    else:
        cloud_provider = 'aws'
        provider_resources = provider.resources('aws')
        components[0] = "aws.%s" % components[0]

    # Completions for resource
    if len(components) == 1:
        choices = [r for r in provider.resources().keys()
                   if r.startswith(components[0])]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices

    if components[0] not in provider_resources.keys():
        return []

    # Completions for category
    if len(components) == 2:
        choices = ['{}.{}'.format(components[0], x)
                   for x in ('actions', 'filters') if x.startswith(components[1])]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices

    # Completions for item
    elif len(components) == 3:
        resource_mapping = schema.resource_vocabulary(cloud_provider)
        return ['{}.{}.{}'.format(components[0], components[1], x)
                for x in resource_mapping[components[0]][components[1]]]

    return []
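# Illustrative wiring sketch, hedged: how a completer like schema_completer
# above is typically attached to an argparse argument via argcomplete. The
# parser and argument names here are hypothetical.
def example_register_completer():
    import argparse
    import argcomplete

    parser = argparse.ArgumentParser()
    arg = parser.add_argument('resource')
    # argcomplete looks for a `completer` attribute on the argument action
    arg.completer = schema_completer
    argcomplete.autocomplete(parser)
    return parser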
def diff(repo_uri, source, target, output, verbose):
    """Policy diff between two arbitrary revisions.

    Revision specifiers for source and target can use fancy git refspec
    syntax for symbolics, dates, etc.

    See: https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection

    Default revision selection is dependent on current working tree
    branch. The intent is for two use cases, if on a non-master branch
    then show the diff to master. If on master show the diff to
    previous commit on master. For repositories not using the `master`
    convention, please specify explicit source and target.
    """
    logging.basicConfig(
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
        level=(verbose and logging.DEBUG or logging.INFO))
    logging.getLogger('botocore').setLevel(logging.WARNING)

    if repo_uri is None:
        repo_uri = pygit2.discover_repository(os.getcwd())

    repo = pygit2.Repository(repo_uri)
    load_resources()

    # If on master show diff between last commit to current head
    if repo.head.shorthand == 'master':
        if source is None:
            source = 'master@{1}'
        if target is None:
            target = 'master'
    # Else show difference between master and current head
    elif target is None:
        target = repo.head.shorthand
    if source is None:
        source = 'master'

    policy_repo = PolicyRepo(repo_uri, repo)
    changes = list(policy_repo.delta_commits(
        repo.revparse_single(source), repo.revparse_single(target)))
    output.write(
        yaml.safe_dump({
            'policies': [c.policy.data for c in changes
                         if c.kind != ChangeType.REMOVE]}).encode('utf8'))
def init(config, use, debug, verbose, accounts, tags, policies,
         resource=None, policy_tags=()):
    level = verbose and logging.DEBUG or logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger().setLevel(level)
    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('s3transfer').setLevel(logging.WARNING)
    logging.getLogger('custodian.s3').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    # Filter out custodian log messages on console output if not
    # at warning level or higher, see LogFilter docs and #2674
    for h in logging.getLogger().handlers:
        if isinstance(h, logging.StreamHandler):
            h.addFilter(LogFilter())

    with open(config, 'rb') as fh:
        accounts_config = yaml.safe_load(fh.read())
        jsonschema.validate(accounts_config, CONFIG_SCHEMA)

    if use:
        with open(use) as fh:
            custodian_config = yaml.safe_load(fh.read())
    else:
        custodian_config = {}

    accounts_config['accounts'] = list(accounts_iterator(accounts_config))
    filter_policies(custodian_config, policy_tags, policies, resource)
    filter_accounts(accounts_config, tags, accounts)

    load_resources()
    MainThreadExecutor.c7n_async = False
    executor = debug and MainThreadExecutor or ProcessPoolExecutor
    return accounts_config, custodian_config, executor
def main():
    parser = setup_parser()
    options = parser.parse_args()
    options.policy_filter = None
    options.log_group = None
    options.cache_period = 0
    options.cache = None

    log_level = logging.INFO
    if options.verbose:
        log_level = logging.DEBUG
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('c7n.cache').setLevel(logging.WARNING)

    resources.load_resources()
    policies = load_policies(options)
    resources_gc_prefix(options, policies)
def tag(assume, region, db, creator_tag, user_suffix, dryrun,
        summary=True, profile=None, type=()):
    """Tag resources with their creator.
    """
    trail_db = TrailDB(db)
    load_resources(resource_types=('aws.*',))

    with temp_dir() as output_dir:
        config = ExecConfig.empty(
            output_dir=output_dir, assume=assume, region=region,
            profile=profile)
        factory = aws.AWS().get_session_factory(config)
        account_id = local_session(factory).client(
            'sts').get_caller_identity().get('Account')
        config['account_id'] = account_id
        tagger = ResourceTagger(
            trail_db, config, creator_tag, user_suffix, dryrun, type)
        try:
            stats = tagger.process()
        except Exception:
            log.exception(
                "error processing account:%s region:%s config:%s env:%s",
                account_id, region, config, dict(os.environ))
            raise

    if not summary:
        return stats

    log.info(
        "auto tag summary account:%s region:%s \n%s",
        config['account_id'], config['region'],
        "\n".join([" {}: {}".format(k, v) for k, v in stats.items() if v]))
    total = sum([v for k, v in stats.items() if not k.endswith('not-found')])
    log.info("Total resources tagged: %d" % total)
def load(options, path, format='yaml', validate=True):
    # should we do os.path.expanduser here?
    if not os.path.exists(path):
        raise IOError("Invalid path for config %r" % path)

    load_resources()

    with open(path) as fh:
        if format == 'yaml':
            data = utils.yaml_load(fh.read())
        elif format == 'json':
            data = utils.loads(fh.read())
            validate = False

    # Test for empty policy file
    if not data or data.get('policies') is None:
        return None

    if validate:
        from c7n.schema import validate
        errors = validate(data)
        if errors:
            raise Exception(
                "Failed to validate on policy %s \n %s" % (
                    errors[1], errors[0]))

    return PolicyCollection(data, options)
import logging
import os

from c7n.resources import load_resources
from c7n.utils import format_event, get_account_id_from_sts
from c7n.config import Config

import boto3

logging.root.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
log = logging.getLogger('custodian.lambda')

account_id = None

# On cold start load all resources, requires a pythonpath directory scan
if 'AWS_EXECUTION_ENV' in os.environ:
    load_resources()


def dispatch_event(event, context):
    global account_id

    if account_id is None:
        session = boto3.Session()
        account_id = get_account_id_from_sts(session)

    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return

    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))
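# Illustrative only: the minimal shape of the CloudTrail-style event that
# dispatch_event() above inspects. Field values are hypothetical; CloudTrail
# only sets detail.errorCode when the recorded API call failed, which causes
# the handler to skip the event.
EXAMPLE_EVENT = {
    'debug': True,
    'detail': {
        'eventName': 'RunInstances',   # hypothetical API call name
        'errorCode': None,
    },
}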
def json_dump(resource=None):
    load_resources()
    print(json.dumps(generate(resource), indent=2))
def schema_cmd(options):
    """ Print info about the resources, actions and filters available. """
    if options.json:
        schema.json_dump(options.resource)
        return

    load_resources()
    resource_mapping = schema.resource_vocabulary()
    if options.summary:
        schema.summary(resource_mapping)
        return

    # Here are the formats for what we accept:
    # - No argument
    #   - List all available RESOURCES
    # - RESOURCE
    #   - List all available actions and filters for supplied RESOURCE
    # - RESOURCE.actions
    #   - List all available actions for supplied RESOURCE
    # - RESOURCE.actions.ACTION
    #   - Show class doc string and schema for supplied action
    # - RESOURCE.filters
    #   - List all available filters for supplied RESOURCE
    # - RESOURCE.filters.FILTER
    #   - Show class doc string and schema for supplied filter

    if not options.resource:
        resource_list = {'resources': sorted(resources.keys())}
        print(yaml.safe_dump(resource_list, default_flow_style=False))
        return

    # Format is RESOURCE.CATEGORY.ITEM
    components = options.resource.split('.')

    #
    # Handle resource
    #
    resource = components[0].lower()
    if resource not in resource_mapping:
        eprint('Error: {} is not a valid resource'.format(resource))
        sys.exit(1)

    if len(components) == 1:
        del(resource_mapping[resource]['classes'])
        output = {resource: resource_mapping[resource]}
        print(yaml.safe_dump(output))
        return

    #
    # Handle category
    #
    category = components[1].lower()
    if category not in ('actions', 'filters'):
        eprint(("Error: Valid choices are 'actions' and 'filters'."
                " You supplied '{}'").format(category))
        sys.exit(1)

    if len(components) == 2:
        output = "No {} available for resource {}.".format(category, resource)
        if category in resource_mapping[resource]:
            output = {resource: {
                category: resource_mapping[resource][category]}}
        print(yaml.safe_dump(output))
        return

    #
    # Handle item
    #
    item = components[2].lower()
    if item not in resource_mapping[resource][category]:
        eprint('Error: {} is not in the {} list for resource {}'.format(
            item, category, resource))
        sys.exit(1)

    if len(components) == 3:
        cls = resource_mapping[resource]['classes'][category][item]

        # Print docstring
        docstring = _schema_get_docstring(cls)
        print("\nHelp\n----\n")
        if docstring:
            print(docstring)
        else:
            # Shouldn't ever hit this, so exclude from cover
            print("No help is available for this item.")  # pragma: no cover

        # Print schema
        print("\nSchema\n------\n")
        pp = pprint.PrettyPrinter(indent=4)
        if hasattr(cls, 'schema'):
            pp.pprint(cls.schema)
        else:
            # Shouldn't ever hit this, so exclude from cover
            print("No schema is available for this item.",
                  file=sys.stderr)  # pragma: no cover
        print('')
        return

    # We received too much (e.g. s3.actions.foo.bar)
    eprint("Invalid selector '{}'. Max of 3 components in the "
           "format RESOURCE.CATEGORY.ITEM".format(options.resource))
    sys.exit(1)
def schema_cmd(options):
    """ Print info about the resources, actions and filters available. """
    if options.json:
        schema.json_dump(options.resource)
        return

    load_resources()
    resource_mapping = schema.resource_vocabulary()
    if options.summary:
        schema.summary(resource_mapping)
        return

    # Here are the formats for what we accept:
    # - No argument
    #   - List all available RESOURCES
    # - PROVIDER
    #   - List all available RESOURCES for supplied PROVIDER
    # - RESOURCE
    #   - List all available actions and filters for supplied RESOURCE
    # - RESOURCE.actions
    #   - List all available actions for supplied RESOURCE
    # - RESOURCE.actions.ACTION
    #   - Show class doc string and schema for supplied action
    # - RESOURCE.filters
    #   - List all available filters for supplied RESOURCE
    # - RESOURCE.filters.FILTER
    #   - Show class doc string and schema for supplied filter

    if not options.resource:
        resource_list = {'resources': sorted(provider.resources().keys())}
        print(yaml.safe_dump(resource_list, default_flow_style=False))
        return

    # Format is [PROVIDER].RESOURCE.CATEGORY.ITEM
    # optional provider defaults to aws for compatibility
    components = options.resource.lower().split('.')

    if len(components) == 1 and components[0] in provider.clouds.keys():
        resource_list = {'resources': sorted(
            provider.resources(cloud_provider=components[0]).keys())}
        print(yaml.safe_dump(resource_list, default_flow_style=False))
        return

    if components[0] in provider.clouds.keys():
        cloud_provider = components.pop(0)
        resource_mapping = schema.resource_vocabulary(cloud_provider)
        components[0] = '%s.%s' % (cloud_provider, components[0])
    else:
        resource_mapping = schema.resource_vocabulary('aws')
        components[0] = 'aws.%s' % components[0]

    #
    # Handle resource
    #
    resource = components[0]
    if resource not in resource_mapping:
        log.error('{} is not a valid resource'.format(resource))
        sys.exit(1)

    if len(components) == 1:
        del(resource_mapping[resource]['classes'])
        output = {resource: resource_mapping[resource]}
        print(yaml.safe_dump(output))
        return

    #
    # Handle category
    #
    category = components[1]
    if category not in ('actions', 'filters'):
        log.error("Valid choices are 'actions' and 'filters'."
                  " You supplied '{}'".format(category))
        sys.exit(1)

    if len(components) == 2:
        output = "No {} available for resource {}.".format(category, resource)
        if category in resource_mapping[resource]:
            output = {resource: {
                category: resource_mapping[resource][category]}}
        print(yaml.safe_dump(output))
        return

    #
    # Handle item
    #
    item = components[2]
    if item not in resource_mapping[resource][category]:
        log.error('{} is not in the {} list for resource {}'.format(
            item, category, resource))
        sys.exit(1)

    if len(components) == 3:
        cls = resource_mapping[resource]['classes'][category][item]

        # Print docstring
        docstring = _schema_get_docstring(cls)
        print("\nHelp\n----\n")
        if docstring:
            print(docstring)
        else:
            # Shouldn't ever hit this, so exclude from cover
            print("No help is available for this item.")  # pragma: no cover

        # Print schema
        print("\nSchema\n------\n")
        if hasattr(cls, 'schema'):
            print(json.dumps(cls.schema, indent=4))
        else:
            # Shouldn't ever hit this, so exclude from cover
            print("No schema is available for this item.",
                  file=sys.stderr)  # pragma: no cover
        print('')
        return

    # We received too much (e.g. s3.actions.foo.bar)
    log.error("Invalid selector '{}'. Max of 3 components in the "
              "format RESOURCE.CATEGORY.ITEM".format(options.resource))
    sys.exit(1)
def _load_policies(options):
    validate = True
    if 'skip_validation' in options:
        validate = not options.skip_validation

    if not validate:
        log.debug('Policy validation disabled')

    load_resources()
    vars = _load_vars(options)

    errors = 0
    all_policies = PolicyCollection.from_data({}, options)

    # for a default region for policy loading, we'll expand regions later.
    options.region = ""

    for fp in options.configs:
        try:
            collection = policy_load(options, fp, validate=validate, vars=vars)
        except IOError:
            log.error('policy file does not exist ({})'.format(fp))
            errors += 1
            continue
        except ValueError as e:
            log.error('problem loading policy file ({})'.format(e.message))
            errors += 1
            continue

        if collection is None:
            log.debug('Loaded file {}. Contained no policies.'.format(fp))
        else:
            log.debug(
                'Loaded file {}. Contains {} policies'.format(
                    fp, len(collection)))
            all_policies = all_policies + collection

    if errors > 0:
        log.error('Found {} errors. Exiting.'.format(errors))
        sys.exit(1)

    # filter by name and resource type
    policies = all_policies.filter(
        getattr(options, 'policy_filter', None),
        getattr(options, 'resource_type', None))

    # provider initialization
    provider_policies = {}
    for p in policies:
        provider_policies.setdefault(p.provider_name, []).append(p)

    policies = PolicyCollection.from_data({}, options)
    for provider_name in provider_policies:
        provider = clouds[provider_name]()
        p_options = provider.initialize(options)
        policies += provider.initialize_policies(
            PolicyCollection(provider_policies[provider_name], p_options),
            p_options)

    if len(policies) == 0:
        _print_no_policies_warning(options, all_policies)
        # If we filtered out all the policies we want to exit with a
        # non-zero status. But if the policy file is empty then continue
        # on to the specific command to determine the exit status.
        if len(all_policies) > 0:
            sys.exit(1)

    # Do not allow multiple policies in a region with the same name,
    # even across files
    policies_by_region = defaultdict(list)
    for p in policies:
        policies_by_region[p.options.region].append(p)
    for region in policies_by_region.keys():
        counts = Counter([p.name for p in policies_by_region[region]])
        for policy, count in six.iteritems(counts):
            if count > 1:
                log.error("duplicate policy name '{}'".format(policy))
                sys.exit(1)

    # Variable expansion and non schema validation (not optional)
    for p in policies:
        p.expand_variables(p.get_variables())
        p.validate()

    return f(options, list(policies))
def _load_policies(options):
    load_resources()
    vars = _load_vars(options)

    errors = 0
    all_policies = PolicyCollection.from_data({}, options)

    # for a default region for policy loading, we'll expand regions later.
    options.region = options.regions[0]

    for fp in options.configs:
        try:
            collection = policy_load(options, fp, vars=vars)
        except IOError:
            log.error('policy file does not exist ({})'.format(fp))
            errors += 1
            continue
        except ValueError as e:
            log.error('problem loading policy file ({})'.format(e.message))
            errors += 1
            continue

        if collection is None:
            log.debug('Loaded file {}. Contained no policies.'.format(fp))
        else:
            log.debug(
                'Loaded file {}. Contains {} policies'.format(
                    fp, len(collection)))
            all_policies = all_policies + collection

    if errors > 0:
        log.error('Found {} errors. Exiting.'.format(errors))
        sys.exit(1)

    # filter by name and resource type
    policies = all_policies.filter(
        getattr(options, 'policy_filter', None),
        getattr(options, 'resource_type', None))

    # expand by region, this results in a separate policy instance per
    # region of execution.
    policies = policies.expand_regions(options.regions)

    if len(policies) == 0:
        _print_no_policies_warning(options, all_policies)
        # If we filtered out all the policies we want to exit with a
        # non-zero status. But if the policy file is empty then continue
        # on to the specific command to determine the exit status.
        if len(all_policies) > 0:
            sys.exit(1)

    # Do not allow multiple policies in a region with the same name,
    # even across files
    policies_by_region = defaultdict(list)
    for p in policies:
        policies_by_region[p.options.region].append(p)
    for region in policies_by_region.keys():
        counts = Counter([p.name for p in policies_by_region[region]])
        for policy, count in counts.iteritems():
            if count > 1:
                log.error("duplicate policy name '{}'".format(policy))
                sys.exit(1)

    return f(options, list(policies))