def run(event, context=None):
    # policies file should always be valid in functions so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)

    if not policy_config or not policy_config.get('policies'):
        log.error('Invalid policy config')
        return False

    options_overrides = \
        policy_config['policies'][0].get('mode', {}).get('execution-options', {})

    # if output_dir specified use that, otherwise make a temp directory
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = get_tmp_output_dir()

    # merge all our options in
    options = Config.empty(**options_overrides)

    loader = PolicyLoader(options)
    policies = loader.load_data(policy_config, 'config.json', validate=False)
    if policies:
        for p in policies:
            log.info("running policy %s", p.name)
            p.validate()
            p.push(event, context)
    return True

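# A minimal sketch, not part of the original handler: the rough shape of the
# config.json document that run() reads. The policy name, resource, schedule and
# output_dir values here are illustrative assumptions; only the 'policies' list and
# the mode 'execution-options' mapping are what the code above relies on.
EXAMPLE_CONFIG_JSON = {
    'policies': [{
        'name': 'ec2-tag-compliance',
        'resource': 'ec2',
        'mode': {
            'type': 'periodic',
            'schedule': 'rate(1 hour)',
            'execution-options': {'output_dir': 's3://example-bucket/policies'},
        },
    }]
}
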
def eperm(provider, el, r=None):
    if el.permissions:
        return el.permissions
    element_type = get_element_type(el)
    if r is None or r.type is None:
        # dummy resource type for policy
        if provider == 'aws':
            r = Bag({'type': 'kinesis'})
        elif provider == 'gcp':
            r = Bag({'type': 'instance'})
        elif provider == 'azure':
            r = Bag({'type': 'vm'})

    # print(f'policy construction lookup {r.type}.{element_type}.{el.type}')
    loader = PolicyLoader(Config.empty())
    pdata = {
        'name': f'permissions-{r.type}',
        'resource': f'{provider}.{r.type}'
    }
    pdata[element_type] = get_element_data(element_type, el)
    try:
        pset = loader.load_data({'policies': [pdata]}, ':mem:', validate=False)
    except Exception as e:
        print(f'error loading {el} as {element_type}:{el.type} error: {e} \n {pdata}')
        return []
    el = get_policy_element(el, list(pset)[0])
    return el.get_permissions()

def test_cloudtrail_policy():
    collection = PolicyLoader(Config.empty()).load_data(
        {
            'policies': [{
                'name': 'check-ec2',
                'resource': 'ec2',
                'mode': {
                    'type': 'cloudtrail',
                    'events': ['RunInstances']
                }
            }]
        },
        file_uri=":mem:")
    sam = {'Resources': {}}
    p = list(collection).pop()
    dispatch_render(p, sam)
    assert sam['Resources']['CheckEc2']['Properties']['Events'] == {
        'PolicyTriggerA': {
            'Properties': {
                'Pattern': {
                    'detail': {
                        'eventName': ['RunInstances'],
                        'eventSource': ['ec2.amazonaws.com']
                    },
                    'detail-type': ['AWS API Call via CloudTrail']
                }
            },
            'Type': 'CloudWatchEvent'
        }
    }

def test_doc_examples(provider_name):
    load_resources()
    loader = PolicyLoader(Config.empty())
    provider = clouds.get(provider_name)
    policies = get_doc_policies(provider.resources)

    for p in policies.values():
        loader.load_data({'policies': [p]}, 'memory://')

    for p in policies.values():
        # Note the max name size here is 54 if it is a lambda policy, given
        # our default custodian- prefix, to stay under the 64 char limit on
        # lambda function names. This applies to AWS and GCP, and
        # afaict Azure.
        if len(p['name']) >= 54 and 'mode' in p:
            raise ValueError(
                "doc policy exceeds name limit policy:%s" % (p['name']))

def validate(self):
    if 'mode' in self.data['policy']:
        raise PolicyValidationError(
            "Execution mode can't be specified in "
            "embedded policy %s" % self.data)
    if 'actions' in self.data['policy']:
        raise PolicyValidationError(
            "Actions can't be specified in "
            "embedded policy %s" % self.data)

    collection = PolicyLoader(self.manager.config).load_data(
        {'policies': [self.data['policy']]}, "memory://",
        session_factory=self.manager.session_factory)

    if not collection:
        raise PolicyValidationError(
            "policy %s missing filter empty embedded policy" % (
                self.manager.ctx.policy.name))
    self.embedded_policy = list(collection).pop()
    self.embedded_policy.validate()
    return self

def test_config_rule_policy():
    collection = PolicyLoader(Config.empty()).load_data(
        {
            'policies': [{
                'name': 'check-ec2',
                'resource': 'ec2',
                'mode': {
                    'type': 'config-rule'
                }
            }]
        },
        file_uri=":mem:")
    sam = {'Resources': {}}
    p = list(collection).pop()
    dispatch_render(p, sam)
    assert set(sam['Resources']) == set(
        ('CheckEc2', 'CheckEc2ConfigRule', 'CheckEc2InvokePermission'))
    assert jmespath.search(
        'Resources.CheckEc2ConfigRule.Properties.Source.SourceIdentifier',
        sam) == {'Fn::GetAtt': 'CheckEc2' + '.Arn'}

def main():
    parser = setup_parser()
    options = parser.parse_args()
    collection = PolicyLoader(Config.empty()).load_file(
        options.config_file).filter(options.policy_filter)

    sam = {
        'AWSTemplateFormatVersion': '2010-09-09',
        'Transform': 'AWS::Serverless-2016-10-31',
        'Resources': {}
    }

    for p in collection:
        if p.provider_name != 'aws':
            continue
        policy_lambda = dispatch_render(p, sam)
        archive = policy_lambda.get_archive()
        with open(os.path.join(options.output_dir, "%s.zip" % p.name), 'wb') as fh:
            fh.write(archive.get_bytes())

    with open(os.path.join(options.output_dir, 'deploy.yml'), 'w') as fh:
        fh.write(yaml.safe_dump(sam, default_flow_style=False))

def test_periodic_policy():
    collection = PolicyLoader(Config.empty()).load_data(
        {
            'policies': [{
                'name': 'check-ec2',
                'resource': 'ec2',
                'mode': {
                    'schedule': 'rate(1 hour)',
                    'type': 'periodic'
                }
            }]
        },
        file_uri=":mem:")
    sam = {'Resources': {}}
    p = list(collection).pop()
    dispatch_render(p, sam)
    assert sam['Resources']['CheckEc2']['Properties']['Events'] == {
        'PolicySchedule': {
            'Type': 'Schedule',
            'Properties': {
                'Schedule': 'rate(1 hour)'
            }
        }
    }

class CustodianTestCore:

    custodian_schema = None

    # thread local? tests are single threaded, multiprocess execution
    policy_loader = PolicyLoader(Config.empty())
    policy_loader.default_policy_validate = C7N_VALIDATE

    def addCleanup(self, func, *args, **kw):
        raise NotImplementedError("subclass required")

    def write_policy_file(self, policy, format="yaml"):
        """Write a policy file to disk in the specified format.

        Input a dictionary and a format. Valid formats are `yaml` and `json`.
        Returns the file path.
        """
        fh = tempfile.NamedTemporaryFile(mode="w+b", suffix="." + format, delete=False)
        if format == "json":
            fh.write(json.dumps(policy).encode("utf8"))
        else:
            fh.write(yaml.dump(policy, encoding="utf8", Dumper=yaml.SafeDumper))
        fh.flush()
        self.addCleanup(os.unlink, fh.name)
        self.addCleanup(fh.close)
        return fh.name

    def get_temp_dir(self):
        """Return a temporary directory that will get cleaned up."""
        temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir)
        return temp_dir

    def get_context(self, config=None, session_factory=None, policy=None):
        if config is None:
            self.context_output_dir = self.get_temp_dir()
            config = Config.empty(output_dir=self.context_output_dir)
        ctx = ExecutionContext(
            session_factory,
            policy or Bag({
                "name": "test-policy",
                "provider_name": "aws"}),
            config)
        return ctx

    def load_policy(
        self,
        data,
        config=None,
        session_factory=None,
        validate=C7N_VALIDATE,
        output_dir='null://',
        log_group='null://',
        cache=False,
    ):
        pdata = {'policies': [data]}
        if not (config and isinstance(config, Config)):
            config = self._get_policy_config(
                log_group=log_group, output_dir=output_dir,
                cache=cache, **(config or {}))
        collection = self.policy_loader.load_data(
            pdata, validate=validate, file_uri="memory://test",
            session_factory=session_factory, config=config)
        # policy non schema validation is also lazy initialization
        [p.validate() for p in collection]
        return list(collection)[0]

    def _get_policy_config(self, **kw):
        config = kw
        if kw.get('output_dir') is None or config.get('cache'):
            config["output_dir"] = temp_dir = self.get_temp_dir()
            if config.get('cache'):
                config["cache"] = os.path.join(temp_dir, "c7n.cache")
                config["cache_period"] = 300
        return Config.empty(**config)

    def load_policy_set(self, data, config=None):
        filename = self.write_policy_file(data, format="json")
        if config:
            e = Config.empty(**config)
        else:
            e = Config.empty()
        return policy.load(e, filename)

    def patch(self, obj, attr, new):
        old = getattr(obj, attr, None)
        setattr(obj, attr, new)
        self.addCleanup(setattr, obj, attr, old)

    def change_cwd(self, work_dir=None):
        if work_dir is None:
            work_dir = self.get_temp_dir()

        cur_dir = os.path.abspath(os.getcwd())

        def restore():
            os.chdir(cur_dir)

        self.addCleanup(restore)
        os.chdir(work_dir)
        return work_dir

    def change_environment(self, **kwargs):
        """Change the environment to the given set of variables.

        To clear an environment variable set it to None.
        Existing environment restored after test.
""" # preserve key elements needed for testing for env in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_DEFAULT_REGION"]: if env not in kwargs: kwargs[env] = os.environ.get(env, "") original_environ = dict(os.environ) @self.addCleanup def cleanup_env(): os.environ.clear() os.environ.update(original_environ) os.environ.clear() for key, value in list(kwargs.items()): if value is None: del (kwargs[key]) os.environ.update(kwargs) def capture_logging( self, name=None, level=logging.INFO, formatter=None, log_file=None ): if log_file is None: log_file = TextTestIO() log_handler = logging.StreamHandler(log_file) if formatter: log_handler.setFormatter(formatter) logger = logging.getLogger(name) logger.addHandler(log_handler) old_logger_level = logger.level logger.setLevel(level) @self.addCleanup def reset_logging(): logger.removeHandler(log_handler) logger.setLevel(old_logger_level) return log_file # Backport from stdlib for 2.7 compat, drop when 2.7 support is dropped. def assertRegex(self, text, expected_regex, msg=None): """Fail the test unless the text matches the regular expression.""" if isinstance(expected_regex, six.string_types): assert expected_regex, "expected_regex must not be empty." expected_regex = re.compile(expected_regex) if not expected_regex.search(text): standardMsg = "Regex didn't match: %r not found in %r" % ( expected_regex.pattern, text) # _formatMessage ensures the longMessage option is respected msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) def assertJmes(self, expr, instance, expected): value = jmespath.search(expr, instance) self.assertEqual(value, expected)