def get_orgids(self):
    """Return the set of whitelisted org ids.

    Combines inline ``whitelist_orgids`` entries with any values
    resolved from a ``whitelist_orgids_from`` source.
    """
    allowed = set(self.data.get('whitelist_orgids', ()))
    if 'whitelist_orgids_from' in self.data:
        resolver = ValuesFrom(self.data['whitelist_orgids_from'], self.manager)
        allowed |= set(resolver.get_values())
    return allowed
def get_endpoints(self):
    """Return the set of whitelisted endpoints.

    Combines inline ``whitelist_endpoints`` entries with any values
    resolved from a ``whitelist_endpoints_from`` source.
    """
    allowed = set(self.data.get('whitelist_endpoints', ()))
    if 'whitelist_endpoints_from' in self.data:
        resolver = ValuesFrom(self.data['whitelist_endpoints_from'], self.manager)
        allowed |= set(resolver.get_values())
    return allowed
def get_accounts(self):
    """Return whitelisted account ids, always including the owner.

    Combines inline ``whitelist`` entries with values resolved from a
    ``whitelist_from`` source; the executing account id (from the
    policy config) is always whitelisted.
    """
    owner = self.manager.config.account_id
    allowed = set(self.data.get('whitelist', ()))
    if 'whitelist_from' in self.data:
        allowed |= set(
            ValuesFrom(self.data['whitelist_from'], self.manager).get_values())
    allowed.add(owner)
    return allowed
def get_accounts(self):
    """Return whitelisted account ids, always including the owner.

    Combines inline ``whitelist`` entries with values resolved from a
    ``whitelist_from`` source; the account owning the current session
    is always whitelisted.
    """
    owner = get_account_id(local_session(self.manager.session_factory))
    allowed = set(self.data.get('whitelist', ()))
    if 'whitelist_from' in self.data:
        allowed |= set(
            ValuesFrom(self.data['whitelist_from'], self.manager).get_values())
    allowed.add(owner)
    return allowed
def get_protocols(self):
    """Return the set of whitelisted protocols.

    Inline ``whitelist_protocols`` entries are taken as-is; entries
    resolved from ``whitelist_protocols_from`` are filtered against
    ``self.valid_protocols``.
    """
    allowed = set(self.data.get('whitelist_protocols', ()))
    if 'whitelist_protocols_from' in self.data:
        resolver = ValuesFrom(self.data['whitelist_protocols_from'], self.manager)
        for proto in resolver.get_values():
            if proto in self.valid_protocols:
                allowed.add(proto)
    return allowed
def get_protocols(self):
    """Whitelisted protocols: inline entries, plus resolved entries
    that also appear in ``self.valid_protocols``."""
    result = set(self.data.get('whitelist_protocols', ()))
    if 'whitelist_protocols_from' in self.data:
        fetched = ValuesFrom(
            self.data['whitelist_protocols_from'], self.manager).get_values()
        result |= {p for p in fetched if p in self.valid_protocols}
    return result
def match(self, i): if self.v is None and len(self.data) == 1: [(self.k, self.v)] = self.data.items() elif self.v is None: self.k = self.data.get('key') self.op = self.data.get('op') if 'value_from' in self.data: values = ValuesFrom(self.data['value_from'], self.manager) self.v = values.get_values() else: self.v = self.data.get('value') self.vtype = self.data.get('value_type') if i is None: return False # Value extract if self.k.startswith('tag:'): tk = self.k.split(':', 1)[1] r = None for t in i.get("Tags", []): if t.get('Key') == tk: r = t.get('Value') break elif '.' not in self.k and '[' not in self.k and '(' not in self.k: r = i.get(self.k) elif self.expr: r = self.expr.search(i) else: self.expr = jmespath.compile(self.k) r = self.expr.search(i) # value type conversion if self.vtype is not None: v, r = self.process_value_type(self.v, r) else: v = self.v # Value match if r is None and v == 'absent': return True elif r is not None and v == 'present': return True elif v == 'not-null' and r: return True elif self.op: op = OPERATORS[self.op] try: return op(r, v) except TypeError: return False elif r == self.v: return True return False
def match(self, i):
    """Evaluate resource ``i`` against this value filter.

    First call lazily extracts key/op/value from ``self.data`` and
    caches them on the instance (guarded by ``content_initialized``);
    later calls reuse that state. Returns True on a match, else False.
    """
    # i is the describe resource response. ie. aws ec2 describe-instance --instance-id=<instance_id>
    if self.v is None and len(self.data) == 1:
        # Shorthand form: {key: value}
        [(self.k, self.v)] = self.data.items()
    elif self.v is None and not hasattr(self, 'content_initialized'):
        self.k = self.data.get('key')
        self.op = self.data.get('op')
        if 'value_from' in self.data:
            # Grab value here to send to ValuesFrom for when the whitelist returns "*"
            # When that happens, return r instead
            r = self.get_resource_value(self.k, i)
            values = ValuesFrom(self.data['value_from'], self.manager, self.event, value=r)
            self.v = values.get_values()
        else:
            self.v = self.data.get('value')
        # Guard so value_from is only resolved once per filter instance.
        self.content_initialized = True
        self.vtype = self.data.get('value_type')
    if i is None:
        return False
    # value extract
    r = self.get_resource_value(self.k, i)
    # Normalize missing values for membership operators so the op
    # call below gets an iterable instead of None.
    if self.op in ('in', 'not-in') and r is None:
        r = ()
    # value type conversion
    if self.vtype is not None:
        v, r = self.process_value_type(self.v, r, i)
    else:
        v = self.v
    # Value match
    if r is None and v == 'absent':
        return True
    elif r is not None and v == 'present':
        return True
    elif v == 'not-null' and r:
        return True
    elif v == 'empty' and not r:
        return True
    elif self.op:
        op = OPERATORS[self.op]
        try:
            # Mismatched types (e.g. str vs int) are treated as no-match.
            return op(r, v)
        except TypeError:
            return False
    elif r == self.v:
        # NOTE(review): this compares the raw configured value, not the
        # value_type-converted local ``v`` — looks inconsistent with the
        # conversion above; confirm whether ``r == v`` was intended.
        return True
    return False
def match(self, i):
    """Evaluate resource ``i`` against this value filter.

    Filter configuration (key/op/value/value_type) is parsed from
    ``self.data`` once and cached on the instance; subsequent calls
    reuse it. Returns True on a match, else False.
    """
    # One-time lazy parse of the filter configuration.
    if self.v is None and len(self.data) == 1:
        # Shorthand form: {key: value}
        [(self.k, self.v)] = self.data.items()
    elif self.v is None and not hasattr(self, 'content_initialized'):
        self.k = self.data.get('key')
        self.op = self.data.get('op')
        if 'value_from' in self.data:
            self.v = ValuesFrom(self.data['value_from'], self.manager).get_values()
        else:
            self.v = self.data.get('value')
        self.content_initialized = True
        self.vtype = self.data.get('value_type')

    if i is None:
        return False

    # Extract the value from the resource.
    resource_value = self.get_resource_value(self.k, i)
    # Membership operators need an iterable, never None.
    if resource_value is None and self.op in ('in', 'not-in'):
        resource_value = ()

    # Optional value-type conversion of both sides.
    if self.vtype is not None:
        expected, resource_value = self.process_value_type(
            self.v, resource_value, i)
    else:
        expected = self.v

    # Sentinel matches.
    if resource_value is None and expected == 'absent':
        return True
    if resource_value is not None and expected == 'present':
        return True
    if expected == 'not-null' and resource_value:
        return True
    if expected == 'empty' and not resource_value:
        return True
    # Operator-based comparison; type mismatches count as no-match.
    if self.op:
        try:
            return OPERATORS[self.op](resource_value, expected)
        except TypeError:
            return False
    # Plain equality fallback.
    if resource_value == expected:
        return True
    return False
def match(self, i):
    """Evaluate resource ``i`` against this value filter.

    First call lazily extracts key/op/value from ``self.data`` and
    caches them on the instance (guarded by ``content_initialized``);
    later calls reuse that state. Returns True on a match, else False.
    """
    # Lazy one-time parse of the filter configuration.
    if self.v is None and len(self.data) == 1:
        # Shorthand form: {key: value}
        [(self.k, self.v)] = self.data.items()
    elif self.v is None and not hasattr(self, 'content_initialized'):
        self.k = self.data.get('key')
        self.op = self.data.get('op')
        if 'value_from' in self.data:
            values = ValuesFrom(self.data['value_from'], self.manager)
            self.v = values.get_values()
        else:
            self.v = self.data.get('value')
        self.content_initialized = True
        self.vtype = self.data.get('value_type')
    if i is None:
        return False
    # value extract
    r = self.get_resource_value(self.k, i)
    # Membership operators need an iterable, never None.
    if self.op in ('in', 'not-in') and r is None:
        r = ()
    # value type conversion
    if self.vtype is not None:
        v, r = self.process_value_type(self.v, r, i)
    else:
        v = self.v
    # Value match
    if r is None and v == 'absent':
        return True
    elif r is not None and v == 'present':
        return True
    elif v == 'not-null' and r:
        return True
    elif v == 'empty' and not r:
        return True
    elif self.op:
        op = OPERATORS[self.op]
        try:
            return op(r, v)
        except TypeError:
            return False
    elif r == v:
        # BUGFIX: compare against the (possibly type-converted) local
        # value ``v`` rather than raw ``self.v``; comparing self.v
        # silently ignored any value_type conversion applied above.
        return True
    return False
def expand_variables(self, message):
    """expand any variables in the action to_from/cc_from fields.

    Interpolates ``message`` fields into each source's url/expr, then
    extends the 'to'/'cc' lists with the resolved values. Returns a
    shallow copy of the action data.
    """
    params = self.data.copy()
    for source_key, target_key in (('to_from', 'to'), ('cc_from', 'cc')):
        if source_key not in self.data:
            continue
        spec = self.data[source_key].copy()
        spec['url'] = spec['url'].format(**message)
        if 'expr' in spec:
            spec['expr'] = spec['expr'].format(**message)
        params.setdefault(target_key, []).extend(
            ValuesFrom(spec, self.manager).get_values())
    return params
def expand_variables(self, message):
    """expand any variables in the action to_from/cc_from fields.

    Interpolates ``message`` fields into the to_from/cc_from url/expr
    settings, then replaces the 'to'/'cc' lists with the resolved
    values. Returns a shallow copy of the action data.
    """
    params = self.data.copy()
    if 'to_from' in self.data:
        spec = dict(self.data['to_from'])
        spec['url'] = spec['url'].format(**message)
        if 'expr' in spec:
            spec['expr'] = spec['expr'].format(**message)
        params['to'] = ValuesFrom(spec).get_values()
    if 'cc_from' in self.data:
        spec = dict(self.data['cc_from'])
        spec['url'] = spec['url'].format(**message)
        if 'expr' in spec:
            spec['expr'] = spec['expr'].format(**message)
        params['cc'] = ValuesFrom(spec).get_values()
    return params
def process_resource_schedule(self, i, value, time_type):
    """Does the resource tag schedule and policy match the current time.

    Parses the tag ``value`` into a schedule (falling back to the
    policy default schedule when only a tz override is present),
    resolves the schedule timezone, honors any skip-days, and finally
    delegates the time comparison to ``self.match``.
    """
    resource_id = i[self.id_key]
    # Normalize trailing/duplicate semicolons so dateutil.parser.parse
    # can handle values like 'off=(m-f,1);' — without this some cases
    # would silently fail.
    value = ';'.join(filter(None, value.split(';')))
    schedule = None
    if self.parser.has_resource_schedule(value, time_type):
        schedule = self.parser.parse(value)
    elif self.parser.keys_are_valid(value):
        # Respect a timezone override from the tag on top of the
        # policy's default schedule.
        raw = self.parser.raw_data(value)
        if 'tz' in raw:
            schedule = dict(self.default_schedule)
            schedule['tz'] = raw['tz']
        else:
            schedule = self.default_schedule
    if schedule is None:
        log.warning(
            "Invalid schedule on resource:%s value:%s", resource_id, value)
        self.parse_errors.append((resource_id, value))
        return False
    tz = self.get_tz(schedule['tz'])
    if not tz:
        log.warning(
            "Could not resolve tz on resource:%s value:%s", resource_id, value)
        self.parse_errors.append((resource_id, value))
        return False
    # Truncate to the hour: schedules are hour-granular.
    now = datetime.datetime.now(tz).replace(minute=0, second=0, microsecond=0)
    if 'skip-days-from' in self.data:
        self.skip_days = ValuesFrom(
            self.data['skip-days-from'], self.manager).get_values()
    else:
        self.skip_days = self.data.get('skip-days', [])
    if now.strftime("%Y-%m-%d") in self.skip_days:
        return False
    return self.match(now, schedule)
def process_resource_schedule(self, i, value, time_type):
    """Does the resource tag schedule and policy match the current time."""
    rid = i[self.id_key]
    # this is to normalize trailing semicolons which when done allows
    # dateutil.parser.parse to process: value='off=(m-f,1);' properly.
    # before this normalization, some cases would silently fail.
    value = ';'.join(filter(None, value.split(';')))
    if self.parser.has_resource_schedule(value, time_type):
        schedule = self.parser.parse(value)
    elif self.parser.keys_are_valid(value):
        # respect timezone from tag
        raw_data = self.parser.raw_data(value)
        if 'tz' in raw_data:
            # copy before overriding tz so the shared default schedule
            # is not mutated
            schedule = dict(self.default_schedule)
            schedule['tz'] = raw_data['tz']
        else:
            schedule = self.default_schedule
    else:
        schedule = None
    if schedule is None:
        # unparseable tag value — record it and treat as no-match
        log.warning(
            "Invalid schedule on resource:%s value:%s", rid, value)
        self.parse_errors.append((rid, value))
        return False
    tz = self.get_tz(schedule['tz'])
    if not tz:
        log.warning(
            "Could not resolve tz on resource:%s value:%s", rid, value)
        self.parse_errors.append((rid, value))
        return False
    # truncate to the hour: schedules are hour-granular
    now = datetime.datetime.now(tz).replace(
        minute=0, second=0, microsecond=0)
    now_str = now.strftime("%Y-%m-%d")
    # skip-days may come inline or from an external source
    if 'skip-days-from' in self.data:
        values = ValuesFrom(self.data['skip-days-from'], self.manager)
        self.skip_days = values.get_values()
    else:
        self.skip_days = self.data.get('skip-days', [])
    if now_str in self.skip_days:
        return False
    return self.match(now, schedule)
def get_values_from(self, data, content):
    """Build a ValuesFrom whose resolver serves ``content`` directly
    (test helper: no real session or cache is wired in)."""
    manager = Bag({'session_factory': None, '_cache': None})
    vf = ValuesFrom(data, manager)
    vf.resolver = FakeResolver(content)
    return vf
def get_values_from(self, data, content):
    """Build a ValuesFrom whose resolver serves ``content`` directly,
    with a manager config scoped to ACCOUNT_ID (test helper)."""
    cfg = Config.empty(account_id=ACCOUNT_ID)
    manager = Bag({"session_factory": None, "_cache": None, "config": cfg})
    vf = ValuesFrom(data, manager)
    vf.resolver = FakeResolver(content)
    return vf
class ProjectPropagateLabels(HierarchyAction):
    """Propagate labels from the organization hierarchy to a project.

    folder-labels should resolve to a json data mapping of folder path
    to labels that should be applied to contained projects.

    as a worked example assume the following resource hierarchy ::

      - /dev
          /network
            /project-a
          /ml
            /project-b

    Given a folder-labels json with contents like

    .. code-block:: json

      {"dev": {"env": "dev", "owner": "dev"},
       "dev/network": {"owner": "network"},
       "dev/ml": {"owner": "ml"}}

    Running the following policy

    .. code-block:: yaml

      policies:
        - name: tag-projects
          resource: gcp.project
          # use a server side filter to only look at projects
          # under the /dev folder the id for the dev folder needs
          # to be manually resolved outside of the policy.
          query:
            - filter: "parent.id:389734459211 parent.type:folder"
          filters:
            - "tag:owner": absent
          actions:
            - type: propagate-labels
              folder-labels:
                url: file://folder-labels.json

    Will result in project-a being tagged with owner: network and env: dev
    and project-b being tagged with owner: ml and env: dev
    """

    schema = type_schema(
        'propagate-labels',
        required=('folder-labels',),
        **{
            'folder-labels': {
                '$ref': '#/definitions/filters_common/value_from'}},
    )
    # Only operate on active projects.
    attr_filter = ('lifecycleState', ('ACTIVE',))
    permissions = (
        'resourcemanager.folders.get',
        'resourcemanager.projects.update')
    method_spec = {'op': 'update'}

    def load_metadata(self):
        """Load hierarchy tags"""
        self.resolver = ValuesFrom(self.data['folder-labels'], self.manager)
        self.labels = self.resolver.get_values()
        self.load_folders()
        self.resolve_paths()

    def resolve_paths(self):
        """Map each folder id to its slash-joined display-name path."""
        self.folder_paths = {}

        def get_path_segments(fid):
            # Walk parent folders up to the root, yielding display
            # names root-first so join produces a top-down path.
            p = self.folders[fid]['parent']
            if p.startswith('folder'):
                for s in get_path_segments(p.split('/')[-1]):
                    yield s
            yield self.folders[fid]['displayName']

        for fid in self.folder_ids:
            self.folder_paths[fid] = '/'.join(get_path_segments(fid))

    def resolve_labels(self, project_id):
        """Merge the configured labels of every ancestor folder.

        Ancestors are visited outermost-first (reversed parents), so
        labels on deeper folders override those of shallower ones.
        """
        hlabels = {}
        parents = self.parents[project_id]
        for p in reversed(parents):
            # A folder may be keyed by raw id, display path, or
            # 'folders/<id>' in the labels mapping.
            pkeys = [p, self.folder_paths[p], 'folders/%s' % p]
            for pk in pkeys:
                hlabels.update(self.labels.get(pk, {}))
        return hlabels

    def diff(self, resources):
        """Yield ('update', params) for each project whose labels need
        to change to include its hierarchy labels."""
        model = self.manager.resource_type
        for r in resources:
            hlabels = self.resolve_labels(r['projectId'])
            if not hlabels:
                continue
            delta = False
            rlabels = r.get('labels', {})
            for k, v in hlabels.items():
                if k not in rlabels or rlabels[k] != v:
                    delta = True
            if not delta:
                continue
            # Copy before merging so the resource dict is not mutated.
            rlabels = dict(rlabels)
            rlabels.update(hlabels)
            if delta:
                yield ('update', model.get_label_params(r, rlabels))
def load_metadata(self):
    """Load hierarchy tags.

    Resolves the folder-labels mapping, then loads the folder
    hierarchy and builds folder path lookups.
    """
    resolver = ValuesFrom(self.data['folder-labels'], self.manager)
    self.resolver = resolver
    self.labels = resolver.get_values()
    self.load_folders()
    self.resolve_paths()
def get_values_from(self, data, content):
    """Build a ValuesFrom whose resolver serves ``content`` directly,
    with an empty manager config (test helper)."""
    empty_config = Config.empty()
    manager = Bag(
        {'session_factory': None, '_cache': None, 'config': empty_config})
    vf = ValuesFrom(data, manager)
    vf.resolver = FakeResolver(content)
    return vf
def get_vpces(self):
    """Return the set of whitelisted VPC endpoints.

    Combines inline ``whitelist_vpce`` entries with any values resolved
    from a ``whitelist_vpce_from`` source.
    """
    allowed = set(self.data.get('whitelist_vpce', ()))
    if 'whitelist_vpce_from' in self.data:
        resolver = ValuesFrom(self.data['whitelist_vpce_from'], self.manager)
        allowed = allowed | set(resolver.get_values())
    return allowed