def get_axfr_records(self, server, domains):
    """Return a `list` of `dict`s containing the zones and their records,
    obtained via AXFR zone transfer from the DNS server.

    Args:
        server (`str`): Address of the DNS server to transfer the zones from
        domains (`list` of `str`): Names of the zones to transfer

    Returns:
        :obj:`list` of `dict`

    Raises:
        Exception: Re-raises any error encountered while transferring a zone,
            after logging it. NOTE(review): this aborts the entire run on the
            first failing zone, discarding zones already collected — confirm
            that is intentional.
    """
    zones = []
    for zone_name in domains:
        try:
            zone = {
                'zone_id': get_resource_id('axfrz', zone_name),
                'name': zone_name,
                'source': 'AXFR',
                'comment': None,
                'tags': {},
                'records': []
            }

            z = dns_zone.from_xfr(query.xfr(server, zone_name))
            for name, ttl, rdata in z.iterate_rdatas():
                # Fully qualify the record name relative to the zone origin
                record_name = name.derelativize(z.origin).to_text()
                # Keep the same key order as the original (name, ttl, rdata)
                # so the generated resource ID stays stable
                rr = {'name': name, 'ttl': ttl, 'rdata': rdata}

                zone['records'].append({
                    'id': get_resource_id(
                        'axfrr',
                        record_name,
                        ['{}={}'.format(k, str(v)) for k, v in rr.items()]),
                    'zone_id': zone['zone_id'],
                    'name': record_name,
                    'value': sorted([rdata.to_text()]),
                    'type': type_to_text(rdata.rdtype)
                })

            # Only report zones that actually contain records
            if zone['records']:
                zones.append(zone)

        except Exception as ex:
            self.log.exception(
                'Failed fetching DNS zone information for {}: {}'.format(
                    zone_name, ex))
            raise

    return zones
def _get_resource_hash(zone_name, record):
    """Return a unique resource ID derived from the combined arguments.

    Useful for generating unique, stable resource IDs for Route53 records.

    Args:
        zone_name (`str`): The name of the DNS Zone the record belongs to
        record (`dict`): A record dict to generate the hash from

    Returns:
        `str`
    """
    # defaultdict(int) makes every missing record field hash as 0 instead of
    # raising KeyError, so partial records still produce a stable ID
    record_data = defaultdict(int, record)

    # GeoLocation arrives as a nested dict from the API; flatten it to a
    # deterministic string so it can participate in the hash
    if isinstance(record_data['GeoLocation'], dict):
        record_data['GeoLocation'] = ":".join(
            "{}={}".format(k, v)
            for k, v in record_data['GeoLocation'].items())

    args = [
        zone_name,
        record_data['Name'],
        record_data['Type'],
        record_data['Weight'],
        record_data['Region'],
        record_data['GeoLocation'],
        record_data['Failover'],
        record_data['HealthCheckId'],
        record_data['TrafficPolicyInstanceId']
    ]

    return get_resource_id('r53r', args)
def get_known_resources_missing_tags(self):
    """Audit all resources of the configured types for required-tag compliance.

    Returns:
        `dict`: Mapping of generated issue ID to a dict describing the
        non-compliant resource (missing tags, notes, resource id/object)
    """
    findings = {}
    audited_types = dbconfig.get(
        'audit_scope', NS_AUDITOR_REQUIRED_TAGS, {'enabled': []})['enabled']

    try:
        for type_name, type_class in self.resource_classes.items():
            # Skip resource types that are not in the audit scope
            if type_name not in audited_types:
                continue

            for rid, res in type_class.get_all().items():
                missing_tags, notes = self.check_required_tags_compliance(res)
                if not missing_tags:
                    continue

                # Not really a get, it generates a new resource ID
                issue_id = get_resource_id('reqtag', rid)
                findings[issue_id] = {
                    'issue_id': issue_id,
                    'missing_tags': missing_tags,
                    'notes': notes,
                    'resource_id': rid,
                    'resource': res
                }
    finally:
        # Ensure no stray transaction state is left on the shared session
        db.session.rollback()

    return findings
def get_cloudflare_records(self, *, account):
    """Return a `list` of `dict`s containing the zones and their records,
    obtained from the CloudFlare API.

    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object

    Returns:
        :obj:`list` of `dict`
    """
    zones = []

    for zobj in self.__cloudflare_list_zones(account=account):
        try:
            self.log.debug('Processing DNS zone CloudFlare/{}'.format(
                zobj['name']))
            zone = {
                'zone_id': get_resource_id('cfz', zobj['name']),
                'name': zobj['name'],
                'source': 'CloudFlare',
                'comment': None,
                'tags': {},
                'records': []
            }

            for record in self.__cloudflare_list_zone_records(
                    account=account, zoneID=zobj['id']):
                zone['records'].append({
                    'id': get_resource_id(
                        'cfr',
                        zobj['id'],
                        ['{}={}'.format(k, v) for k, v in record.items()]),
                    'zone_id': zone['zone_id'],
                    'name': record['name'],
                    'value': record['value'],
                    'type': record['type']
                })

            # Only report zones that actually contain records
            if zone['records']:
                zones.append(zone)

        except CloudFlareError:
            # Best-effort: log and continue with the remaining zones
            self.log.exception(
                'Failed getting records for CloudFlare zone {}'.format(
                    zobj['name']))

    return zones
def __fetch_route53_zones(self):
    """Return a list of all DNS zones hosted in Route53.

    Paginates through ``list_hosted_zones`` using the Marker/IsTruncated
    protocol until every page has been consumed.

    Returns:
        :obj:`dict` of `str`: `dict` — zone resource ID mapped to zone data
    """
    zones = {}
    marker = None
    route53 = self.session.client('route53')

    try:
        while True:
            if marker:
                response = route53.list_hosted_zones(Marker=marker)
            else:
                response = route53.list_hosted_zones()

            for zone_data in response['HostedZones']:
                zones[get_resource_id('r53z', zone_data['Id'])] = {
                    'name': zone_data['Name'].rstrip('.'),
                    'source': 'AWS/{}'.format(self.account),
                    # Comment is optional in the zone Config
                    'comment': zone_data['Config'].get('Comment'),
                    'zone_id': zone_data['Id'],
                    'private_zone': zone_data['Config']['PrivateZone'],
                    'tags': self.__fetch_route53_zone_tags(zone_data['Id'])
                }

            if not response['IsTruncated']:
                break
            marker = response['NextMarker']

        return zones
    finally:
        # Explicitly drop the client reference (matches original cleanup)
        del route53
def get_unattached_volumes(self):
    """Build a list of all unattached volumes that are not ignored via tags.

    Returns a `dict` keyed by the issue_id with the volume as the value.

    Returns:
        :obj:`dict` of `str`: `EBSVolume`
    """
    volumes = {}
    # Build the ignore set once, outside the loop, for O(1) membership tests
    ignored_tags = set(dbconfig.get('ignore_tags', self.ns))

    for volume in EBSVolume.get_all().values():
        # Only unattached volumes are candidates
        if volume.attachments:
            continue

        # Skip volumes carrying any of the configured ignore tags
        if any(tag.key in ignored_tags for tag in volume.tags):
            continue

        # Only compute the issue ID for volumes we actually report
        issue_id = get_resource_id('evai', volume.id)
        volumes[issue_id] = volume

    return volumes
def run(self, *args, **kwargs):
    """Update the cache of all DNS entries and perform checks

    Args:
        *args: Optional list of arguments
        **kwargs: Optional list of keyword arguments

    Returns:
        None
    """
    try:
        zones = list(DNSZone.get_all().values())
        # Bucket names are keyed lowercase so DNS-derived names match
        # regardless of case
        buckets = {k.lower(): v for k, v in S3Bucket.get_all().items()}
        dists = list(CloudFrontDist.get_all().values())
        ec2_public_ips = [x.public_ip for x in EC2Instance.get_all().values() if x.public_ip]
        beanstalks = {x.cname.lower(): x for x in BeanStalk.get_all().values()}
        existing_issues = DomainHijackIssue.get_all()
        issues = []

        # List of different types of domain audits
        auditors = [
            ElasticBeanstalkAudit(beanstalks),
            S3Audit(buckets),
            S3WithoutEndpointAudit(buckets),
            EC2PublicDns(ec2_public_ips),
        ]

        # region Build list of active issues
        # Run every auditor against every DNS record it matches
        for zone in zones:
            for record in zone.records:
                for auditor in auditors:
                    if auditor.match(record):
                        issues.extend(auditor.audit(record, zone))

        # Flag CloudFront distributions whose S3 origin bucket is not a
        # known bucket in any monitored account (potential takeover target)
        for dist in dists:
            for org in dist.origins:
                if org['type'] == 's3':
                    bucket = self.return_resource_name(org['source'], 's3')
                    if bucket not in buckets:
                        key = '{} ({})'.format(bucket, dist.type)
                        issues.append({
                            'key': key,
                            'value': 'S3Bucket {} doesnt exist on any known account. Referenced by {} on {}'.format(
                                bucket,
                                dist.domain_name,
                                dist.account,
                            )
                        })
        # endregion

        # region Process new, old, fixed issue lists
        old_issues = {}
        new_issues = {}
        fixed_issues = []
        for data in issues:
            # Issue ID is derived from the issue content, so the same
            # finding maps to the same ID across runs
            issue_id = get_resource_id('dhi', ['{}={}'.format(k, v) for k, v in data.items()])

            if issue_id in existing_issues:
                # Already known: mark as still present and clear any end date
                issue = existing_issues[issue_id]
                if issue.update({'state': 'EXISTING', 'end': None}):
                    db.session.add(issue.issue)
                old_issues[issue_id] = issue
            else:
                # First sighting: create a new issue record
                properties = {
                    'issue_hash': issue_id,
                    'state': 'NEW',
                    'start': datetime.now(),
                    'end': None,
                    'source': data['key'],
                    'description': data['value']
                }
                new_issues[issue_id] = DomainHijackIssue.create(issue_id, properties=properties)
        db.session.commit()

        # Any previously-known issue not seen this run is considered fixed
        for issue in list(existing_issues.values()):
            if issue.id not in new_issues and issue.id not in old_issues:
                fixed_issues.append(issue.to_json())
                db.session.delete(issue.issue)
        # endregion

        # Only alert if its been more than a day since the last alert
        alert_cutoff = datetime.now() - timedelta(hours=self.alert_frequency)
        old_alerts = []
        for issue_id, issue in old_issues.items():
            # NOTE(review): issues with no last_alert are never re-alerted
            # here — presumably last_alert is set at creation; confirm
            if issue.last_alert and issue.last_alert < alert_cutoff:
                if issue.update({'last_alert': datetime.now()}):
                    db.session.add(issue.issue)
                old_alerts.append(issue)
        db.session.commit()

        self.notify(
            [x.to_json() for x in new_issues.values()],
            [x.to_json() for x in old_alerts],
            fixed_issues
        )
    finally:
        # Discard any uncommitted session state no matter how we exit
        db.session.rollback()