def process(self, i, router, **kwargs):
    """For a whitelisted fqdn, emit matching whitelisted url indicators.

    Builds http/https permutations (adding a www. variant when missing) and
    submits each valid one back through the router.
    """
    if i.itype != 'fqdn':
        return

    if 'whitelist' not in i.tags:
        return

    # generate scheme / www permutations of the fqdn
    candidates = []
    for scheme in ['http://', 'https://']:
        candidates.append('{}{}'.format(scheme, i.indicator))
        if not i.indicator.startswith('www.'):
            candidates.append('{}www.{}'.format(scheme, i.indicator))

    for candidate in candidates:
        url = Indicator(**i.__dict__())
        url.indicator = candidate
        try:
            resolve_itype(url.indicator)
        except InvalidIndicator as e:
            self.logger.error(url)
            self.logger.error(e)
        else:
            url.tags = ['whitelist', 'hunter']
            url.itype = 'url'
            url.rdata = i.indicator
            url.lasttime = url.reporttime = arrow.utcnow()
            router.indicators_create(url)
def process(self, i, router, **kwargs):
    """Resolve a fqdn's A records and submit them as pdns-tagged ipv4 indicators."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        records = resolve_ns(i.indicator)
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        return

    for record in records:
        addr = str(record)
        # skip empty / localhost / sinkhole-style answers
        if addr in ("", 'localhost', '0.0.0.0'):
            continue

        ip = Indicator(**i.__dict__())
        ip.lasttime = ip.reporttime = arrow.utcnow()
        ip.indicator = addr

        try:
            resolve_itype(ip.indicator)
        except InvalidIndicator as e:
            self.logger.error(ip)
            self.logger.error(e)
        else:
            ip.itype = 'ipv4'
            ip.rdata = i.indicator
            ip.tags = ['pdns', 'hunter']
            ip.confidence = 10
            router.indicators_create(ip)
            self.logger.debug("FQDN Hunter: {}".format(ip))
def process(self, i, router):
    """Resolve a fqdn's A records; submit each address as a reduced-confidence ipv4."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        answers = resolve_ns(i.indicator)
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        return

    for answer in answers:
        # ignore empty answers and localhost
        if str(answer).rstrip('.') in ("", 'localhost'):
            continue

        ip = Indicator(**i.__dict__())
        ip.indicator = str(answer)
        ip.lasttime = arrow.utcnow()

        try:
            resolve_itype(ip.indicator)
        except InvalidIndicator as e:
            self.logger.error(ip)
            self.logger.error(e)
            continue

        ip.itype = 'ipv4'
        ip.rdata = i.indicator
        # a derived address is weaker evidence than the original fqdn
        ip.confidence = (ip.confidence - 4) if ip.confidence >= 4 else 0
        router.indicators_create(ip)
def process(self, i, router):
    """If the fqdn is a subdomain, submit its parent domain at reduced confidence.

    FIX: i.is_subdomain() was evaluated twice (guard + assignment); the
    result is now computed once and reused.
    """
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    parent = i.is_subdomain()
    if not parent:
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.indicator = parent
    fqdn.lasttime = fqdn.reporttime = arrow.utcnow()

    try:
        resolve_itype(fqdn.indicator)
    except InvalidIndicator as e:
        self.logger.error(fqdn)
        self.logger.error(e)
    else:
        # the parent is weaker evidence than the subdomain itself
        fqdn.confidence = (fqdn.confidence - 3) if fqdn.confidence >= 3 else 0
        if 'hunter' not in fqdn.tags:
            fqdn.tags.append('hunter')
        router.indicators_create(fqdn)
def process(self, i, router):
    """Extract the hostname from a url indicator and submit it as a fqdn."""
    if i.itype != 'url':
        return

    if 'search' in i.tags:
        return

    parsed = urlparse(i.indicator)
    if not parsed.hostname:
        return

    try:
        resolve_itype(parsed.hostname)
    except InvalidIndicator as e:
        self.logger.error(parsed.hostname)
        self.logger.error(e)
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.lasttime = arrow.utcnow()
    fqdn.indicator = parsed.hostname
    fqdn.itype = 'fqdn'
    fqdn.confidence = (int(fqdn.confidence) / 2)
    fqdn.rdata = i.indicator
    self.logger.debug('sending to router: {}'.format(fqdn))
    router.indicators_create(fqdn)
def process(self, i, router):
    """Follow CNAME records for a fqdn and submit targets as fqdn indicators.

    Confidence drops by one per hop to dampen recursive lookups.
    FIX: an invalid record previously executed `return` inside the loop,
    silently dropping every remaining answer; it now skips just that record.
    """
    if i.itype != 'fqdn':
        return

    try:
        r = resolve_ns(i.indicator, t='CNAME')
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        r = []

    for rr in r:
        # wildcard CNAMEs are valid:
        # http://serverfault.com/questions/44618/is-a-wildcard-cname-dns-record-valid
        rr = str(rr).rstrip('.').lstrip('*.')
        if rr in ['', 'localhost']:
            continue

        fqdn = Indicator(**i.__dict__())
        fqdn.indicator = rr
        fqdn.lasttime = arrow.utcnow()

        try:
            resolve_itype(fqdn.indicator)
        except InvalidIndicator as e:
            self.logger.error(fqdn)
            self.logger.error(e)
            # was `return`: keep processing the remaining records
            continue

        fqdn.itype = 'fqdn'
        fqdn.confidence = (fqdn.confidence - 1)
        router.indicators_create(fqdn)
def process(self, i, router):
    """Resolve MX records for a fqdn and submit the mail hosts as fqdn indicators."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        answers = resolve_ns(i.indicator, t='MX')
    except Timeout:
        self.logger.info('timeout trying to resolve MX for: {}'.format(i.indicator))
        return

    for answer in answers:
        # drop the leading MX preference number ("10 mail.example.com.")
        host = re.sub(r'^\d+ ', '', str(answer))

        fqdn = Indicator(**i.__dict__())
        fqdn.indicator = host.rstrip('.')

        try:
            resolve_itype(fqdn.indicator)
        except InvalidIndicator as e:
            # localhost answers are expected noise; only log the rest
            if not str(e).startswith('unknown itype for "localhost"'):
                self.logger.error(fqdn)
                self.logger.error(e)
        else:
            fqdn.itype = 'fqdn'
            fqdn.rdata = i.indicator
            fqdn.confidence = (int(fqdn.confidence) / 6)
            router.indicators_create(fqdn)
def process(self, i, router, **kwargs):
    """Extract the hostname from a url indicator and submit it as a fqdn.

    FIX: the fqdn_wl recursion guard used
    `(i.rdata is not None or i.rdata != '')`, which is always true (when
    rdata is None the second clause holds), so the guard degenerated to the
    tag check alone. The intent — skip only whitelisted urls whose rdata is
    actually set — needs `and`.
    """
    if i.itype != 'url':
        return

    if 'search' in i.tags:
        return

    # prevent recursion with fqdn_wl hunter
    if 'whitelist' in i.tags and (i.rdata is not None and i.rdata != ''):
        return

    u = urlparse(i.indicator)
    if not u.hostname:
        return

    try:
        resolve_itype(u.hostname)
    except InvalidIndicator as e:
        self.logger.error(u.hostname)
        self.logger.error(e)
    else:
        fqdn = Indicator(**i.__dict__())
        fqdn.lasttime = fqdn.reporttime = arrow.utcnow()
        fqdn.indicator = u.hostname
        fqdn.itype = 'fqdn'
        if 'hunter' not in fqdn.tags:
            fqdn.tags.append('hunter')
        fqdn.confidence = (int(fqdn.confidence) / 2)
        fqdn.rdata = i.indicator
        self.logger.debug('[Hunter: Url] sending to router {}'.format(fqdn))
        router.indicators_create(fqdn)
def process(self, i, router):
    """Resolve a fqdn's A records and submit them as lower-confidence ipv4s."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        records = resolve_ns(i.indicator)
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        return

    for record in records:
        # skip empty answers and localhost
        if str(record).rstrip('.') in ("", 'localhost'):
            continue

        ip = Indicator(**i.__dict__())
        ip.indicator = str(record)
        ip.lasttime = arrow.utcnow()

        try:
            resolve_itype(ip.indicator)
        except InvalidIndicator as e:
            self.logger.error(ip)
            self.logger.error(e)
            continue

        ip.itype = 'ipv4'
        ip.rdata = i.indicator
        ip.confidence = (ip.confidence - 4) if ip.confidence >= 4 else 0
        router.indicators_create(ip)
def process(self, i, router):
    """Follow CNAME records for a fqdn and submit the targets as fqdn indicators.

    FIX: an invalid record previously executed `return` inside the loop,
    silently dropping every remaining answer; it now skips just that record.
    """
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        r = resolve_ns(i.indicator, t='CNAME')
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        r = []

    for rr in r:
        # wildcard CNAMEs are valid:
        # http://serverfault.com/questions/44618/is-a-wildcard-cname-dns-record-valid
        rr = str(rr).rstrip('.').lstrip('*.')
        if rr in ['', 'localhost', '0.0.0.0']:
            continue

        fqdn = Indicator(**i.__dict__())
        fqdn.indicator = rr
        fqdn.lasttime = arrow.utcnow()

        try:
            resolve_itype(fqdn.indicator)
        except InvalidIndicator as e:
            self.logger.error(fqdn)
            self.logger.error(e)
            # was `return`: keep processing the remaining records
            continue

        fqdn.itype = 'fqdn'
        fqdn.confidence = (fqdn.confidence - 1)
        router.indicators_create(fqdn)
def process(self, i, router):
    """Resolve a fqdn's A records; submit each as an ipv4 plus a pdns copy."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        answers = resolve_ns(i.indicator)
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        return

    for answer in answers:
        ip = Indicator(**i.__dict__())
        ip.indicator = str(answer)

        try:
            resolve_itype(ip.indicator)
        except InvalidIndicator as e:
            self.logger.error(ip)
            self.logger.error(e)
            continue

        ip.itype = 'ipv4'
        ip.rdata = i.indicator
        ip.confidence = (int(ip.confidence) / 4)
        router.indicators_create(ip)

        # also create a passive dns tag
        ip.tags = 'pdns'
        ip.confidence = 10
        router.indicators_create(ip)
def process(self, i, router):
    """Follow CNAME records for a fqdn and submit the targets as fqdn indicators.

    FIX: an invalid record previously executed `return` inside the loop,
    silently dropping every remaining answer; it now skips just that record.
    """
    if i.itype != 'fqdn':
        return

    try:
        r = resolve_ns(i.indicator, t='CNAME')
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        r = []

    for rr in r:
        fqdn = Indicator(**i.__dict__())
        fqdn.indicator = str(rr).rstrip('.')

        try:
            resolve_itype(fqdn.indicator)
        except InvalidIndicator as e:
            self.logger.error(fqdn)
            self.logger.error(e)
            # was `return`: keep processing the remaining records
            continue

        fqdn.itype = 'fqdn'
        fqdn.confidence = (int(fqdn.confidence) / 2)
        router.indicators_create(fqdn)
def process(self, i, router, **kwargs):
    """Resolve MX records for a fqdn and submit the mail hosts as fqdn indicators."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        answers = resolve_ns(i.indicator, t='MX')
    except Timeout:
        self.logger.info('timeout trying to resolve MX for: {}'.format(
            i.indicator))
        return

    try:
        for rr in answers:
            # drop the MX preference number and trailing dot
            rr = re.sub(r'^\d+ ', '', str(rr))
            rr = str(rr).rstrip('.')

            if rr in ("", 'localhost', '0.0.0.0'):
                continue
            # exclude spurious entries like those too short to be real
            if re.match(r'^\d+$', rr) or re.match(r'^.{0,3}$', rr):
                continue

            fqdn = Indicator(**i.__dict__())
            fqdn.indicator = rr.rstrip('.')
            fqdn.lasttime = fqdn.reporttime = arrow.utcnow()

            try:
                resolve_itype(fqdn.indicator)
            except InvalidIndicator as e:
                self.logger.info(fqdn)
                self.logger.info(e)
                continue

            fqdn.itype = 'fqdn'
            if 'hunter' not in fqdn.tags:
                fqdn.tags.append('hunter')
            fqdn.rdata = '{} mx'.format(i.indicator)
            fqdn.confidence = (fqdn.confidence - 5) if fqdn.confidence >= 5 else 0
            router.indicators_create(fqdn)
            self.logger.debug("FQDN MX Hunter: {}".format(fqdn))
    except Exception as e:
        # belt-and-braces: never let one bad answer kill the hunter
        self.logger.error(
            '[Hunter: FqdnMx] {}: giving up on rr {} from indicator {}'.
            format(e, rr, i))
def process(self, i, router, **kwargs):
    """Resolve NS records for a fqdn and submit the nameservers as indicators."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        answers = resolve_ns(i.indicator, t='NS')
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        return

    for answer in answers:
        host = str(answer).rstrip('.')
        if host in ("", 'localhost', '0.0.0.0'):
            continue

        i_ns = Indicator(**i.__dict__())
        i_ns.indicator = host

        try:
            ns_itype = resolve_itype(i_ns.indicator)
        except InvalidIndicator as e:
            self.logger.error(i_ns)
            self.logger.error(e)
            continue

        i_ns.lasttime = i_ns.reporttime = arrow.utcnow()
        # nameserver answers may be hostnames or addresses; keep whatever
        # resolve_itype reported
        i_ns.itype = ns_itype
        i_ns.rdata = "{} nameserver".format(i.indicator)
        if 'hunter' not in i_ns.tags:
            i_ns.tags.append('hunter')
        i_ns.confidence = (i_ns.confidence - 4) if i_ns.confidence >= 4 else 0
        router.indicators_create(i_ns)
        self.logger.debug("FQDN NS Hunter: {}".format(i_ns))
def indicators_create(self, data):
    """Persist an Indicator object into the current index; return the saved doc."""
    index = self._create_index()

    doc = data.__dict__()
    del doc['version']
    doc['meta'] = {}
    doc['meta']['index'] = index

    # denormalize ipv4 (and optional cidr mask) for range searches
    if resolve_itype(data.indicator) == 'ipv4':
        match = re.search('^(\S+)\/(\d+)$', data.indicator)
        if match:
            doc['indicator_ipv4'] = match.group(1)
            doc['indicator_ipv4_mask'] = match.group(2)
        else:
            doc['indicator_ipv4'] = data.indicator

    # group is stored as a list
    if type(data.group) != list:
        doc['group'] = [data.group]

    logger.debug(doc)
    i = Indicator(**doc)
    logger.debug(i)

    if not i.save():
        raise RuntimeError('unable to save')

    return i.__dict__['_d_']
def indicators_create(self, token, data):
    """Persist an indicator dict into the current index; return the saved doc."""
    index = self._create_index()
    self.logger.debug('index: {}'.format(index))

    data['meta'] = {}
    data['meta']['index'] = index

    # denormalize ipv4 (and optional cidr mask) for range searches
    if resolve_itype(data['indicator']) == 'ipv4':
        import re
        match = re.search('^(\S+)\/(\d+)$', data['indicator'])
        if match:
            data['indicator_ipv4'] = match.group(1)
            data['indicator_ipv4_mask'] = match.group(2)
        else:
            data['indicator_ipv4'] = data['indicator']

    # group is stored as a list
    if type(data['group']) != list:
        data['group'] = [data['group']]

    self.logger.debug(data)
    i = Indicator(**data)
    self.logger.debug(i)

    if not i.save():
        raise AuthError('invalid token')

    return i.__dict__['_d_']
def process(self, i, router):
    """Resolve MX records for a fqdn and submit the mail hosts as fqdn indicators.

    FIX: a purely numeric record previously executed `return` inside the
    loop, silently dropping every remaining MX answer; it now skips just
    that record (matching the sibling MX hunters, which use `continue`).
    """
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        r = resolve_ns(i.indicator, t='MX')
    except Timeout:
        self.logger.info('timeout trying to resolve MX for: {}'.format(i.indicator))
        return

    try:
        for rr in r:
            # drop the MX preference number and trailing dot
            rr = re.sub(r'^\d+ ', '', str(rr))
            rr = str(rr).rstrip('.')

            if rr in ["", 'localhost', '0.0.0.0']:
                continue

            # purely numeric answers are bogus; was `return`
            if re.match(r'^\d+$', rr):
                continue

            fqdn = Indicator(**i.__dict__())
            fqdn.indicator = rr.rstrip('.')
            fqdn.lasttime = fqdn.reporttime = arrow.utcnow()

            try:
                resolve_itype(fqdn.indicator)
            except InvalidIndicator as e:
                self.logger.info(fqdn)
                self.logger.info(e)
            else:
                fqdn.itype = 'fqdn'
                if 'hunter' not in fqdn.tags:
                    fqdn.tags.append('hunter')
                fqdn.rdata = i.indicator
                fqdn.confidence = (fqdn.confidence - 5) if fqdn.confidence >= 5 else 0
                router.indicators_create(fqdn)
    except Exception as e:
        self.logger.error(
            '[Hunter: FqdnMx] {}: giving up on indicator {}'.format(e, rr))
def process(i):
    # MX hunter (list-returning variant): derive fqdn indicators from a
    # fqdn's MX hosts.
    # NOTE(review): this hunter is hard-disabled -- the bare `return` below
    # short-circuits the function, so everything after it is dead code.
    # Remove the first `return` to re-enable it.
    return
    if not ENABLED:
        return

    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        r = resolve_ns(i.indicator, t='MX')
        if not r:
            return
    except Timeout:
        return

    rv = []
    for rr in r:
        # drop the MX preference number ("10 mail.example.com.") and dot
        rr = re.sub(r'^\d+ ', '', str(rr))
        rr = str(rr).rstrip('.')

        if rr in ["", 'localhost']:
            continue

        # 10
        # purely numeric answers are bogus
        if re.match('^\d+$', rr):
            continue

        fqdn = Indicator(**i.__dict__())
        fqdn.probability = 0
        fqdn.indicator = rr.rstrip('.')
        fqdn.lasttime = arrow.utcnow()

        try:
            resolve_itype(fqdn.indicator)
        except:
            # best-effort: drop records that don't resolve to a known itype
            continue

        fqdn.itype = 'fqdn'
        fqdn.rdata = i.indicator
        fqdn.confidence = 0
        rv.append(fqdn)

    return rv
def process(i):
    # Resolve a fqdn's A records; return, per address, a low-confidence
    # ipv4 indicator plus a pdns-tagged copy.
    if not ENABLED:
        return

    if i.itype != 'fqdn':
        return

    try:
        r = resolve_ns(i.indicator)
        if not r:
            return
    except Timeout:
        return

    rv = []
    for rr in r:
        rr = str(rr)
        if rr in ["", 'localhost']:
            continue

        ip = Indicator(**i.__dict__())
        ip.lasttime = arrow.utcnow()
        ip.indicator = rr

        try:
            resolve_itype(ip.indicator)
        except:
            # best-effort: drop answers that aren't a known itype
            continue

        ip.itype = 'ipv4'
        ip.rdata = i.indicator
        ip.confidence = 1
        ip.probability = 0
        rv.append(ip)

        # also create a passive dns tag
        # deepcopy so the pdns copy shares no mutable state with `i`
        pdns = Indicator(**copy.deepcopy(i.__dict__()))
        pdns.tags = 'pdns'
        pdns.confidence = 4
        pdns.probability = i.probability
        pdns.indicator = ip.indicator
        pdns.rdata = i.indicator
        rv.append(pdns)

    return rv
def _filter_indicator(self, filters, s):
    # Apply the `indicator` filter to SQLAlchemy query `s`, dispatching on
    # the indicator's resolved type; unknown filter keys are stripped first.
    for k, v in list(filters.items()):
        if k not in VALID_FILTERS:
            del filters[k]

    if not filters.get('indicator'):
        return s

    i = filters.pop('indicator')

    if PYVERSION == 2:
        if isinstance(i, str):
            i = unicode(i)

    try:
        itype = resolve_itype(i)
    except InvalidIndicator as e:
        # not a recognizable indicator: fall back to a free-text LIKE search
        # over messages.
        # NOTE(review): `Indicator.Message.like` looks suspect -- sibling
        # code filters on the Message model's own column; confirm this
        # attribute actually exists on Indicator.
        logger.error(e)
        s = s.join(Message).filter(Indicator.Message.like(
            '%{}%'.format(i)))
        return s

    if itype in ['fqdn', 'email', 'url']:
        # exact match for name-like indicators
        s = s.filter(Indicator.indicator == i)
        return s

    if itype == 'ipv4':
        ip = ipaddress.IPv4Network(i)
        mask = ip.prefixlen
        # refuse overly broad sweeps
        if mask < 8:
            raise InvalidSearch('prefix needs to be >= 8')
        start = str(ip.network_address)
        end = str(ip.broadcast_address)
        logger.debug('{} - {}'.format(start, end))
        s = s.join(Ipv4).filter(Ipv4.ipv4 >= start)
        s = s.filter(Ipv4.ipv4 <= end)
        return s

    if itype == 'ipv6':
        ip = ipaddress.IPv6Network(i)
        mask = ip.prefixlen
        if mask < 32:
            raise InvalidSearch('prefix needs to be >= 32')
        start = str(ip.network_address)
        end = str(ip.broadcast_address)
        logger.debug('{} - {}'.format(start, end))
        s = s.join(Ipv6).filter(Ipv6.ip >= start)
        s = s.filter(Ipv6.ip <= end)
        return s

    # resolvable itype with no search strategy
    raise InvalidIndicator
def indicators_search(self, token, filters):
    """Search the indicator index with the supplied filters; return raw hit sources."""
    # build filters with elasticsearch-dsl
    # http://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html
    limit = filters.get('limit')
    if limit:
        del filters['limit']
    else:
        limit = LIMIT

    if filters.get('nolog'):
        del filters['nolog']

    s = Indicator.search()
    s = s.params(size=limit, timeout=TIMEOUT)
    #s = s.sort('-reporttime')

    # keep only the recognized filter keys
    q_filters = {f: filters[f] for f in VALID_FILTERS if filters.get(f)}

    if q_filters.get('indicator'):
        if resolve_itype(q_filters['indicator']) == 'ipv4':
            net = ipaddress.IPv4Network(q_filters['indicator'])
            if net.prefixlen < 8:
                raise InvalidSearch(
                    'prefix needs to be greater than or equal to 8')
            s = s.filter('range', indicator_ipv4={
                'gte': str(net.network_address),
                'lte': str(net.broadcast_address)
            })
            del q_filters['indicator']

    # everything left becomes a term filter
    for f in q_filters:
        s = s.filter('term', **{f: q_filters[f]})

    try:
        rv = s.execute()
    except elasticsearch.exceptions.RequestError as e:
        self.logger.error(e)
        return []

    try:
        return [x['_source'] for x in rv.hits.hits]
    except KeyError:
        return []
def process(self, i, router):
    """Extract the hostname from a url indicator and submit it as a fqdn."""
    if i.itype != 'url':
        return

    parsed = urlparse(i.indicator)
    if not parsed.hostname:
        return

    try:
        resolve_itype(parsed.hostname)
    except InvalidIndicator as e:
        self.logger.error(parsed.hostname)
        self.logger.error(e)
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.indicator = parsed.hostname
    fqdn.itype = 'fqdn'
    fqdn.confidence = (int(fqdn.confidence) / 2)
    fqdn.rdata = i.indicator
    self.logger.debug('sending to router..')
    router.indicators_create(fqdn)
def process(self, i, router):
    """Extract the host from a url indicator and submit it as a fqdn.

    FIX: use urlparse().hostname instead of .netloc — netloc retains the
    port and userinfo ("user@host:8080"), which is never a valid fqdn and
    would fail (or mis-classify) in resolve_itype. hostname is also
    lowercased, matching the sibling url hunters.
    """
    if i.itype != 'url':
        return

    u = urlparse(i.indicator)
    if not u.hostname:
        return

    try:
        resolve_itype(u.hostname)
    except InvalidIndicator as e:
        self.logger.error(u.hostname)
        self.logger.error(e)
    else:
        fqdn = Indicator(**i.__dict__())
        fqdn.indicator = u.hostname
        fqdn.itype = 'fqdn'
        fqdn.confidence = (int(fqdn.confidence) / 2)
        fqdn.rdata = i.indicator
        self.logger.debug('sending to router..')
        router.indicators_create(fqdn)
def process(i):
    """If the fqdn is a subdomain, return its parent domain as a low-confidence indicator.

    FIX: i.is_subdomain() was evaluated twice (guard + assignment); compute
    it once and reuse the result.
    """
    if i.itype != 'fqdn':
        return

    parent = i.is_subdomain()
    if not parent:
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.probability = 0
    fqdn.indicator = parent
    fqdn.lasttime = arrow.utcnow()

    try:
        resolve_itype(fqdn.indicator)
    except:
        # best-effort: silently drop invalid parents
        return

    fqdn.confidence = 1
    return fqdn
def process(self, i, router, **kwargs):
    """Follow CNAME records for a fqdn and submit the targets as fqdn indicators.

    FIX: an invalid record previously executed `return` inside the loop,
    silently dropping every remaining answer; it now skips just that record.
    """
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        r = resolve_ns(i.indicator, t='CNAME')
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        r = []

    for rr in r:
        # wildcard CNAMEs are valid:
        # http://serverfault.com/questions/44618/is-a-wildcard-cname-dns-record-valid
        rr = str(rr).rstrip('.').lstrip('*.')
        if rr in ['', 'localhost', '0.0.0.0']:
            continue

        fqdn = Indicator(**i.__dict__())
        fqdn.indicator = rr
        fqdn.lasttime = fqdn.reporttime = arrow.utcnow()

        try:
            resolve_itype(fqdn.indicator)
        except InvalidIndicator as e:
            self.logger.error(fqdn)
            self.logger.error(e)
            # was `return`: keep processing the remaining records
            continue

        fqdn.itype = 'fqdn'
        fqdn.rdata = '{} cname'.format(i.indicator)
        if 'hunter' not in fqdn.tags:
            fqdn.tags.append('hunter')
        # cap derived confidence at 7 to dampen recursive cname chains
        if fqdn.confidence < 8:
            fqdn.confidence -= 1
        else:
            fqdn.confidence = 7
        router.indicators_create(fqdn)
        self.logger.debug("FQDN CNAME Hunter: {}".format(fqdn))
def process(self, i, router):
    """If the fqdn is a subdomain, submit its parent domain at reduced confidence.

    FIX: i.is_subdomain() was evaluated twice (guard + assignment); compute
    it once and reuse the result.
    """
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    parent = i.is_subdomain()
    if not parent:
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.indicator = parent

    try:
        resolve_itype(fqdn.indicator)
    except InvalidIndicator as e:
        self.logger.error(fqdn)
        self.logger.error(e)
    else:
        # the parent is weaker evidence than the subdomain itself
        fqdn.confidence = (int(fqdn.confidence) / 3)
        router.indicators_create(fqdn)
def indicators_search(self, token, filters):
    """Search the indicator index using the supplied filters; return raw hit sources."""
    # build filters with elasticsearch-dsl
    # http://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html
    limit = filters.get('limit')
    if limit:
        del filters['limit']
    else:
        limit = LIMIT

    nolog = filters.get('nolog')
    if nolog:
        del filters['nolog']

    search = Indicator.search().params(size=limit, timeout=TIMEOUT)
    #s = s.sort('-reporttime')

    # keep only the recognized filter keys
    q_filters = {}
    for f in VALID_FILTERS:
        if filters.get(f):
            q_filters[f] = filters[f]

    indicator = q_filters.get('indicator')
    if indicator:
        if resolve_itype(indicator) == 'ipv4':
            net = ipaddress.IPv4Network(indicator)
            if net.prefixlen < 8:
                raise InvalidSearch('prefix needs to be greater than or equal to 8')
            search = search.filter('range', indicator_ipv4={
                'gte': str(net.network_address),
                'lte': str(net.broadcast_address)})
            del q_filters['indicator']

    # everything left becomes a term filter
    for f in q_filters:
        search = search.filter('term', **{f: q_filters[f]})

    try:
        rv = search.execute()
    except elasticsearch.exceptions.RequestError as e:
        self.logger.error(e)
        return []

    try:
        return [x['_source'] for x in rv.hits.hits]
    except KeyError:
        return []
def process(i):
    # A-record hunter (list-returning variant).
    # NOTE(review): hard-disabled -- the bare `return` below makes the rest
    # of this function dead code. Remove it to re-enable the hunter.
    return
    if not ENABLED:
        return

    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        r = resolve_ns(i.indicator)
        if not r:
            return
    except Timeout:
        return

    rv = []
    for rr in r:
        # skip empty answers and localhost
        if str(rr).rstrip('.') in ["", 'localhost']:
            continue

        ip = Indicator(**i.__dict__())
        ip.probability = 0
        ip.indicator = str(rr)
        ip.lasttime = arrow.utcnow()

        try:
            resolve_itype(ip.indicator)
        except:
            # best-effort: drop answers that aren't a known itype
            continue

        ip.itype = 'ipv4'
        ip.rdata = i.indicator
        ip.confidence = 0
        rv.append(ip)

    return rv
def process(self, i, router):
    """If the fqdn is a subdomain, submit its parent domain at reduced confidence.

    FIX: i.is_subdomain() was evaluated twice (guard + assignment); compute
    it once and reuse the result.
    """
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    parent = i.is_subdomain()
    if not parent:
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.indicator = parent
    fqdn.lasttime = arrow.utcnow()

    try:
        resolve_itype(fqdn.indicator)
    except InvalidIndicator as e:
        self.logger.error(fqdn)
        self.logger.error(e)
    else:
        # the parent is weaker evidence than the subdomain itself
        fqdn.confidence = (fqdn.confidence - 3) if fqdn.confidence >= 3 else 0
        router.indicators_create(fqdn)
def process(self, i, router):
    """Resolve a fqdn's A records; submit each as an ipv4 plus a pdns copy."""
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        answers = resolve_ns(i.indicator)
    except Timeout:
        self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
        return

    for answer in answers:
        addr = str(answer)
        # skip empty / localhost / sinkhole-style answers
        if addr in ("", 'localhost', '0.0.0.0'):
            continue

        ip = Indicator(**i.__dict__())
        ip.lasttime = arrow.utcnow()
        ip.indicator = addr

        try:
            resolve_itype(ip.indicator)
        except InvalidIndicator as e:
            self.logger.error(ip)
            self.logger.error(e)
            continue

        ip.itype = 'ipv4'
        ip.rdata = i.indicator
        ip.confidence = (ip.confidence - 2) if ip.confidence >= 2 else 0
        router.indicators_create(ip)

        # also create a passive dns tag
        ip.tags = 'pdns'
        ip.confidence = 10
        router.indicators_create(ip)
def process(i):
    """Follow CNAME records for a fqdn and return the derived fqdn indicators.

    FIX: an invalid record previously executed `return` inside the loop,
    which returned None and discarded every indicator already collected in
    rv; it now skips just that record.
    """
    if not ENABLED:
        return

    if i.itype != 'fqdn':
        return

    try:
        r = resolve_ns(i.indicator, t='CNAME')
        if not r:
            return
    except Timeout:
        return

    rv = []
    for rr in r:
        # http://serverfault.com/questions/44618/is-a-wildcard-cname-dns-record-valid
        rr = str(rr).rstrip('.').lstrip('*.')
        if rr in ['', 'localhost']:
            continue

        fqdn = Indicator(**i.__dict__())
        fqdn.probability = 0
        fqdn.indicator = rr
        fqdn.lasttime = arrow.utcnow()

        try:
            resolve_itype(fqdn.indicator)
        except:
            # was `return`: keep the indicators collected so far
            continue

        fqdn.itype = 'fqdn'
        # keep avoid recursive cname lookups
        fqdn.confidence = int(fqdn.confidence / 2) if fqdn.confidence >= 2 else 0
        rv.append(fqdn)

    return rv
def process(self, i, router):
    """Resolve MX records for a fqdn and submit the mail hosts as fqdn indicators.

    FIX: a purely numeric record previously executed `return` inside the
    loop, silently dropping every remaining MX answer; it now skips just
    that record (matching the sibling MX hunters).
    """
    if i.itype != 'fqdn':
        return

    if 'search' in i.tags:
        return

    try:
        r = resolve_ns(i.indicator, t='MX')
    except Timeout:
        self.logger.info('timeout trying to resolve MX for: {}'.format(i.indicator))
        return

    for rr in r:
        # drop the MX preference number and trailing dot
        rr = re.sub(r'^\d+ ', '', str(rr))
        rr = str(rr).rstrip('.')

        if rr in ["", 'localhost']:
            continue

        # purely numeric answers are bogus; was `return`
        if re.match(r'^\d+$', rr):
            continue

        fqdn = Indicator(**i.__dict__())
        fqdn.indicator = rr.rstrip('.')
        fqdn.lasttime = arrow.utcnow()

        try:
            resolve_itype(fqdn.indicator)
        except InvalidIndicator as e:
            self.logger.info(fqdn)
            self.logger.info(e)
        else:
            fqdn.itype = 'fqdn'
            fqdn.rdata = i.indicator
            fqdn.confidence = (fqdn.confidence - 5) if fqdn.confidence >= 5 else 0
            router.indicators_create(fqdn)
def process(self, i, router):
    """Extract the hostname from a url indicator and submit it as a fqdn."""
    if i.itype != 'url':
        return

    host = urlparse(i.indicator).hostname
    if not host:
        return

    try:
        resolve_itype(host)
    except InvalidIndicator as e:
        self.logger.error(host)
        self.logger.error(e)
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.lasttime = arrow.utcnow()
    fqdn.indicator = host
    fqdn.itype = 'fqdn'
    fqdn.confidence = (int(fqdn.confidence) / 2)
    fqdn.rdata = i.indicator
    self.logger.debug('sending to router: {}'.format(fqdn))
    router.indicators_create(fqdn)
def process(i):
    # Derive a fixed-confidence fqdn indicator from a url indicator's
    # hostname; returns the new Indicator (or None when not applicable).
    if i.itype != 'url':
        return

    u = urlparse(i.indicator)
    if not u.hostname:
        return

    try:
        resolve_itype(u.hostname)
    # NOTE(review): sibling hunters catch InvalidIndicator here; catching
    # TypeError instead looks suspect -- confirm which exception
    # resolve_itype actually raises for a bad hostname.
    except TypeError as e:
        logger.error(u.hostname)
        logger.error(e)
        return

    fqdn = Indicator(**i.__dict__())
    fqdn.lasttime = arrow.utcnow()
    fqdn.indicator = u.hostname
    fqdn.itype = 'fqdn'
    fqdn.confidence = 2
    fqdn.rdata = i.indicator
    fqdn.probability = 0
    return fqdn
def indicators_create(self, data, raw=False):
    """Persist an indicator dict; return the Indicator (raw=True) or its saved doc.

    FIX: the itype checks used identity (`itype is 'ipv4'`), which only
    works by accident of CPython string interning (and emits a
    SyntaxWarning on 3.8+); use equality.
    """
    index = _create_index()
    self.logger.debug('index: {}'.format(index))

    data['meta'] = {}
    data['meta']['index'] = index

    itype = resolve_itype(data['indicator'])
    if itype == 'ipv4':
        # denormalize the address (and optional cidr mask) for range searches
        match = re.search('^(\S+)\/(\d+)$', data['indicator'])
        if match:
            data['indicator_ipv4'] = match.group(1)
            data['indicator_ipv4_mask'] = match.group(2)
        else:
            data['indicator_ipv4'] = data['indicator']

    elif itype == 'ipv6':
        # stored as a hex string so lexical range queries order correctly
        match = re.search('^(\S+)\/(\d+)$', data['indicator'])
        if match:
            data['indicator_ipv6'] = binascii.b2a_hex(
                socket.inet_pton(socket.AF_INET6, match.group(1))).decode('utf-8')
            data['indicator_ipv6_mask'] = match.group(2)
        else:
            data['indicator_ipv6'] = binascii.b2a_hex(
                socket.inet_pton(socket.AF_INET6, data['indicator'])).decode('utf-8')

    if type(data['group']) != list:
        data['group'] = [data['group']]

    if data.get('message'):
        try:
            data['message'] = str(b64decode(data['message']))
        except (TypeError, binascii.Error) as e:
            # message may already be plain text
            pass

    i = Indicator(**data)

    if i.save():
        if raw:
            return i
        return i.__dict__['_d_']

    raise AuthError('invalid token')
def _filter_indicator(self, q_filters, s):
    """Apply the `indicator` filter to ES search `s`, dispatching on itype.

    FIX: the itype checks used identity (`itype is 'ipv4'`), which only
    works by accident of CPython string interning (and emits a
    SyntaxWarning on 3.8+); use equality.
    """
    if not q_filters.get('indicator'):
        return s

    i = q_filters.pop('indicator')

    try:
        itype = resolve_itype(i)
    except InvalidIndicator:
        # not an indicator at all: fall back to a free-text message search
        s = s.query("match", message=i)
        return s

    if itype in ('email', 'url', 'fqdn'):
        s = s.filter('term', indicator=i)
        return s

    if itype == 'ipv4':
        ip = ipaddress.IPv4Network(i)
        mask = ip.prefixlen
        # refuse overly broad sweeps
        if mask < 8:
            raise InvalidSearch(
                'prefix needs to be greater than or equal to 8')
        start = str(ip.network_address)
        end = str(ip.broadcast_address)
        s = s.filter('range', indicator_ipv4={'gte': start, 'lte': end})
        return s

    if itype == 'ipv6':
        ip = ipaddress.IPv6Network(i)
        mask = ip.prefixlen
        if mask < 32:
            raise InvalidSearch(
                'prefix needs to be greater than or equal to 32')
        # the index stores ipv6 as hex strings, so compare hex bounds
        start = binascii.b2a_hex(
            socket.inet_pton(socket.AF_INET6, str(ip.network_address))).decode('utf-8')
        end = binascii.b2a_hex(
            socket.inet_pton(socket.AF_INET6, str(ip.broadcast_address))).decode('utf-8')
        s = s.filter('range', indicator_ipv6={'gte': start, 'lte': end})
        return s
def indicators_create(self, data, raw=False):
    """Persist an indicator dict; return the Indicator (raw=True) or its saved doc.

    FIX: replaced string identity comparisons (`itype is 'ipv4'`) with
    equality — identity only worked via CPython interning and warns on 3.8+.
    """
    index = _create_index()
    self.logger.debug('index: {}'.format(index))

    data['meta'] = {}
    data['meta']['index'] = index

    itype = resolve_itype(data['indicator'])
    if itype == 'ipv4':
        # denormalize the address (and optional cidr mask) for range searches
        match = re.search('^(\S+)\/(\d+)$', data['indicator'])
        if match:
            data['indicator_ipv4'] = match.group(1)
            data['indicator_ipv4_mask'] = match.group(2)
        else:
            data['indicator_ipv4'] = data['indicator']

    elif itype == 'ipv6':
        # stored as a hex string so lexical range queries order correctly
        match = re.search('^(\S+)\/(\d+)$', data['indicator'])
        if match:
            data['indicator_ipv6'] = binascii.b2a_hex(
                socket.inet_pton(socket.AF_INET6, match.group(1))).decode('utf-8')
            data['indicator_ipv6_mask'] = match.group(2)
        else:
            data['indicator_ipv6'] = binascii.b2a_hex(
                socket.inet_pton(socket.AF_INET6, data['indicator'])).decode('utf-8')

    if type(data['group']) != list:
        data['group'] = [data['group']]

    if data.get('message'):
        try:
            data['message'] = str(b64decode(data['message']))
        except (TypeError, binascii.Error) as e:
            # message may already be plain text
            pass

    i = Indicator(**data)

    if i.save():
        if raw:
            return i
        return i.__dict__['_d_']

    raise AuthError('invalid token')
def _expand_ip_idx(self, data):
    """Denormalize ip indicators into the indexed ipv4/ipv6 columns (in place).

    FIX: replaced string identity comparisons (`itype is 'ipv4'`) with
    equality — identity only worked via CPython interning and warns on 3.8+.
    """
    itype = resolve_itype(data['indicator'])

    if itype == 'ipv4':
        match = re.search('^(\S+)\/(\d+)$', data['indicator'])
        if match:
            data['indicator_ipv4'] = match.group(1)
            data['indicator_ipv4_mask'] = match.group(2)
        else:
            data['indicator_ipv4'] = data['indicator']

    elif itype == 'ipv6':
        # stored as a hex string so lexical range queries order correctly
        match = re.search('^(\S+)\/(\d+)$', data['indicator'])
        if match:
            data['indicator_ipv6'] = binascii.b2a_hex(
                socket.inet_pton(socket.AF_INET6, match.group(1))).decode('utf-8')
            data['indicator_ipv6_mask'] = match.group(2)
        else:
            data['indicator_ipv6'] = binascii.b2a_hex(
                socket.inet_pton(socket.AF_INET6, data['indicator'])).decode('utf-8')
def process(self, i, router):
    # Monolithic fqdn hunter: for a single fqdn indicator it chases CNAME,
    # parent-domain, A, NS and MX relationships and submits each derivative
    # back through the router at progressively lower confidence.
    # (NOTE: the 'unknown itype for "localhost"' literal below was split by
    # an extraction artifact in the original source and is reconstructed to
    # match the sibling MX hunter.)
    if i.itype == 'fqdn':
        # 1) CNAME targets -> fqdn indicators (confidence / 2)
        try:
            r = resolve_ns(i.indicator, t='CNAME')
        except Timeout:
            self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
            r = []

        for rr in r:
            fqdn = Indicator(**i.__dict__())
            fqdn.indicator = str(rr).rstrip('.')
            try:
                resolve_itype(fqdn.indicator)
            except InvalidIndicator as e:
                self.logger.error(fqdn)
                self.logger.error(e)
            else:
                fqdn.itype = 'fqdn'
                fqdn.confidence = (int(fqdn.confidence) / 2)
                router.indicators_create(fqdn)

        # 2) parent domain of a subdomain (confidence / 3)
        if i.is_subdomain():
            fqdn = Indicator(**i.__dict__())
            fqdn.indicator = i.is_subdomain()
            try:
                resolve_itype(fqdn.indicator)
            except InvalidIndicator as e:
                self.logger.error(fqdn)
                self.logger.error(e)
            else:
                fqdn.confidence = (int(fqdn.confidence) / 3)
                router.indicators_create(fqdn)

        # 3) A records -> ipv4 indicators (confidence / 4)
        try:
            r = resolve_ns(i.indicator)
        except Timeout:
            self.logger.info('timeout trying to resolve: {}'.format(i.indicator))
            r = []

        for rr in r:
            ip = Indicator(**i.__dict__())
            ip.indicator = str(rr)
            try:
                resolve_itype(ip.indicator)
            except InvalidIndicator as e:
                self.logger.error(ip)
                self.logger.error(e)
            else:
                ip.itype = 'ipv4'
                ip.rdata = i.indicator
                ip.confidence = (int(ip.confidence) / 4)
                router.indicators_create(ip)

        # 4) NS records -> fqdn indicators (confidence / 5)
        try:
            r = resolve_ns(i.indicator, t='NS')
        except Timeout:
            self.logger.info('timeout trying to resolve NS for: {}'.format(i.indicator))
            r = []

        for rr in r:
            fqdn = Indicator(**i.__dict__())
            fqdn.indicator = str(rr).rstrip('.')
            try:
                resolve_itype(fqdn.indicator)
            except InvalidIndicator as e:
                self.logger.error(fqdn)
                self.logger.error(e)
            else:
                fqdn.itype = 'fqdn'
                fqdn.rdata = i.indicator
                fqdn.confidence = (int(fqdn.confidence) / 5)
                router.indicators_create(fqdn)

        # 5) MX hosts -> fqdn indicators (confidence / 6)
        try:
            r = resolve_ns(i.indicator, t='MX')
        except Timeout:
            self.logger.info('timeout trying to resolve MX for: {}'.format(i.indicator))
            r = []

        for rr in r:
            # drop the MX preference number ("10 mail.example.com.")
            rr = re.sub(r'^\d+ ', '', str(rr))
            fqdn = Indicator(**i.__dict__())
            fqdn.indicator = rr.rstrip('.')
            try:
                resolve_itype(fqdn.indicator)
            except InvalidIndicator as e:
                # localhost answers are expected noise; only log the rest
                if not str(e).startswith('unknown itype for "localhost"'):
                    self.logger.error(fqdn)
                    self.logger.error(e)
            else:
                fqdn.itype = 'fqdn'
                fqdn.rdata = i.indicator
                fqdn.confidence = (int(fqdn.confidence) / 6)
                router.indicators_create(fqdn)
def indicators_upsert(self, data):
    # Insert-or-update a batch of indicator dicts. For an existing
    # (indicator, provider) row with an older lasttime, the row is
    # refreshed in place; otherwise a new Indicator row (plus
    # Ipv4/Ipv6/Tag/Message children) is created. Returns the number of
    # rows touched.
    if type(data) == dict:
        data = [data]

    s = self.handle()

    n = 0
    # indicator -> set(lasttime) already inserted in this batch (dedup)
    tmp_added = {}

    for d in data:
        self.logger.debug(d)

        tags = d.get("tags", [])
        if len(tags) > 0:
            if isinstance(tags, basestring):
                # NOTE(review): this tests for '.' but splits on ',' --
                # presumably both were meant to be ','; confirm before
                # changing.
                if '.' in tags:
                    tags = tags.split(',')
                else:
                    tags = [str(tags)]
            del d['tags']

        i = s.query(Indicator).filter_by(
            indicator=d['indicator'],
            provider=d['provider'],
        ).order_by(Indicator.lasttime.desc())

        if i.count() > 0:
            r = i.first()
            # only refresh when the incoming observation is strictly newer
            if d.get('lasttime') and arrow.get(d['lasttime']).datetime > arrow.get(r.lasttime).datetime:
                self.logger.debug('{} {}'.format(arrow.get(r.lasttime).datetime, arrow.get(d['lasttime']).datetime))
                self.logger.debug('upserting: %s' % d['indicator'])
                r.count += 1
                r.lasttime = arrow.get(d['lasttime']).datetime.replace(tzinfo=None)
                if not d.get('reporttime'):
                    d['reporttime'] = arrow.utcnow().datetime
                r.reporttime = arrow.get(d['reporttime']).datetime.replace(tzinfo=None)

                if d.get('message'):
                    try:
                        d['message'] = b64decode(d['message'])
                    except Exception as e:
                        # message may already be decoded
                        pass
                    m = Message(message=d['message'], indicator=r)
                    s.add(m)

                n += 1
            else:
                self.logger.debug('skipping: %s' % d['indicator'])
        else:
            # dedup within the batch on (indicator, lasttime)
            if tmp_added.get(d['indicator']):
                if d['lasttime'] in tmp_added[d['indicator']]:
                    self.logger.debug('skipping: %s' % d['indicator'])
                    continue
            else:
                tmp_added[d['indicator']] = set()

            if not d.get('lasttime'):
                d['lasttime'] = arrow.utcnow().datetime.replace(tzinfo=None)
            if not d.get('reporttime'):
                d['reporttime'] = arrow.utcnow().datetime.replace(tzinfo=None)

            if PYVERSION == 2:
                # normalize to naive datetimes for the py2 DB driver
                d['lasttime'] = arrow.get(d['lasttime']).datetime.replace(tzinfo=None)
                d['reporttime'] = arrow.get(d['reporttime']).datetime.replace(tzinfo=None)

            if not d.get('firsttime'):
                d['firsttime'] = d['lasttime']

            ii = Indicator(**d)
            s.add(ii)

            # NOTE(review): `itype is 'ipv4'` relies on CPython string
            # interning; `==` is the intended comparison.
            itype = resolve_itype(d['indicator'])
            if itype is 'ipv4':
                match = re.search('^(\S+)\/(\d+)$', d['indicator'])
                # TODO -- use ipaddress
                if match:
                    ipv4 = Ipv4(ipv4=match.group(1), mask=match.group(2), indicator=ii)
                else:
                    ipv4 = Ipv4(ipv4=d['indicator'], indicator=ii)
                s.add(ipv4)
            elif itype is 'ipv6':
                match = re.search('^(\S+)\/(\d+)$', d['indicator'])
                # TODO -- use ipaddress
                if match:
                    ip = Ipv6(ip=match.group(1), mask=match.group(2), indicator=ii)
                else:
                    ip = Ipv6(ip=d['indicator'], indicator=ii)
                s.add(ip)

            for t in tags:
                t = Tag(tag=t, indicator=ii)
                s.add(t)

            if d.get('message'):
                try:
                    d['message'] = b64decode(d['message'])
                except Exception as e:
                    # message may already be decoded
                    pass
                m = Message(message=d['message'], indicator=ii)
                s.add(m)

            n += 1
            tmp_added[d['indicator']].add(d['lasttime'])

    self.logger.debug('committing')
    s.commit()
    return n
def indicators_search(self, filters, limit=500):
    """Search the SQL store for indicators matching ``filters``.

    IP searches are range queries against the Ipv4/Ipv6 child tables;
    fqdn/email/url are exact matches; an unresolvable indicator value
    falls back to a LIKE search over attached messages. Remaining valid
    filters become column predicates.

    :param filters: dict of search terms (only VALID_FILTERS are honored;
                    ``limit`` and ``nolog`` are consumed here)
    :param limit: maximum rows returned (overridden by filters['limit'])
    :return: list of indicator dicts, newest reporttime first
    :raises InvalidSearch: for too-broad IP prefixes
    """
    self.logger.debug('running search')

    if filters.get('limit'):
        limit = filters['limit']
        del filters['limit']
    if filters.get('nolog'):
        del filters['nolog']

    # keep only whitelisted filter keys
    q_filters = {}
    for f in VALID_FILTERS:
        if filters.get(f):
            q_filters[f] = filters[f]

    s = self.handle().query(Indicator)

    filters = q_filters
    # SECURITY NOTE(review): predicates below are built by string
    # formatting and passed to .filter() as raw SQL — injectable if any
    # filter value is attacker-controlled. Should use bound parameters /
    # SQLAlchemy column expressions.
    sql = []

    if filters.get('indicator'):
        try:
            itype = resolve_itype(filters['indicator'])
            self.logger.debug('itype %s' % itype)
            if itype == 'ipv4':
                # FIX: was `PYVERSION < 3 and (filters['indicator'], str)` —
                # the tuple is always truthy; the intended isinstance check
                # was missing, so py2 decoded unconditionally
                if PYVERSION < 3 and isinstance(filters['indicator'], str):
                    filters['indicator'] = filters['indicator'].decode('utf-8')

                ip = ipaddress.IPv4Network(filters['indicator'])
                mask = ip.prefixlen
                if mask < 8:
                    raise InvalidSearch('prefix needs to be >= 8')

                start = str(ip.network_address)
                end = str(ip.broadcast_address)

                self.logger.debug('{} - {}'.format(start, end))

                s = s.join(Ipv4).filter(Ipv4.ipv4 >= start)
                s = s.filter(Ipv4.ipv4 <= end)

            elif itype == 'ipv6':
                # FIX: same missing isinstance as the ipv4 branch
                if PYVERSION < 3 and isinstance(filters['indicator'], str):
                    filters['indicator'] = filters['indicator'].decode('utf-8')

                ip = ipaddress.IPv6Network(filters['indicator'])
                mask = ip.prefixlen
                if mask < 32:
                    raise InvalidSearch('prefix needs to be >= 32')

                start = str(ip.network_address)
                end = str(ip.broadcast_address)

                self.logger.debug('{} - {}'.format(start, end))

                s = s.join(Ipv6).filter(Ipv6.ip >= start)
                s = s.filter(Ipv6.ip <= end)

            elif itype in ('fqdn', 'email', 'url'):
                sql.append("indicator = '{}'".format(filters['indicator']))

        except InvalidIndicator as e:
            # not a recognizable indicator: full-text search the messages
            self.logger.error(e)
            sql.append("message LIKE '%{}%'".format(filters['indicator']))
            s = s.join(Message)

        del filters['indicator']

    for k in filters:
        if k == 'reporttime':
            sql.append("{} >= '{}'".format('reporttime', filters[k]))
        elif k == 'reporttimeend':
            sql.append("{} <= '{}'".format('reporttime', filters[k]))
        elif k == 'tags':
            sql.append("tags.tag == '{}'".format(filters[k]))
        elif k == 'confidence':
            sql.append("{} >= '{}'".format(k, filters[k]))
        else:
            sql.append("{} = '{}'".format(k, filters[k]))

    sql = ' AND '.join(sql)

    if sql:
        self.logger.debug(sql)

    if filters.get('tags'):
        s = s.join(Tag)

    rv = s.order_by(desc(Indicator.reporttime)).filter(sql).limit(limit)

    return [self._as_dict(x) for x in rv]
def _filter_indicator(self, filters, s):
    """Apply the ``indicator`` search term to query ``s``.

    Strips non-whitelisted filter keys, then narrows the query by indicator
    type: exact match (email/url/hash), range query (ipv4/ipv6), suffix or
    exact match (fqdn). Unresolvable values fall back to a message LIKE
    search.

    :param filters: search-term dict; mutated (invalid keys and
                    'indicator' are removed)
    :param s: SQLAlchemy query over Indicator
    :return: the narrowed query
    :raises InvalidSearch: for too-broad IP prefixes
    :raises InvalidIndicator: for a resolvable but unsupported itype
    """
    for k, v in list(filters.items()):
        if k not in VALID_FILTERS:
            del filters[k]

    if not filters.get('indicator'):
        return s

    i = filters.pop('indicator')

    if PYVERSION == 2:
        if isinstance(i, str):
            i = unicode(i)

    try:
        itype = resolve_itype(i)
    except InvalidIndicator as e:
        logger.error(e)
        # FIX: was `Indicator.Message.like(...)` — Indicator has no Message
        # column, which raises AttributeError; filter the joined Message
        # table's message column instead (matches indicators_search)
        s = s.join(Message).filter(Message.message.like('%{}%'.format(i)))
        return s

    if itype in ['email']:
        s = s.filter(Indicator.indicator == i)
        return s

    if itype == 'ipv4':
        ip = ipaddress.IPv4Network(i)
        mask = ip.prefixlen
        if mask < 8:
            raise InvalidSearch('prefix needs to be >= 8')

        start = str(ip.network_address)
        end = str(ip.broadcast_address)

        logger.debug('{} - {}'.format(start, end))

        s = s.join(Ipv4).filter(Ipv4.ipv4 >= start)
        s = s.filter(Ipv4.ipv4 <= end)
        return s

    if itype == 'ipv6':
        ip = ipaddress.IPv6Network(i)
        mask = ip.prefixlen
        if mask < 32:
            raise InvalidSearch('prefix needs to be >= 32')

        start = str(ip.network_address)
        end = str(ip.broadcast_address)

        logger.debug('{} - {}'.format(start, end))

        s = s.join(Ipv6).filter(Ipv6.ip >= start)
        s = s.filter(Ipv6.ip <= end)
        return s

    if itype == 'fqdn':
        # match the fqdn itself OR any subdomain of it
        s = s.join(Fqdn).filter(or_(
            Fqdn.fqdn.like('%.{}'.format(i)),
            Fqdn.fqdn == str(i))
        )
        return s

    if itype == 'url':
        s = s.join(Url).filter(Url.url == i)
        return s

    if itype in HASH_TYPES:
        s = s.join(Hash).filter(Hash.hash == str(i))
        return s

    raise InvalidIndicator
def upsert_indicators(self, s, n, d, token, tmp_added, batch):
    """Insert or update one indicator dict ``d`` within session ``s``.

    Looks for an existing row with the same provider/itype/indicator
    (plus rdata, type-specific value, and first tag when present). A
    strictly-newer submission bumps the existing row; otherwise a new
    Indicator plus its type-specific child row, tags and optional message
    are inserted. Commits per-indicator when not in batch mode.

    :param s: SQLAlchemy session
    :param n: running count of successful upserts (incremented/decremented)
    :param d: indicator dict; mutated (tags removed/re-joined, times filled)
    :param token: auth token, checked against d['group']
    :param tmp_added: indicator -> set(lasttime) seen this batch (dedupe)
    :param batch: when True, errors are re-raised and commit is deferred
                  to the caller; when False, commit/rollback happens here
    :return: updated count n
    """
    try:
        n += 1
        if not d.get('group'):
            raise InvalidIndicator('missing group')

        if isinstance(d['group'], list):
            d['group'] = d['group'][0]

        # raises AuthError if invalid group
        self._check_token_groups(token, d)

        if PYVERSION == 2:
            if isinstance(d['indicator'], str):
                d['indicator'] = unicode(d['indicator'])

        self.test_valid_indicator(d)

        tags = d.get("tags", [])
        if len(tags) > 0:
            if isinstance(tags, basestring):
                tags = tags.split(',')
            # tags live in the Tag relation, not on Indicator
            del d['tags']

        i = s.query(Indicator).options(lazyload('*')).filter_by(
            provider=d['provider'],
            itype=d['itype'],
            indicator=d['indicator']
        ).order_by(Indicator.lasttime.desc())

        if d.get('rdata'):
            i = i.filter_by(rdata=d['rdata'])

        if d['itype'] == 'ipv4':
            match = re.search(r'^(\S+)\/(\d+)$', d['indicator'])  # TODO -- use ipaddress
            if match:
                i = i.join(Ipv4).filter(Ipv4.ipv4 == match.group(1), Ipv4.mask == match.group(2))
            else:
                i = i.join(Ipv4).filter(Ipv4.ipv4 == d['indicator'])

        if d['itype'] == 'ipv6':
            match = re.search(r'^(\S+)\/(\d+)$', d['indicator'])  # TODO -- use ipaddress
            if match:
                i = i.join(Ipv6).filter(Ipv6.ip == match.group(1), Ipv6.mask == match.group(2))
            else:
                i = i.join(Ipv6).filter(Ipv6.ip == d['indicator'])

        if d['itype'] == 'fqdn':
            i = i.join(Fqdn).filter(Fqdn.fqdn == d['indicator'])

        if d['itype'] == 'url':
            i = i.join(Url).filter(Url.url == d['indicator'])

        if d['itype'] in HASH_TYPES:
            i = i.join(Hash).filter(Hash.hash == d['indicator'])

        if len(tags):
            i = i.join(Tag).filter(Tag.tag == tags[0])

        r = i.first()

        if r:
            # FIX: was `not d.get('lasttime') or d.get('lasttime') == None`
            # — the second clause is redundant (get() returns None, which
            # is already falsy)
            if not d.get('lasttime'):
                # If no lasttime submitted, presume a lasttime value of now
                d['lasttime'] = arrow.utcnow().datetime

            # only upsert when the submission is strictly newer
            if d.get('lasttime') and arrow.get(d['lasttime']).datetime > arrow.get(r.lasttime).datetime:
                logger.debug('{} {}'.format(arrow.get(r.lasttime).datetime, arrow.get(d['lasttime']).datetime))
                logger.debug('upserting: %s' % d['indicator'])
                r.count += 1
                r.lasttime = arrow.get(d['lasttime']).datetime.replace(tzinfo=None)

                if not d.get('reporttime'):
                    d['reporttime'] = arrow.utcnow().datetime
                r.reporttime = arrow.get(d['reporttime']).datetime.replace(tzinfo=None)

                if d.get('message'):
                    # best effort: message may or may not be base64-encoded
                    try:
                        d['message'] = b64decode(d['message'])
                    except Exception as e:
                        pass
                    m = Message(message=d['message'], indicator=r)
                    s.add(m)
            else:
                logger.debug('skipping: %s' % d['indicator'])
                n -= 1
        else:
            if tmp_added.get(d['indicator']):
                if d.get('lasttime') in tmp_added[d['indicator']]:
                    logger.debug('skipping: %s' % d['indicator'])
                    n -= 1
                    return n
            else:
                tmp_added[d['indicator']] = set()

            if not d.get('lasttime'):
                d['lasttime'] = arrow.utcnow().datetime.replace(tzinfo=None)
            if not d.get('reporttime'):
                d['reporttime'] = arrow.utcnow().datetime.replace(tzinfo=None)
            if PYVERSION == 2:
                # normalize to naive datetimes for the py2 DB driver
                d['lasttime'] = arrow.get(d['lasttime']).datetime.replace(tzinfo=None)
                d['reporttime'] = arrow.get(d['reporttime']).datetime.replace(tzinfo=None)
            if not d.get('firsttime'):
                d['firsttime'] = d['lasttime']

            ii = Indicator(**d)
            logger.debug('inserting: %s' % d['indicator'])
            s.add(ii)

            itype = resolve_itype(d['indicator'])

            # FIX: all branches below used `itype is '...'` — string
            # identity comparison only works by CPython interning accident;
            # use equality
            if itype == 'ipv4':
                match = re.search(r'^(\S+)\/(\d+)$', d['indicator'])  # TODO -- use ipaddress
                if match:
                    ipv4 = Ipv4(ipv4=match.group(1), mask=match.group(2), indicator=ii)
                else:
                    ipv4 = Ipv4(ipv4=d['indicator'], indicator=ii)
                s.add(ipv4)
            elif itype == 'ipv6':
                match = re.search(r'^(\S+)\/(\d+)$', d['indicator'])  # TODO -- use ipaddress
                if match:
                    ip = Ipv6(ip=match.group(1), mask=match.group(2), indicator=ii)
                else:
                    ip = Ipv6(ip=d['indicator'], indicator=ii)
                s.add(ip)

            if itype == 'fqdn':
                fqdn = Fqdn(fqdn=d['indicator'], indicator=ii)
                s.add(fqdn)

            if itype == 'url':
                url = Url(url=d['indicator'], indicator=ii)
                s.add(url)

            if itype in HASH_TYPES:
                h = Hash(hash=d['indicator'], indicator=ii)
                s.add(h)

            for t in tags:
                t = Tag(tag=t, indicator=ii)
                s.add(t)

            if d.get('message'):
                # best effort: message may or may not be base64-encoded
                try:
                    d['message'] = b64decode(d['message'])
                except Exception as e:
                    pass
                m = Message(message=d['message'], indicator=ii)
                s.add(m)

            tmp_added[d['indicator']].add(d['lasttime'])

        # if we're in testing mode, this needs re-attaching since we've manipulated the dict for Indicator()
        # see test_store_sqlite
        d['tags'] = ','.join(tags)

    except Exception as e:
        logger.error(e)
        if batch:
            logger.debug('Failing batch - passing exception to upper layer')
            raise
        else:
            n -= 1
            logger.debug('Rolling back individual transaction..')
            s.rollback()

    # When this function is called in non-batch mode, we need to commit each individual indicator at this point.
    # For batches, the commit happens at a higher layer.
    if not batch:
        try:
            logger.debug('Committing individual indicator')
            # FIX: removed unused `start = time.time()` — its value was
            # never read
            s.commit()
        except Exception as e:
            n -= 1
            logger.error(e)
            logger.debug('Rolling back individual transaction..')
            s.rollback()

    return n
def indicators_search(self, filters, sort=None, raw=False):
    """Search the Elasticsearch indicator indices.

    Builds the query with elasticsearch-dsl
    (http://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html):
    IP searches become range filters (ipv6 values hex-encoded to match the
    stored form), email/url/fqdn become term filters, and an unresolvable
    indicator value falls back to a full-text match over messages.

    :param filters: dict of search terms (only VALID_FILTERS honored;
                    ``limit`` and ``nolog`` consumed here)
    :param sort: optional sort expression passed to the DSL
    :param raw: when True return raw ES hits instead of _source dicts
    :return: list of hits (raw) or _source dicts; [] on request error
    :raises InvalidSearch: for too-broad IP prefixes
    """
    limit = filters.get('limit')
    if limit:
        del filters['limit']
    else:
        limit = LIMIT

    nolog = filters.get('nolog')
    if nolog:
        del filters['nolog']

    timeout = TIMEOUT

    s = Indicator.search(index='indicators-*')
    s = s.params(size=limit, timeout=timeout)

    if sort:
        s = s.sort(sort)

    # keep only whitelisted filter keys
    q_filters = {}
    for f in VALID_FILTERS:
        if filters.get(f):
            q_filters[f] = filters[f]

    if q_filters.get('indicator'):
        try:
            itype = resolve_itype(q_filters['indicator'])

            if itype == 'ipv4':
                if PYVERSION == 2:
                    q_filters['indicator'] = unicode(q_filters['indicator'])
                ip = ipaddress.IPv4Network(q_filters['indicator'])
                mask = ip.prefixlen
                if mask < 8:
                    raise InvalidSearch('prefix needs to be greater than or equal to 8')

                start = str(ip.network_address)
                end = str(ip.broadcast_address)

                s = s.filter('range', indicator_ipv4={'gte': start, 'lte': end})

            # FIX: was `elif itype is 'ipv6'` — string identity comparison
            # only works by CPython interning accident; use equality
            elif itype == 'ipv6':
                if PYVERSION == 2:
                    q_filters['indicator'] = unicode(q_filters['indicator'])
                ip = ipaddress.IPv6Network(q_filters['indicator'])
                mask = ip.prefixlen
                if mask < 32:
                    raise InvalidSearch('prefix needs to be greater than or equal to 32')

                # hex-encode the packed addresses so range comparison works
                # against the hex-stored indicator_ipv6 field
                start = binascii.b2a_hex(socket.inet_pton(socket.AF_INET6, str(ip.network_address))).decode('utf-8')
                end = binascii.b2a_hex(socket.inet_pton(socket.AF_INET6, str(ip.broadcast_address))).decode('utf-8')

                s = s.filter('range', indicator_ipv6={'gte': start, 'lte': end})

            elif itype in ('email', 'url', 'fqdn'):
                s = s.filter('term', indicator=q_filters['indicator'])

        except InvalidIndicator:
            # not a recognizable indicator: full-text search the messages
            s = s.query("match", message=q_filters['indicator'])

        del q_filters['indicator']

    for f in q_filters:
        kwargs = {f: q_filters[f]}
        s = s.filter('term', **kwargs)

    try:
        rv = s.execute()
    except elasticsearch.exceptions.RequestError as e:
        self.logger.error(e)
        return []

    if raw:
        try:
            return rv.hits.hits
        except KeyError:
            return []
    else:
        try:
            data = []
            for x in rv.hits.hits:
                if x['_source'].get('message'):
                    # re-encode messages for transport
                    x['_source']['message'] = b64encode(x['_source']['message'].encode('utf-8'))
                data.append(x['_source'])
            return data
        except KeyError as e:
            self.logger.error(e)
            return []