def load_sources():
    sources = reactor.load_sources(reactor.PATH_CONF + '/sources.cfg')
    if len(sources) > 0:
        reactor.status('info', 'known bad', '%d sources added to queue' % len(sources))
        return True
    return False
def start_module(self, module):
    """
    Start execution of collection modules.

    This function's only purpose is to take a module name as input,
    verify that the module exists, find the correct function from the
    Module() class and then execute it. There are various checks to
    ensure only valid module names are being passed up until this
    point, so any input we receive here should be valid.
    """
    if module in self.running:
        reactor.status('info', 'arcreactor', 'collection module %s is already running' % module)
    elif module in reactor.modules.keys():
        if module == 'all':
            for self.name in reactor.modules.keys():
                reactor.status('info', 'arcreactor', 'starting collection module %s' % self.name)
                self.running.append(self.name)
            Module.run_pastebin()
            Module.run_knownbad()
            Module.run_otx()
        elif module == 'pastebin':
            Module.run_pastebin()
        elif module == 'otx':
            Module.run_otx()
        elif module == 'knownbad':
            Module.run_knownbad()
        else:
            reactor.status('warn', 'arcreactor', '%s is not a valid collection module' % module)
    else:
        reactor.status('warn', 'arcreactor', '%s is not a valid collection module' % module)
def load_words():
    watch_list = reactor.load_keywords(reactor.PATH_CONF + '/keywords.cfg')
    if len(watch_list) > 0:
        reactor.status('info', 'pastebin', '%d keywords added to watch list' % len(watch_list))
        return True
    return False
def get_stats(self, type='all'):
    """
    Gather statistics on running and queued jobs.

    Jobs.get_stats() interacts with the job_stats hash to pull down
    information on running, paused and queued jobs. This function is
    only called when the user issues the console command 'info tasks'.
    """
    if len(job_stats) > 0:
        if type == 'all':
            for self.job_title in job_stats.keys():
                print('\n%s => ' % self.job_title)
                for self.key, self.value in job_stats[self.job_title].iteritems():
                    print('{0:12}: \t {1:16}'.format(self.key, self.value))
        elif type in job_stats.keys():
            print('\n%s => ' % type)
            for self.key, self.value in job_stats[type].iteritems():
                print('{0:12}: \t {1:16}'.format(self.key, self.value))
        else:
            reactor.status('info', 'arcreactor', 'cannot find job %s' % type)
    else:
        reactor.status('info', 'arcreactor', 'no running or queued jobs')
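# Illustrative output of the 'info tasks' console command for a single finished
# job. The values shown are examples only; the real keys and values come from
# the job_stats entries written by the Module.run_* functions.
#
#   pastebin =>
#   status      :    finished
#   started     :    <timestamp>
#   message     :    finished successfully
#   events      :    3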
def gather_exploits():
    sec_tracker = 'http://securitytracker.com/archives/summary/9000.html'
    try:
        reactor.status('info', 'exploits', 'retrieving exploits from securitytracker.com')
        req = reactor.http_request(sec_tracker)
        if req is not None:
            for line in req.split('\n'):
                if '<a href="/id/' in line:
                    name = line.split('">')[1].split("</a>")[0]
                    names.append(name)
    except:
        reactor.status('warn', 'exploits', 'failed to retrieve exploits from securitytracker.com')
def gather_data(source):
    try:
        reactor.status('info', 'known bad', 'retrieving hosts from %s' % source)
        raw = reactor.http_request(source)
        if raw is not None:
            data = re.findall(ip_regex, raw)
            if not data:
                data = re.findall(dom_regex, raw)
            return data, source
    except:
        reactor.status('warn', 'known bad', 'failed to retrieve hosts from %s' % source)
def run_otx(self):
    reactor.status('info', 'otx', 'launching otx module')
    job_stats['otx'] = {
        'status': 'running',
        'started': str(datetime.now()).split('.')[0],
        'message': 'loading keywords',
        'events': 0
    }
    if otx.gather_data():
        job_stats['otx'] = {
            'status': 'finished',
            'message': 'finished successfully',
            'ended': str(datetime.now()).split('.')[0],
            'events': otx.count
        }
    else:
        job_stats['otx'] = {
            'status': 'finished',
            'message': 'finished with errors',
            'ended': str(datetime.now()).split('.')[0]
        }
def gather_content(post_id):
    try:
        raw = reactor.http_request('http://pastebin.com/raw.php?i=%s' % post_id)
        queue.remove(post_id)
        if raw is not None and 'Unknown Paste ID!' not in raw:
            reactor.status('info', 'pastebin', 'searching post id %s' % post_id)
            if '\r\n' in raw:
                lines = raw.split('\r\n')
                for line in lines:
                    search_raw(line, post_id)
            else:
                search_raw(raw, post_id)
    except:
        reactor.status('warn', 'pastebin', 'failed to fetch post id %s' % post_id)
def receive(command):
    if command.startswith('start'):
        module = command.split(' ')[1]
        Jobs.start_module(module)
    #elif command.startswith('kill'):
    #    task = command.split(' ')[1]
    #    if task == 'all':
    #        Jobs.kill_all()
    #    elif task in reactor.modules.keys():
    #        Jobs.kill_job(task)
    #    else:
    #        reactor.status('warn', 'arcreactor', '%s is not a valid option')
    elif command == 'info tasks':
        Jobs.get_stats()
    elif command.startswith('cfg'):
        conf = command.split(' ')[1]
        Config.manage(conf)
    else:
        reactor.status('warn', 'arcreactor', '%s is not a valid command' % command)
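# A minimal sketch of how console input is dispatched through receive(). The
# Jobs and Config instances and the reactor module are assumed to be set up
# elsewhere in arcreactor; the 'keywords' argument below is hypothetical.
#
#   receive('start pastebin')   -> Jobs.start_module('pastebin')
#   receive('start all')        -> Jobs.start_module('all')
#   receive('info tasks')       -> Jobs.get_stats()
#   receive('cfg keywords')     -> Config.manage('keywords')
#   receive('bogus')            -> reactor.status() warning about an invalid command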
def run_pastebin(self):
    job_stats['pastebin'] = {
        'status': 'running',
        'started': str(datetime.now()).split('.')[0],
        'message': 'loading keywords',
        'events': 0
    }
    reactor.status('info', 'pastebin', 'launching pastebin module')
    if pastebin.load_words():
        job_stats['pastebin']['message'] = 'collecting post archive'
        pastebin.gather_archive()
        if len(pastebin.queue) > 0:
            for post in pastebin.queue:
                job_stats['pastebin']['message'] = 'searching post %s' % post
                # the search_raw function is called from within gather_content
                pastebin.gather_content(post)
        job_stats['pastebin']['events'] = len(pastebin.found)
        if len(pastebin.found) > 0:
            for self.post_id, self.data in pastebin.found.iteritems():
                job_stats['pastebin']['message'] = 'sending syslog events'
                self.cef = 'CEF:0|OSINT|ArcReactor|1.0|100|Watchlist Keyword Found|1|src=%s msg=%s' % (self.post_id, self.data)
                reactor.send_syslog(self.cef)
        job_stats['pastebin'] = {
            'status': 'finished',
            'message': 'finished successfully',
            'ended': str(datetime.now()).split('.')[0]
        }
    else:
        job_stats['pastebin'] = {
            'status': 'finished',
            'message': 'finished with errors',
            'ended': str(datetime.now()).split('.')[0]
        }
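# Example of the CEF event emitted for a watch list hit, following the format
# string used above. The post id and message are illustrative placeholders;
# at run time they come from the pastebin.found hash.
#
#   CEF:0|OSINT|ArcReactor|1.0|100|Watchlist Keyword Found|1|src=<post_id> msg=<matching line>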
def gather_archive():
    try:
        posts = reactor.http_request(archive)
        posts = regex.findall(posts)
        for p in posts:
            post_id, post_title = p[0], p[1]
            if post_id not in queue:
                reactor.status('info', 'pastebin', 'post id %s added to queue' % post_id)
                queue.append(post_id)
        reactor.status('info', 'pastebin', 'total posts added to queue: %d' % len(queue))
    except:
        reactor.status('warn', 'pastebin', 'failed to fetch pastebin archive')
def gather_data():
    # count is kept at module level so Module.run_otx() can read otx.count
    global count
    try:
        data = reactor.http_request('http://reputation.alienvault.com/reputation.snort')
        if data is not None:
            reactor.status('info', 'OTX', 'attempting to parse reputation data')
            for line in data.split('\n'):
                # skip comment lines and blank lines
                if not line.startswith('#') and len(line) > 0:
                    try:
                        d = line.split('#')
                        addr, info = d[0], d[1]
                        cef = 'CEF:0|OSINT|ArcReactor|1.0|100|%s|1|src=%s msg=%s' % (info, addr, 'http://reputation.alienvault.com/reputation.snort')
                        reactor.status('info', 'OTX', 'sending CEF syslog for %s - %s' % (info, addr))
                        reactor.send_syslog(cef)
                        count += 1
                    except IndexError:
                        continue
            reactor.status('info', 'OTX', 'sent %d total events' % count)
            return True
    except:
        reactor.status('warn', 'OTX', 'failed to retrieve OTX database')
        return False
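# The parser above assumes each non-comment line of reputation.snort looks
# roughly like "<ip address> # <description>", e.g. (illustrative, not a real
# entry):
#
#   203.0.113.7 # Malicious Host
#
# split('#') yields the address in d[0] and the description in d[1]; lines
# without a '#' raise IndexError and are skipped.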
def search_raw(data, post_id):
    for word in watch_list:
        if word in data:
            found[post_id] = data
            reactor.status('info', 'pastebin', 'found %s in pastebin post %s' % (word, post_id))