def analyze(self, line):
    """Parse one whitespace-separated feed line and commit the hostname it names.

    Expected fields: [numeric index] hostname description reference
    [date | 'relisted' date]. Comment lines and lines with fewer than
    three fields are ignored.
    """
    line = line.strip()
    sline = line.split()
    try:
        # Ignore comments and entries with no clear reference.
        if line[0] != "#" and len(sline) > 2:
            if sline[0].isdigit():
                del sline[0]  # remove the useless numeric first field
            _hostname = Hostname(hostname=sline[0])
            evil = {}
            evil["source"] = self.name
            evil["id"] = md5.new(sline[0] + sline[1]).hexdigest()
            evil["description"] = sline[1]  # malware, EK, etc
            evil["reference"] = sline[2]  # GG safe browsing, blog, other blacklist, etc...
            # Optional trailing date of (re-)inclusion in the feed.
            # fixed: guard the indexes — a 4th/5th field is not always present,
            # and the resulting IndexError was silently dropping valid records.
            if len(sline) > 3 and sline[3]:
                if sline[3] == "relisted" and len(sline) > 4 and sline[4]:
                    evil["date_added"] = datetime.datetime.strptime(sline[4], "%Y%m%d")
                else:
                    evil["date_added"] = datetime.datetime.strptime(sline[3], "%Y%m%d")
            _hostname.add_evil(evil)
            # fixed: records without a date previously raised KeyError here and
            # were never committed.
            if "date_added" in evil:
                _hostname.seen(first=evil["date_added"])
            else:
                _hostname.seen()
            self.commit_to_db(_hostname)
    except Exception as e:  # fixed: Python 2-only `except Exception, e` syntax
        toolbox.debug_output(str(e), type="error")
def analyze(self, dict):
    """Extract a C2 hostname from one RSS entry dict and commit it to the DB.

    Expects keys 'title' (contains the hostname), 'description'
    ("Status: <status>") and 'guid' ("...id=<hex id>").
    """
    # Create the new Hostname and store it in the DB.
    hostname = Hostname(hostname=toolbox.find_hostnames(dict['title'])[0])
    if hostname['value'] is None:  # fixed: identity check instead of `== None`
        return
    # The feed entry itself becomes the evil record, enriched below.
    evil = dict
    evil['status'] = re.search("Status: (?P<status>\S+)", dict['description']).group('status')
    evil['id'] = md5.new(re.search(r"id=(?P<id>[a-f0-9]+)", dict['guid']).group('id')).hexdigest()
    evil['source'] = self.name
    hostname.add_evil(evil)
    self.commit_to_db(hostname)
class DShieldSuspiciousDomainsMedium(Feed):
    """DShield medium-sensitivity suspicious-domains feed (line-oriented)."""

    def __init__(self, name):
        super(DShieldSuspiciousDomainsMedium, self).__init__(name)
        self.name = "DShieldSuspiciousDomainsMedium"
        self.description = "DShield medium sensitivity suspicious domains"
        self.source = "http://www.dshield.org/feeds/suspiciousdomains_Medium.txt"
        self.confidence = 10

    def update(self):
        # Line-oriented feed: the base class fetches the source and calls
        # analyze() once per line.
        self.update_lines()

    def analyze(self, line):
        """Turn one feed line into a (Hostname, Evil) pair, or None for noise."""
        if line.startswith('#') or line.startswith('\n'):
            return
        try:
            hostname = toolbox.find_hostnames(line)[0]
        except Exception:  # fixed: Py2-only `except Exception, e`; `e` was unused
            return  # no hostname found on this line
        # Create the new hostname element and its evil descriptor.
        hostname = Hostname(hostname=hostname, tags=['evil'])
        evil = Evil()
        evil['value'] = "%s (DShield suspicious domain)" % hostname['value']
        evil['tags'] = ['dshield', 'medium']
        return hostname, evil
def analyze(self, dict):
    """Parse a Feodo RSS entry into (host element, Evil), or (None, None).

    The host element is an Ip or Hostname depending on what the
    'Host:' field of the description contains.
    """
    evil = Evil()
    # description
    evil['description'] = dict['description']
    host = re.search("Host: (?P<host>\S+),", dict['description'])
    if not host:
        # fixed: the original fell through with host=None when the regex
        # missed, then crashed on host['value'] below.
        return None, None
    if toolbox.is_ip(host.group('host')):
        host = Ip(toolbox.is_ip(host.group('host')))
    elif toolbox.is_hostname(host.group('host')):
        host = Hostname(toolbox.is_hostname(host.group('host')))
    else:
        return None, None
    version = re.search("Version: (?P<version>[ABCD])", dict['description'])
    if version is not None:  # fixed: identity check instead of `!= None`
        evil['version'] = version.group('version')
    else:
        evil['version'] = 'N/A'
    # linkback
    evil['link'] = dict['link']
    # tags
    evil['tags'] += ['feodo', 'cridex', 'malware', 'exe']
    evil['value'] = "Feodo C2 ({})".format(host['value'])
    return host, evil
def analyze(self, dict):
    """Parse a Feodo Tracker RSS entry and commit the C2 host to the DB."""
    evil = dict
    date_string = re.search(r"\((?P<datetime>[\d\- :]+)\)", dict['title']).group('datetime')
    try:
        evil['date_added'] = datetime.datetime.strptime(
            date_string, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        pass  # malformed date: proceed without a first-seen timestamp
    g = re.match(r'^Host: (?P<host>.+), Version: (?P<version>\w)', dict['description'])
    g = g.groupdict()
    evil['host'] = g['host']
    evil['version'] = g['version']
    evil['description'] = FeodoTracker.descriptions[g['version']]
    evil['id'] = md5.new(dict['description']).hexdigest()
    evil['source'] = self.name
    del evil['title']
    if toolbox.is_ip(evil['host']):
        elt = Ip(ip=evil['host'], tags=[FeodoTracker.variants[g['version']]])
    elif toolbox.is_hostname(evil['host']):
        elt = Hostname(hostname=evil['host'], tags=[FeodoTracker.variants[g['version']]])
    else:
        # fixed: `elt` was undefined (NameError) when the host was neither
        # an IP nor a hostname.
        return
    # fixed: KeyError when the date failed to parse above.
    if 'date_added' in evil:
        elt.seen(first=evil['date_added'])
    else:
        elt.seen()
    elt.add_evil(evil)
    self.commit_to_db(elt)
class FeodoTracker(Feed):
    """Feed for the abuse.ch Feodo Tracker (Feodo/Geodo/Dridex C2 servers)."""

    # Long-form description of each botnet version letter, keyed by the
    # "Version:" letter found in feed entries.
    descriptions = {
        'A': "Hosted on compromised webservers running an nginx proxy on port 8080 TCP forwarding all botnet traffic to a tier 2 proxy node. Botnet traffic usually directly hits these hosts on port 8080 TCP without using a domain name.",
        'B': "Hosted on servers rented and operated by cybercriminals for the exclusive purpose of hosting a Feodo botnet controller. Usually taking advantage of a domain name within ccTLD .ru. Botnet traffic usually hits these domain names using port 80 TCP.",
        'C': "Successor of Feodo, completely different code. Hosted on the same botnet infrastructure as Version A (compromised webservers, nginx on port 8080 TCP or port 7779 TCP, no domain names) but using a different URL structure. This Version is also known as Geodo.",
        'D': "Successor of Cridex. This version is also known as Dridex",
    }
    # Malware family name for each version letter (used as element tags).
    variants = {
        'A': "Feodo",
        'B': "Feodo",
        'C': "Geodo",
        'D': "Dridex",
    }

    def __init__(self, name):
        super(FeodoTracker, self).__init__(name)
        self.name = "FeodoTracker"
        self.source = "https://feodotracker.abuse.ch/feodotracker.rss"
        self.description = "Feodo Tracker RSS Feed. This feed shows the latest twenty Feodo C2 servers which Feodo Tracker has identified."
def update(self):
    # Pull <item> entries from the RSS feed and analyze each one.
    for dict in self.update_xml('item', ["title", "link", "description", "guid"]):
        self.analyze(dict)

def analyze(self, dict):
    """Parse a Feodo Tracker RSS entry and commit the C2 host to the DB."""
    evil = dict
    date_string = re.search(r"\((?P<datetime>[\d\- :]+)\)", dict['title']).group('datetime')
    try:
        evil['date_added'] = datetime.datetime.strptime(
            date_string, "%Y-%m-%d %H:%M:%S")
    except ValueError:  # fixed: Python 2-only `except ValueError, e`; e unused
        pass  # malformed date: proceed without a first-seen timestamp
    g = re.match(r'^Host: (?P<host>.+), Version: (?P<version>\w)', dict['description'])
    g = g.groupdict()
    evil['host'] = g['host']
    evil['version'] = g['version']
    evil['description'] = FeodoTracker.descriptions[g['version']]
    evil['id'] = md5.new(dict['description']).hexdigest()
    evil['source'] = self.name
    del evil['title']
    if toolbox.is_ip(evil['host']):
        elt = Ip(ip=evil['host'], tags=[FeodoTracker.variants[g['version']]])
    elif toolbox.is_hostname(evil['host']):
        elt = Hostname(hostname=evil['host'], tags=[FeodoTracker.variants[g['version']]])
    else:
        # fixed: `elt` was undefined (NameError) when the host was neither
        # an IP nor a hostname.
        return
    elt.add_evil(evil)
    self.commit_to_db(elt)
class MalwareTrafficAnalysis(Feed):
    """Feed collecting suspicious IPs/domains from malware-traffic-analysis.net."""

    def __init__(self, name):
        super(MalwareTrafficAnalysis, self).__init__(name, run_every="12h")
        self.name = "MalwareTrafficAnalysis"
        self.source = "http://www.malware-traffic-analysis.net/suspicious-ip-addresses-and-domains.txt"
        self.description = "Collects results from malware-traffic-analysis.net"

    def update(self):
        # Fetch the plain-text feed and process it line by line.
        feed = urllib2.urlopen(self.source).read().split('\n')
        for line in feed:
            self.analyze(line)

    def analyze(self, line):
        """Parse one CSV feed line, commit the IP and its related hostnames.

        Line format: ip,port,domains,traffic_info,description,date.
        """
        if line.startswith("#") or line.startswith("IP address"):
            return  # skip comments and the header line
        try:
            ip, port, domains, traffic_info, description, date_string = line.split(
                ',')
        except ValueError:  # fixed: Python 2-only `except ValueError, e`
            print("Malformed line, skipping")  # fixed: Py2-only print statement
            return
        evil = {}
        evil['ip'] = ip
        port = re.search('[\d]+', port)
        if port:
            evil['port'] = port.group()
        evil['domains'] = domains
        evil['description'] = "{}".format(description)
        if traffic_info:
            evil['description'] += " ({})".format(traffic_info)
        evil['date_added'] = datetime.datetime.strptime(
            date_string, "%Y-%m-%d")
        evil['id'] = md5.new(evil['description'] + evil['ip'] + date_string).hexdigest()
        evil['source'] = self.name
        ip = Ip(ip=ip)
        # Keep only the slash-separated tokens that are valid hostnames.
        domains = [
            d.strip() for d in domains.split('/') if toolbox.is_hostname(d.strip())
        ]
        ip.seen(first=evil['date_added'])
        ip.add_evil(evil)
        i = self.commit_to_db(ip)
        # Link each hostname to the IP that serves it.
        for d in domains:
            h = Hostname(hostname=d)
            h.seen(first=evil['date_added'])
            h.add_evil(evil)
            h = self.commit_to_db(h)
            self.model.connect(h, i)
def analyze(self, line):
    """Parse one whitespace-separated feed line and commit the hostname it names.

    Expected fields: [numeric index] hostname description reference
    [date | 'relisted' date]. Comment lines and lines with fewer than
    three fields are ignored.
    """
    line = line.strip()
    sline = line.split()
    try:
        # Ignore comments and entries with no clear reference.
        if line[0] != '#' and len(sline) > 2:
            if sline[0].isdigit():
                del sline[0]  # remove the useless numeric first field
            _hostname = Hostname(hostname=sline[0])
            evil = {}
            evil['source'] = self.name
            evil['id'] = md5.new(sline[0] + sline[1]).hexdigest()
            evil['description'] = sline[1]  # malware, EK, etc
            evil['reference'] = sline[2]  # GG safe browsing, blog, other blacklist, etc...
            # Optional trailing date of (re-)inclusion in the feed.
            # fixed: guard the indexes — the 4th/5th field is not always
            # present, and the IndexError was silently dropping records.
            if len(sline) > 3 and sline[3]:
                if sline[3] == 'relisted' and len(sline) > 4 and sline[4]:
                    evil['date_added'] = datetime.datetime.strptime(
                        sline[4], "%Y%m%d")
                else:
                    evil['date_added'] = datetime.datetime.strptime(
                        sline[3], "%Y%m%d")
            _hostname.add_evil(evil)
            # fixed: records without a date previously raised KeyError here
            # and were never committed.
            if 'date_added' in evil:
                _hostname.seen(first=evil['date_added'])
            else:
                _hostname.seen()
            self.commit_to_db(_hostname)
    except Exception as e:  # fixed: Python 2-only `except Exception, e` syntax
        toolbox.debug_output(str(e), type='error')
def analyze(self, dict):
    """Turn a Palevo tracker RSS entry into a (Hostname, Evil) pair."""
    # Create the new Hostname and store it in the DB.
    hostname = Hostname(hostname=toolbox.find_hostnames(dict['title'])[0])
    if hostname['value'] is None:  # fixed: identity check instead of `== None`
        return
    evil = Evil()
    evil['value'] = "Palevo CC (%s)" % hostname['value']
    evil['status'] = re.search("Status: (?P<status>\S+)", dict['description']).group('status')
    evil['info'] = dict['description']
    evil['tags'] = ['cc', 'palevo']
    return hostname, evil
def analyze(self, line):
    """Parse one whitespace-separated feed line and commit the hostname it names.

    Expected fields: [numeric index] hostname description reference
    [date | 'relisted' date]. Comment lines and lines with fewer than
    three fields are ignored.
    """
    line = line.strip()
    sline = line.split()
    # fixed: an empty line crashed on line[0] (IndexError) — there is no
    # try wrapper in this variant.
    if not line or line[0] == '#' or len(sline) <= 2:
        # ignore blank lines, comments and entries with no clear reference
        return
    if sline[0].isdigit():
        del sline[0]  # remove the useless numeric first field
    _hostname = Hostname(hostname=sline[0])
    evil = {}
    evil['source'] = self.name
    evil['id'] = md5.new(sline[0] + sline[1]).hexdigest()
    evil['description'] = sline[1]  # malware, EK, etc
    evil['reference'] = sline[2]  # GG safe browsing, blog, other blacklist, etc...
    # Optional trailing date of (re-)inclusion in the feed.
    # fixed: sline[3]/sline[4] raised IndexError on short lines.
    if len(sline) > 3 and sline[3]:
        if sline[3] == 'relisted' and len(sline) > 4 and sline[4]:
            evil['date_added'] = datetime.datetime.strptime(sline[4], "%Y%m%d")
        else:
            evil['date_added'] = datetime.datetime.strptime(sline[3], "%Y%m%d")
    _hostname.add_evil(evil)
    self.commit_to_db(_hostname)
def analyze(self, dict):
    """Parse a SpyEye C2 RSS entry into an (element, Evil) pair.

    Returns an (Ip-or-Hostname, Evil) pair; the caller commits them.
    """
    # We create an Evil object. Evil objects are what Malcom uses
    # to store anything it considers evil. Malware, spam sources, etc.
    # Remember that you can create your own datatypes, if need be.
    evil = Evil()

    # We start populating the Evil() object's attributes with
    # information from the dict we parsed earlier

    # description
    evil['description'] = dict['link'] + " " + dict['description']

    # status
    status = re.search("Status: (?P<status>\S+),", dict['description'])
    if status:
        evil['status'] = status.group('status')
    else:
        evil['status'] = "unknown"

    # linkback
    evil['guid'] = dict['guid']

    # tags
    evil['tags'] += ['spyeye', 'malware', 'cc']

    # This is important. Values have to be unique, since it's this way that
    # Malcom will identify them in the database.
    # This is probably not the best way, but it will do for now.
    host = re.search("Host: (?P<host>\S+),", dict['description']).group("host")
    if toolbox.find_ips(host):
        elt = Ip(host, tags=['cc', 'spyeye', 'malware'])
    else:
        elt = Hostname(host, tags=['cc', 'spyeye', 'malware'])

    evil['value'] = "SpyEye CC (%s)" % elt['value']

    # fixed: removed the unreachable `self.commit_to_db(elt, evil)` that
    # followed this return statement (dead code).
    return elt, evil
def add_text(self, text, tags=None):
    """Create Url/Hostname/Ip elements from an iterable of strings and save them.

    Each non-blank string is classified as a URL, hostname or IP (first match
    wins); unclassifiable strings are skipped. Returns the single saved
    element when exactly one was added, otherwise the list of saved elements
    (possibly empty).
    """
    # fixed: `tags=[]` is a mutable default argument shared across calls.
    if tags is None:
        tags = []
    added = []
    for t in text:
        elt = None
        if t.strip() != "":
            if is_url(t):
                elt = Url(is_url(t), [])
            elif is_hostname(t):
                elt = Hostname(is_hostname(t), [])
            elif is_ip(t):
                elt = Ip(is_ip(t), [])
        if elt:
            added.append(self.save_element(elt, tags))
    if len(added) == 1:
        return added[0]
    else:
        return added
def analyze(self, dict):
    """Record a malicious FQDN reported by the Infosec CERT-PA feed."""
    # The feed entry itself serves as the evil record; enrich it in place.
    evil = dict
    evil['source'] = self.name
    evil['description'] = self.description
    evil['host'] = dict['domain']
    evil['id'] = md5.new(evil['domain'] + 'InfosecCertPaItFQDN').hexdigest()
    # Persist the hostname element carrying this evil record.
    host_elt = Hostname(hostname=evil['host'])
    host_elt.seen()
    host_elt.add_evil(evil)
    self.commit_to_db(host_elt)
def analyze(self, line):
    """Turn one malwaredomains blocklist line into a (Hostname, Evil) pair.

    Tab-separated format, e.g.:
    20151201  agasi-story.info  malicious  blog.dynamoo.com  20131130 ...
    """
    if line.startswith('#') or line.startswith('\n'):
        return
    splitted_mdl = line.split('\t')
    # 20151201	agasi-story.info	malicious	blog.dynamoo.com	20131130	20121201	20120521	20110217
    # Create the new hostname and store it in the DB.
    hostname = Hostname(hostname=splitted_mdl[2])
    if hostname['value'] is None:  # fixed: identity check instead of `== None`
        return  # hostname not found
    evil = Evil()
    evil['value'] = "Malware domain blocklist (%s)" % hostname['value']
    # Strip non-word characters from the classification before tagging.
    evil['tags'] = [
        'malwaredomains', re.sub(r'[^\w]', '', splitted_mdl[3])
    ]
    evil['reference'] = splitted_mdl[4]
    return hostname, evil
def analyze(self, line):
    """Parse one malwaredomains.com line and save the hostname it names.

    Tab-separated format: [next-validation-date] domain classification
    reference [dates...].
    """
    if line.startswith('#') or line.startswith('\n'):
        return
    splitted_mdl = re.split(r'\t+', line.lstrip('\t'))
    # 20151201	agasi-story.info	malicious	blog.dynamoo.com	20131130	20121201	20120521	20110217
    # fixed: malformed/short lines previously raised IndexError below.
    if not splitted_mdl:
        return
    # Drop the optional leading numeric "next validation" field.
    if unicode(splitted_mdl[0]).isnumeric():
        splitted_mdl.pop(0)
    if len(splitted_mdl) < 3:
        return
    # Create the new hostname and store it in the DB.
    hostname = Hostname(
        hostname=splitted_mdl[0],
        tags=['malwaredomains', splitted_mdl[1].lower(), splitted_mdl[2]])
    hostname, status = self.analytics.save_element(hostname, with_status=True)
    # Count only newly-inserted elements, not updates of existing ones.
    if not status['updatedExisting']:  # fixed: was `== False`
        self.elements_fetched += 1
def analyze(self, dict):
    """Parse a Feodo Tracker JSON-style entry and commit the C2 host.

    Expects keys 'first_seen', 'dst_ip' and 'malware' in the entry dict.
    """
    evil = dict
    try:
        evil['date_added'] = datetime.datetime.strptime(dict['first_seen'],
                                                        "%Y-%m-%d %H:%M:%S")
    except ValueError:
        pass  # malformed date: proceed without a first-seen timestamp
    evil['host'] = dict['dst_ip']
    evil['version'] = dict['malware']
    evil['description'] = FeodoTracker.descriptions[dict['malware']]
    evil['id'] = md5.new(evil['host'] + evil['description']).hexdigest()
    evil['source'] = self.name
    if toolbox.is_ip(evil['host']):
        elt = Ip(ip=evil['host'], tags=[dict['malware']])
    elif toolbox.is_hostname(evil['host']):
        elt = Hostname(hostname=evil['host'], tags=[dict['malware']])
    else:
        # fixed: `elt` was undefined (NameError) when the host was neither
        # an IP nor a hostname.
        return
    # fixed: KeyError when the date failed to parse above.
    if 'date_added' in evil:
        elt.seen(first=evil['date_added'])
    else:
        elt.seen()
    elt.add_evil(evil)
    self.commit_to_db(elt)
class ZeusGameOverDomains(Feed):
    """
    This gets data from http://virustracker.info/text/ZeuSGameover_Domains.txt
    Sensitivity level: high (for now)
    """

    def __init__(self, name):
        super(ZeusGameOverDomains, self).__init__(name, run_every="12h")
        self.name = "ZeusGameOverDomains"
        self.source = "http://virustracker.info/text/ZeuSGameover_Domains.txt"
        # fixed: `self.description` was a bare no-op expression — the
        # attribute was never assigned.
        self.description = "ZeuS GameOver domains from virustracker.info"

    def update(self):
        # fixed: use self.source instead of duplicating the URL literal.
        feed = urllib2.urlopen(self.source).readlines()
        self.status = "OK"
        for line in feed:
            self.analyze(line)
        return True

    def analyze(self, line):
        """Extract a hostname from one feed line and save it tagged zeusgameover."""
        if line.startswith('#') or line.startswith('\n'):
            return
        try:
            hostname = toolbox.find_hostnames(line)[0]
        except Exception:  # fixed: Py2-only `except Exception, e`; e unused
            # if find_hostname raises an exception, it means no hostname
            # was found in the line, so we return
            return
        # Create the new hostname element and store it in the DB.
        hostname = Hostname(hostname=hostname,
                            tags=['virustracker.info', 'zeusgameover'])
        hostname, new = self.model.save(hostname, with_status=True)
        # Count only newly-inserted elements.
        if new:
            self.elements_fetched += 1
def analyze(self, line):
    """Parse one CSV feed line, commit the IP and its related hostnames.

    Line format: ip,port,domains,traffic_info,description,date.
    """
    # Skip comments and the header line.
    if line.startswith("#") or line.startswith("IP address"):
        return
    fields = line.split(',')
    if len(fields) != 6:
        # Malformed line, skipping
        return
    ip_str, port_field, domain_field, traffic_info, description, date_string = fields
    evil = {}
    evil['ip'] = ip_str
    port_match = re.search('[\d]+', port_field)
    if port_match:
        evil['port'] = port_match.group()
    evil['domains'] = domain_field
    evil['description'] = "{}".format(description)
    if traffic_info:
        evil['description'] += " ({})".format(traffic_info)
    evil['date_added'] = datetime.datetime.strptime(date_string, "%Y-%m-%d")
    evil['id'] = md5.new(evil['description'] + evil['ip'] + date_string).hexdigest()
    evil['source'] = self.name
    ip_elt = Ip(ip=ip_str)
    # Keep only the slash-separated tokens that are valid hostnames.
    hostnames = [
        token.strip()
        for token in domain_field.split('/')
        if toolbox.is_hostname(token.strip())
    ]
    ip_elt.seen(first=evil['date_added'])
    ip_elt.add_evil(evil)
    saved_ip = self.commit_to_db(ip_elt)
    # Link each hostname to the IP that serves it.
    for name in hostnames:
        host_elt = Hostname(hostname=name)
        host_elt.seen(first=evil['date_added'])
        host_elt.add_evil(evil)
        host_elt = self.commit_to_db(host_elt)
        self.model.connect(host_elt, saved_ip)
# NOTE(review): this span begins mid-method — the enclosing `def update(self):`
# header lies outside the visible chunk. Code tokens are preserved unchanged
# (Python 2 `except ..., e` syntax included).
try:
    feed = urllib2.urlopen("http://virustracker.info/text/ZeuSGameover_Domains.txt").readlines()
    self.status = "OK"
except Exception, e:
    self.status = "ERROR: " + str(e)
    return False
for line in feed:
    self.analyze(line)
return True

def analyze(self, line):
    """Extract a hostname from one feed line and save it tagged zeusgameover."""
    # Skip comments and blank lines.
    if line.startswith('#') or line.startswith('\n'):
        return
    try:
        hostname = toolbox.find_hostnames(line)[0]
    except Exception, e:
        # if find_hostname raises an exception, it means no hostname
        # was found in the line, so we return
        return
    # Create the new URL and store it in the DB
    hostname = Hostname(hostname=hostname, tags=['virustracker.info', 'zeusgameover'])
    hostname, status = self.analytics.save_element(hostname, with_status=True)
    # Count only newly-inserted elements, not updates of existing ones.
    if status['updatedExisting'] == False:
        self.elements_fetched += 1
def update(self):
    """Fetch the DShield high-sensitivity feed and analyze each line.

    Returns True on success, False (with self.status set) on fetch failure.
    """
    try:
        feed = urllib2.urlopen(
            "http://www.dshield.org/feeds/suspiciousdomains_High.txt"
        ).readlines()
        self.status = "OK"
    except Exception as e:  # fixed: Python 2-only `except Exception, e` syntax
        self.status = "ERROR: " + str(e)
        return False
    for line in feed:
        self.analyze(line)
    return True

def analyze(self, line):
    """Save a hostname found on one feed line, tagged dshield/high."""
    if line.startswith('#') or line.startswith('\n'):
        return
    try:
        hostname = toolbox.find_hostnames(line)[0]
    except Exception:  # fixed: Py2-only except-comma; `e` was unused
        return  # no hostname on this line
    # Create the new hostname element and store it in the DB.
    hostname = Hostname(hostname=hostname, tags=['dshield', 'high'])
    hostname, status = self.analytics.save_element(hostname, with_status=True)
    # Count only newly-inserted elements.
    if not status['updatedExisting']:  # fixed: was `== False`
        self.elements_fetched += 1
def analyze(self, dict):
    """Build an Evil record from a SpyEye config RSS entry and link it to its host."""
    # We create an Evil object. Evil objects are what Malcom uses
    # to store anything it considers evil. Malware, spam sources, etc.
    # Remember that you can create your own datatypes, if need be.
    evil = Evil()

    # We start populating the Evil() object's attributes with
    # information from the dict we parsed earlier
    evil['feed'] = "SpyEyeConfigs"
    evil['hostname'] = toolbox.find_hostnames(dict['description'])[0]

    # description
    evil['description'] = dict['link'] + " " + dict['description']

    # status
    if dict['description'].find("offline") != -1:
        evil['status'] = "offline"
    else:
        evil['status'] = "online"

    # md5 — fixed: the local variable was named `md5`, shadowing the md5 module.
    md5_match = re.search("MD5 hash: (?P<md5>[0-9a-f]{32,32})", dict['description'])
    if md5_match is not None:
        evil['md5'] = md5_match.group('md5')
    else:
        evil['md5'] = "No MD5"

    # linkback
    evil['source'] = dict['guid']
    # type
    evil['type'] = 'evil'
    # tags
    evil['tags'] += ['spyeye', 'malware', 'SpyEyeCnc']
    # date_retreived (key spelling kept for DB compatibility)
    evil['date_retreived'] = datetime.datetime.utcnow()

    # This is important. Values have to be unique, since it's this way that
    # Malcom will identify them in the database.
    # This is probably not the best way, but it will do for now.
    evil['value'] = "SpyEye Config"
    if md5_match:
        evil['value'] += " (MD5: %s)" % evil['md5']
    else:
        # fixed: referenced evil['url'], which is never set (guaranteed
        # KeyError on this branch); use the extracted hostname instead.
        evil['value'] += " (URL: %s)" % evil['hostname']

    # Save elements to DB. The status field will contain information on
    # whether this element already existed in the DB.
    evil, status = self.analytics.save_element(evil, with_status=True)
    if status['updatedExisting'] == False:
        self.elements_fetched += 1

    # Create an URL element
    hostname = Hostname(evil['hostname'], ['evil', 'SpyEyeConfigs'])
    # Save it to the DB.
    url, status = self.analytics.save_element(hostname, with_status=True)
    if status['updatedExisting'] == False:
        self.elements_fetched += 1

    # Connect the URL element to the Evil element
    self.analytics.data.connect(hostname, evil, 'hosting')
# NOTE(review): this span begins mid-statement — the enclosing
# `def update(self):` and its `try:` line are outside the visible chunk.
# Code tokens are preserved unchanged (Python 2 `except ..., e` included).
feed = urllib2.urlopen(
    "http://www.malwaredomainlist.com/hostslist/hosts.txt"
).readlines()
self.status = "OK"
except Exception, e:
    self.status = "ERROR: " + str(e)
    return False
for line in feed:
    self.analyze(line)
return True

def analyze(self, line):
    """Save a hostname found on one malwaredomainlist.com hosts line."""
    # Skip comments and blank lines.
    if line.startswith('#') or line.startswith('\n'):
        return
    try:
        hostname = toolbox.find_hostnames(line)[0]
    except Exception, e:
        # if find_hostname raises an exception, it means no hostname
        # was found in the line, so we return
        return
    # Create the new URL and store it in the DB
    hostname = Hostname(hostname=hostname, tags=['malwaredomainlist'])
    hostname, status = self.analytics.save_element(hostname, with_status=True)
    # Count only newly-inserted elements, not updates of existing ones.
    if status['updatedExisting'] == False:
        self.elements_fetched += 1
# NOTE(review): this span begins mid-method — the enclosing `def update(self):`
# header lies outside the visible chunk. Code tokens are preserved unchanged
# (Python 2 `except ..., e` syntax included).
try:
    feed = urllib2.urlopen(
        "https://palevotracker.abuse.ch/?rssfeed").readlines()
    self.status = "OK"
except Exception, e:
    self.status = "ERROR: " + str(e)
    return False
for line in feed:
    self.analyze(line)
return True

def analyze(self, line):
    """Save a hostname found on one Palevo tracker feed line."""
    # Skip comments and blank lines.
    if line.startswith('#') or line.startswith('\n'):
        return
    try:
        hostname = toolbox.find_hostnames(line)[0]
    except Exception, e:
        # if find_hostname raises an exception, it means no hostname
        # was found in the line, so we return
        return
    # Create the new URL and store it in the DB
    hostname = Hostname(hostname=hostname, tags=['palevotracker'])
    hostname, status = self.analytics.save_element(hostname, with_status=True)
    # Count only newly-inserted elements, not updates of existing ones.
    if status['updatedExisting'] == False:
        self.elements_fetched += 1