def update_trails(server=None, force=False):
    """ Update trails from feeds """

    trails = {}
    duplicates = {}

    if server:
        # Remote mode: pull the whole pre-built trails file from UPDATE_SERVER
        # instead of fetching individual feeds locally.
        print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
        _ = retrieve_content(server)
        if not _:
            exit("[!] unable to retrieve data from '%s'" % server)
        else:
            with _fopen(TRAILS_FILE, "w+b") as f:
                f.write(_)
            trails = load_trails()

    # Collect absolute paths of all bundled trail definition files
    trail_files = set()
    for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, "trails"))):
        for filename in filenames:
            trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))

    # Also include user-provided custom trail files, if configured
    if config.CUSTOM_TRAILS_DIR:
        for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, os.path.expanduser(config.CUSTOM_TRAILS_DIR)))):
            for filename in filenames:
                trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))

    # Ensure the per-user working directory exists before any writes happen
    try:
        if not os.path.isdir(USERS_DIR):
            os.makedirs(USERS_DIR, 0755)
    except Exception, ex:
        exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
def fetch():
    """Collect blacklisted domains from ' DNS: <name>' entries in the feed."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for name in re.findall(r" DNS: ([^ ]+)", page):
            result[name] = (__info__, __reference__)

    return result
def fetch():
    """Scrape flagged server domains out of the feed's HTML table rows."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        # Domain cell followed by the dark-background detail-link cell
        pattern = r">([^<]+\.[a-zA-Z]+)</td>\s*<td style=\"background-color: rgb\(11, 11, 11\);\"><a href=\"ccamdetail\.php\?hash="
        for hit in re.finditer(pattern, page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Extract C2 domains announced as 'C2 Domain <name>' in the feed."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(?i)C2 Domain \.?([^\s\"]+)", page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Collect DGA domains; info becomes '<family> dga (malware)'."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(?m)^([^,\s]+),Domain used by ([^,]+) (DGA|-)", page):
            family = re.sub(r" DGA\Z", "", hit.group(2)).lower()
            result[hit.group(1)] = ("%s dga (malware)" % family, __reference__)

    return result
def fetch():
    """Harvest IP addresses referenced by 'ipcheck.htm?ip=' links."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r'ipcheck.htm\?ip=([\d.]+)"', page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Pull zone names from a BIND-style RPZ feed (zone "<name>" { ...)."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r'(?i)zone\s+"([^"]+)"\s+{', page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Collect malware domains; info becomes '<family> (malware)'."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(?m)^([^,#]+),Domain used by ([^,/]+)", page):
            family = hit.group(2).lower().strip()
            result[hit.group(1)] = ("%s (malware)" % family, __reference__)

    return result
def fetch():
    """Scrape <family>/<host> pairs from the monitor table; host keys the result."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r'<td>([^<]+)</td><td><a href="/monitor.php\?host=([^"]+)', page):
            family, host = hit.group(1), hit.group(2)
            result[host] = (family.lower() + " (malware)", __reference__)

    return result
def fetch():
    """Extract hostnames from RSS <link> elements, dropping scheme and trailing slash."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"<link>http://([^<]+?)/?</link>", page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Collect every dotted-quad IP address found anywhere in the feed body."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for address in re.findall(r"\d+\.\d+\.\d+\.\d+", page):
            result[address] = (__info__, __reference__)

    return result
def fetch():
    """Scrape IP addresses wrapped in <div class="code"> elements."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r'<div class="?code"?>(\d+\.\d+\.\d+\.\d+)</div>', page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Collect trails from lines shaped '<name> 2xxx-' (name followed by a year)."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(?m)^([\w.]+)\s+2\d{3}\-", page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Harvest proxy IPs referenced via 'proxy-detection-sample/<ip>' links."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"proxy-detection-sample/([\d.]+)", page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Extract URLs from RSS '<description>URL: ...' entries."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"<description>URL: ([^,\s]+)", page):
            result[hit.group(1)] = (__info__, __reference__)

    return result
def fetch():
    """Collect plain-list trails, skipping blanks, comments and dotless lines.

    NOTE(review): unlike sibling feeds this one performs no __check__
    sanity test on the response body — presumably intentional; confirm.
    """
    result = {}
    page = retrieve_content(__url__)

    for raw in page.split('\n'):
        entry = raw.strip()
        if entry and not entry.startswith('#') and '.' in entry:
            result[entry] = (__info__, __reference__)

    return result
def fetch():
    """Collect C&C IPs per malware family, skipping the noisy 'simda' family."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(?m)^([\d.]+),IP used by ([^,]+) C&C", page):
            family = hit.group(2).lower()
            if family == "simda":  # too many false positives
                continue
            result[hit.group(1)] = ("%s (malware)" % family, __reference__)

    return result
def fetch():
    """Collect DGA domains tagged with their malware family name."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(?m)^([^,\s]+),Domain used by ([^,]+) (DGA|-)", page):
            # Strip a trailing ' DGA' suffix from the family label
            label = re.sub(r" DGA\Z", "", hit.group(2)).lower()
            result[hit.group(1)] = ("%s dga (malware)" % label, __reference__)

    return result
def fetch():
    """Collect one trail per line, ignoring blanks and '#' comments."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split("\n"):
            entry = raw.strip()
            if entry and not entry.startswith("#"):
                result[entry] = (__info__, __reference__)

    return result
def fetch():
    """Parse CSV rows: column 0 is the trail, column 2 names the malware family."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#') or '.' not in entry:
                continue
            fields = entry.split(',')
            family = fields[2].lower().split()[0]
            result[fields[0]] = ("%s (malware)" % family, __reference__)

    return result
def fetch():
    """Collect plain-list trails using an explicit User-agent header."""
    # having problems with database (appending error messages to the end of gzip stream)
    result = {}
    page = retrieve_content(__url__, headers={"User-agent": NAME})

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if entry and not entry.startswith('#') and '.' in entry:
                result[entry] = (__info__, __reference__)

    return result
def fetch():
    """Collect lowercased domain trails, excluding known-benign entries."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip().lower()
            if not entry or entry.startswith('#') or '.' not in entry:
                continue
            if entry in ("api.ipify.org",):  # legitimate service, skip
                continue
            result[entry] = (__info__, __reference__)

    return result
def fetch():
    """Collect one trail per line after validating the feed with __check__."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if entry and not entry.startswith('#') and '.' in entry:
                result[entry] = (__info__, __reference__)

    return result
def fetch():
    """Scrape host/family pairs from the monitor table rows."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        row_re = r'<td>([^<]+)</td><td><a href="/monitor.php\?host=([^"]+)'
        for hit in re.finditer(row_re, page):
            result[hit.group(2)] = (hit.group(1).lower() + " (malware)", __reference__)

    return result
def fetch():
    """Parse attacker entries '<trail>,...,<description>' (skips Shunlist header)."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#') or '.' not in entry or "Shunlist" in entry:
                continue
            trail = entry.split(",")[0]
            reason = entry.split(",", 2)[-1].lower().strip("'\"")
            result[trail] = ("%s (attacker)" % reason, __reference__)

    return result
def update(server=None):
    """ Update trails from feeds """

    trails = {}

    if server:
        # Remote mode: fetch the pre-built trails file from UPDATE_SERVER
        print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
        _ = retrieve_content(server)
        if not _:
            print "[!] unable to retrieve data from '%s'" % server
        else:
            with _fopen_trails("w+b") as f:
                f.write(_)
            trails = load_trails()

    # Rebuild only when the cached trails file is missing, stale
    # (older than UPDATE_PERIOD) or empty
    if not trails and ((not os.path.isfile(TRAILS_FILE) or (time.time() - os.stat(TRAILS_FILE).st_mtime) >= config.UPDATE_PERIOD or os.stat(TRAILS_FILE).st_size == 0)):
        # Make sure the per-user working directory exists
        try:
            if not os.path.isdir(USERS_DIR):
                os.makedirs(USERS_DIR, 0755)
        except Exception, ex:
            exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))

        print "[i] updating trails..."

        # Gather feed module files (remote feeds optional), plus the
        # bundled "static" and "custom" trail definitions
        if config.USE_FEED_UPDATES:
            sys.path.append(os.path.abspath(os.path.join(ROOT_DIR, "trails", "feeds")))
            filenames = glob.glob(os.path.join(sys.path[-1], "*.py"))
        else:
            filenames = []

        sys.path.append(os.path.abspath(os.path.join(ROOT_DIR, "trails")))
        filenames += [os.path.join(sys.path[-1], "static")]
        filenames += [os.path.join(sys.path[-1], "custom")]

        for filename in filenames:
            # Import each feed module by basename (sys.path extended above)
            try:
                module = __import__(os.path.basename(filename).split(".py")[0])
            except (ImportError, SyntaxError), ex:
                print "[!] something went wrong during import of feed file '%s' ('%s')" % (filename, ex)
                continue

            for name, function in inspect.getmembers(module, inspect.isfunction):
                if name == "fetch":
                    print(" [o] '%s'" % module.__url__)
                    results = function()
                    for item in results.items():
                        # Low-priority entries never overwrite existing trails
                        if not (any(_ in item[1][0] for _ in LOW_PRIORITY_INFO_KEYWORDS) and item[0] in trails):
                            trails[item[0]] = item[1]
                    if not results:
                        print "[!] something went wrong during remote data retrieval ('%s')" % module.__url__
def fetch():
    """Parse 3-column CSV rows: trail, info text, reference."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            # Exactly two commas guarantees three fields
            if not entry or entry.startswith('#') or entry.count(',') != 2:
                continue
            fields = entry.split(',')
            result[fields[0]] = (fields[1], fields[2])

    return result
def fetch():
    """Aggregate attacker IPs from all dataplane.org category feeds."""
    result = {}
    feeds = (
        "https://dataplane.org/dnsrd.txt",
        "https://dataplane.org/dnsrdany.txt",
        "https://dataplane.org/dnsversion.txt",
        "https://dataplane.org/sipinvitation.txt",
        "https://dataplane.org/sipquery.txt",
        "https://dataplane.org/sipregistration.txt",
        "https://dataplane.org/sshclient.txt",
        "https://dataplane.org/sshpwauth.txt",
        "https://dataplane.org/vncrfb.txt",
    )

    for feed in feeds:
        page = retrieve_content(feed)
        if __check__ not in page:
            continue
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#') or '.' not in entry or '|' not in entry:
                continue
            # Pipe-delimited rows; third column holds the IP address
            result[entry.split('|')[2].strip()] = (__info__, __reference__)

    return result
def fetch():
    """Expand CIDR blocks from the feed into individual IP trails.

    NOTE(review): no bound on the mask size — a very small prefix would
    enumerate a huge range; sibling feeds clamp with MIN/MAX_BLACKLIST_MASK.
    """
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"([\d.]+)/(\d+)", page):
            network, bits = hit.groups()
            first = addr_to_int(network) & make_mask(int(bits))
            last = first | ((1 << 32 - int(bits)) - 1)
            current = first
            while current <= last:
                result[int_to_addr(current)] = (__info__, __reference__)
                current += 1

    return result
def fetch():
    """Parse '<trail> # <reason> ...' lines; reason is appended to the info."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#') or '.' not in entry:
                continue
            if " # " in entry:
                trail, remainder = entry.split(" # ")[0], entry.split(" # ")[1]
                reason = remainder.split()[0].lower()
                result[trail] = ("%s (%s)" % (__info__, reason), __reference__)

    return result
def fetch():
    """Collect trails, stripping any leading URL scheme (e.g. 'http://')."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#'):
                continue
            if '://' in entry:
                entry = re.search(r"://(.*)", entry).group(1)
            result[entry] = (__info__, __reference__)

    return result
def fetch():
    """Extract 'ip:port' C&C trails from IDS-rule-style feed lines."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#') or '.' not in entry:
                continue
            hit = re.search(r"any -> \[([\d.]+)\] (\d+) .+likely ([^)]+) C&C", entry)
            if hit:
                family = hit.group(3).lower()
                if family == "malware":  # no concrete family named
                    family = "generic"
                key = "%s:%s" % (hit.group(1), hit.group(2))
                result[key] = ("%s (malware)" % family, __reference__)

    return result
def fetch():
    """Parse tab-separated rows; trail in column 2, info in column 3, source in 4."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip('\r').replace('\xa0', "")
            if not entry or entry.startswith('#'):
                continue
            columns = entry.split('\t')
            if len(columns) > 4:
                # Reduce a full URL reference to its bare hostname
                if '/' in columns[4]:
                    source = re.sub(r"\Ahttps?://", "", columns[4]).split('/')[0]
                else:
                    source = columns[4]
                result[columns[2]] = (columns[3], source)

    return result
def fetch():
    """Collect hostnames from hosts-file style lines ('127.0.0.1 <name>').

    Returns a dict mapping each hostname to (__info__, __reference__).
    """
    retval = {}
    content = retrieve_content(__url__)

    if __check__ in content:
        for line in content.split('\n'):
            line = line.strip()
            if not line or line.startswith('#') or '.' not in line:
                continue
            # Fix: dots escaped — the previous pattern r"\A127.0.0.1..."
            # treated '.' as a wildcard and matched e.g. '127a0b0c1'
            match = re.search(r"\A127\.0\.0\.1\s+(.+)\Z", line)
            if match:
                retval[match.group(1)] = (__info__, __reference__)

    return retval
def fetch():
    """Scrape trails out of HTML table rows flagged as 'MalwareConnection'."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for row in re.finditer(r"(?s)<tr>(.+?)</tr>", page):
            cell = row.group(1)
            if "MalwareConnection" not in cell:
                continue
            hit = re.search(r"<strong>([^<]+)</strong></td><td><div class='max-200'>", cell)
            if hit:
                result[hit.group(1)] = ("malware", __reference__)

    return result
def fetch():
    """Parse tab-separated rows; trail in column 2, info text in column 3."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip('\r').replace('\xa0', "")
            if not entry or entry.startswith('#'):
                continue
            columns = entry.split('\t')
            if len(columns) > 4:
                result[columns[2]] = (columns[3], __reference__)

    return result
def fetch():
    """Collect hostnames from hosts-file style lines ('127.0.0.1 <name>').

    Returns a dict mapping each hostname to (__info__, __reference__).
    """
    retval = {}
    content = retrieve_content(__url__)

    if __check__ in content:
        for line in content.split('\n'):
            line = line.strip('\r')
            if not line or line.startswith('#'):
                continue
            # Fix: raw string (non-raw "\s+" is an invalid escape sequence
            # in newer Python) and a length guard so a bare '127.0.0.1'
            # line no longer raises IndexError on items[1]
            items = re.split(r"\s+", line)
            if len(items) > 1 and items[0] == "127.0.0.1" and items[1] != "localhost":
                retval[items[1]] = (__info__, __reference__)

    return retval
def fetch():
    """Collect 'ip:port' trails no older than 60 days.

    Each CSV line ends with a YYYY-MM-DD date; entries whose date is 60
    or more days old are skipped.
    """
    retval = {}
    content = retrieve_content(__url__)

    if __check__ in content:
        for line in content.split('\n'):
            line = line.strip()
            if not line or line.startswith('#') or not all(_ in line for _ in ('.', ',')):
                continue
            parts = line.split(',')
            # Fix: "%Y-%m-%d" — the previous "%Y-%M-%d" parsed the month
            # field as minutes (%M), so the month always defaulted to
            # January and the 60-day freshness filter was wrong
            if (datetime.datetime.now() - datetime.datetime.strptime(parts[-1], "%Y-%m-%d")).days < 60:
                retval["%s:%s" % (parts[0], parts[1])] = (__info__, __reference__)

    return retval
def fetch():
    """Parse attacker entries, skipping the Shunlist banner line."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#'):
                continue
            if '.' not in entry or "Shunlist" in entry:
                continue
            trail = entry.split(",")[0]
            description = entry.split(",", 2)[-1].lower().strip("'\"")
            result[trail] = ("%s (attacker)" % description, __reference__)

    return result
def fetch():
    """Collect one trail per line (IP address or hostname).

    Returns a dict mapping each entry to (__info__, __reference__).
    """
    retval = {}
    content = retrieve_content(__url__)

    if __check__ in content:
        for line in content.split('\n'):
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            # Fix: the previous if/else on an IP-address regex had two
            # byte-identical branches — the distinction was dead code
            retval[line] = (__info__, __reference__)

    return retval
def fetch():
    """Expand CIDR blocks into individual IPs, clamped to the allowed mask range."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(\d+\.\d+\.\d+\.\d+)/(\d+)", page):
            network, bits = hit.group(1), int(hit.group(2))
            # Only expand reasonably-sized blocks
            if not (MIN_BLACKLIST_MASK <= bits <= MAX_BLACKLIST_MASK):
                continue
            first = addr_to_int(network) & make_mask(bits)
            last = first | ((1 << 32 - bits) - 1)
            current = first
            while current <= last:
                result[int_to_addr(current)] = (__info__, __reference__)
                current += 1

    return result
def fetch():
    """Parse '<trail> # <reason>' lines, dropping 'scanning' entries."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#') or '.' not in entry:
                continue
            if " # " not in entry:
                continue
            trail, remainder = entry.split(" # ")[0], entry.split(" # ")[1]
            if remainder.split()[0].lower() == "scanning":  # too many false positives
                continue
            result[trail] = (__info__, __reference__)

    return result
def fetch():
    """Collect C&C IPs from CSV rows plus 100%-confidence domains from HTML rows."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        # CSV-style rows: '<ip>,IP used by <family> C&C'
        for hit in re.finditer(r"(?m)^([\d.]+),IP used by ([^,]+) C&C", page):
            result[hit.group(1)] = ("%s (malware)" % hit.group(2).lower(), __reference__)

        # HTML table rows with a 100% confidence marker
        for row in re.finditer(r"(?s)<tr>(.+?)</tr>", page):
            cell = row.group(1)
            if "<span>100%</span>" not in cell:
                continue
            domain = re.search(r"get_data_domain\('([^']+)", cell)
            if domain:
                tag = re.search(r">(trojan|spyware|adware)\.([^<]+)", cell)
                info = ("%s (malware)" % tag.group(2)) if tag else "malware"
                result[domain.group(1)] = (info, __reference__)

    return result
def fetch():
    """Expand CIDR blocks into IPs, but only blocks spanning at most 1025 hosts."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for hit in re.finditer(r"(\d+\.\d+\.\d+\.\d+)/(\d+)", page):
            network, bits = hit.group(1), int(hit.group(2))
            first = addr_to_int(network) & make_mask(bits)
            last = first | ((1 << 32 - bits) - 1)
            # Skip blocks larger than 1024 addresses beyond the first
            if 0 <= last - first <= 1024:
                for value in xrange(first, last + 1):
                    result[int_to_addr(value)] = (__info__, __reference__)

    return result
def fetch():
    """Parse tab-separated rows, rewriting the info column when it names a known family."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip('\r').replace('\xa0', "")
            if not entry or entry.startswith('#'):
                continue
            columns = entry.split('\t')
            if len(columns) <= 4:
                continue
            description = columns[3]
            # Normalize known malware family names to '<family> (malware)'
            for family in ("andromeda", "banjori", "banload", "bedep", "bhek", "bhek2", "blackvine", "browlock", "citadel", "corebot", "cridex", "cryptowall", "darkcomet", "dexter", "dircrypt", "dridex", "dyre", "fareit", "geinimi", "gh0st", "gorynych", "goz", "gozi", "gumblar", "hesperbot", "kaixin", "katrina", "kazy", "keitaro", "kelihos", "kins", "koobface", "kryptik", "matsnu", "napolar", "necurs", "neurevt", "njrat", "nymaim", "passwordstealer", "pkybot", "pony", "p0ny", "posmalware", "poweliks", "pushdo", "pykspa", "qakbot", "ramnit", "ranbyus", "rbn", "rovnix", "runforestrun", "russiandoll", "shiotob", "shylock", "simda", "soaksoak", "sofacy", "suppobox", "teslacrypt", "tinba", "vawtrak", "waledac", "yigido", "zemot", "zeus"):
                if re.search(r"(?i)\b%s\b" % family, description):
                    description = "%s (malware)" % family
                    break
            result[columns[2]] = (description.replace('_', ' '), __reference__)

    return result
def update_trails(server=None, force=False):
    """ Update trails from feeds """

    trails = {}
    duplicates = {}

    if server:
        # Remote mode: pull the whole pre-built trails file from UPDATE_SERVER
        print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
        _ = retrieve_content(server)
        if not _:
            exit("[!] unable to retrieve data from '%s'" % server)
        else:
            with _fopen_trails("w+b") as f:
                f.write(_)
            trails = load_trails()

    # Collect absolute paths of all bundled trail definition files
    trail_files = []
    for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, "trails"))):
        for filename in filenames:
            trail_files.append(os.path.abspath(os.path.join(dirpath, filename)))

    # Also include user-provided custom trail files, if configured
    if config.CUSTOM_TRAILS_DIR:
        for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, os.path.expanduser(config.CUSTOM_TRAILS_DIR)))):
            for filename in filenames:
                trail_files.append(os.path.abspath(os.path.join(dirpath, filename)))

    # Ensure the per-user working directory exists before any writes happen
    try:
        if not os.path.isdir(USERS_DIR):
            os.makedirs(USERS_DIR, 0755)
    except Exception, ex:
        exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
def fetch():
    """Extract C2 panel hosts from '<family>,<url>' rows, normalizing the trail."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith(__check__):
                continue
            if "://" not in entry:
                continue
            fields = entry.lower().split(',')
            # Drop the last path segment, then the scheme, then panel paths
            host = re.sub("/[^/]+$", "", fields[1])
            host = host.split("://")[-1]
            host = re.sub(r"/(web)?panel.*", "", host)
            # Bare IP addresses keep a trailing slash to stay URL-like
            if re.search(r"\A\d[\d.]*\d\Z", host):
                host = "%s/" % host
            host = host.replace(".xsph.ru.xsph.ru", ".xsph.ru")
            result[host] = (fields[0], __reference__)

    return result
def fetch():
    """Collect trails, tagging bare IP addresses as potential malware sites."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#'):
                continue
            # Strip scheme and trailing slash
            if '://' in entry:
                entry = re.search(r"://(.*)", entry).group(1)
            entry = entry.rstrip('/')

            if '/' in entry:
                result[entry] = (__info__, __reference__)
            elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", entry):
                result[entry] = ("potential malware site", __reference__)
            else:
                result[entry] = (__info__, __reference__)

    return result
def fetch():
    """Parse ';'-separated rows, upgrading the info when a known campaign is named."""
    result = {}
    page = retrieve_content(__url__)

    if __check__ in page:
        for raw in page.split('\n'):
            entry = raw.strip()
            if not entry or entry.startswith('#') or ';' not in entry or "packetstormsecurity" in entry:
                continue
            fields = entry.split(';')
            # Domain trails only — skip rows whose first field is an IP
            if re.search(r"\d+\.\d+\.\d+\.\d+", fields[0]):
                continue
            description = __info__
            # Match against known campaign/family names in the second field
            for campaign in ('aaeh', 'andromeda', 'angler', 'anunak', 'arid viper', 'armageddon', 'asprox', 'babar', 'bandachor', 'bedep', 'black vine', 'buhtrap', 'camerashy', 'carbanak', 'cleaver', 'cmstar', 'cryptofortress', 'ctb-locker', 'darkhotel', 'darpapox', 'deep panda', 'desert falcons', 'destover', 'dragonok', 'dridex', 'dyre', 'el machete', 'elastic botnet', 'elf.billgates', 'equationdrug', 'escelar', 'evilgrab', 'fessleak', 'filmkan', 'flame', 'gamapos', 'gauss', 'gaza cybergang', 'grabit', 'group-3390', 'hellsing', 'kazy', 'keyraider', 'kriptovor', 'lotus blossom', 'moose', 'neutrino', 'nitlovepos', 'nuclear', 'pkybot', 'plugx', 'plugx', 'poison ivy', 'pony', 'poseidon', 'potao express', 'pushdo', 'red october', 'regin', 'retefe', 'rig', 'rocket kitten', 'rsa ir', 'sakula', 'sandworm', 'shade encryptor', 'shell crew', 'signed pos', 'skype worm', 'sofacy', 'steamstealers', 'stuxnet', 'symmi', 'teslacrypt', 'the equation', 'the masked', 'the naikon', 'torrentlocker', 'trapwot', 'triplenine', 'turla', 'volatile cedar', 'windigo', 'wintti', 'wirelurker', 'word intruder', 'xlscmd', 'zeuscart'):
                if re.search(r"(?i)\b%s\b" % campaign, fields[1]):
                    description = "%s (malware)" % campaign
                    break
            result[fields[0]] = (description, __reference__)

    return result