def process(file, dst, type):
    hashes = set()
    with open(file, 'r') as f_in:
        for obj in csv.DictReader(f_in):
            pem = obj['PEM Info'].strip("'").replace('\r', '').replace('\n\n', '\n')
            # The certificate-name column differs between Mozilla CSV exports; accept either variant.
            try:
                obj['Certificate Name']
            except KeyError:
                obj['Common Name or Certificate Name']
            cert = load_certificate(FILETYPE_PEM, pem)
            hashes.add(gethash(cert, 'md5'))
            hashes.add(gethash(cert, 'sha1'))
            hashes.add(obj['SHA-256 Fingerprint'].lower())

    warninglist = {
        'name': 'Fingerprint of {type}'.format(type=type),
        'version': get_version(),
        'description': "Fingerprint of {type} taken from Mozilla's lists at https://wiki.mozilla.org/CA".format(type=type),
        'list': hashes,
        'type': 'string',
        'matching_attributes': [
            "md5", "sha1", "sha256", "filename|md5", "filename|sha1", "filename|sha256",
            "x509-fingerprint-md5", "x509-fingerprint-sha1", "x509-fingerprint-sha256"
        ]
    }

    write_to_file(warninglist, dst)
def process(url, warninglist, dst):
    whitelist = download(url).text
    whitelist = list(set(whitelist.split()))

    warninglist['type'] = 'hostname'
    warninglist['matching_attributes'] = ['domain', 'hostname', 'url']
    warninglist['version'] = get_version()
    warninglist['list'] = whitelist

    write_to_file(warninglist, dst)
def process(url, dst):
    warninglist = {
        'name': 'Specialized list of {} addresses belonging to common VPN providers and datacenters'.format(
            dst.split('-')[1].replace('ip', 'IP')),
        'version': get_version(),
        'description': 'Specialized list of {} addresses belonging to common VPN providers and datacenters'.format(
            dst.split('-')[1].replace('ip', 'IP')),
        'list': process_stream(url),
        'type': 'cidr',
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
    }

    write_to_file(warninglist, dst)
def generate(sites, warninglist, dst):
    warninglist['version'] = get_version()
    warninglist['type'] = 'string'
    warninglist['matching_attributes'] = ['hostname', 'domain', 'url', 'domain|ip']

    warninglist['list'] = []
    for site in sites:
        v = site.decode('UTF-8').split(',')[1]
        warninglist['list'].append(v.strip().replace('\\r\\n', ''))

    write_to_file(warninglist, dst)
def process(warninglist_name):
    description = {
        'description': 'Numbers that cannot be attributed because they are reserved for different purposes.',
        'name': 'Unattributed phone number.',
        'matching_attributes': ['phone-number', 'whois-registrant-phone'],
        'type': 'regex',
        'version': get_version()
    }

    warninglist = generate_french_warninglist()
    # The list can be extended by adding other entries:
    # warninglist.extend(generate_some_warninglist())

    description['list'] = warninglist
    write_to_file(description, warninglist_name)
def process(url, dst):
    warninglist = {
        'name': 'TLDs as known by IANA',
        'version': get_version(),
        'description': 'Event contains one or more TLDs as attribute with an IDS flag set',
        'list': [],
        'matching_attributes': ["hostname", "domain", "domain|ip"],
        'type': 'string'
    }

    r = download(url)
    for tld in r.text.splitlines():
        if tld.startswith('#'):
            continue
        warninglist['list'].append(tld)

    write_to_file(warninglist, dst)
def process(files, dst):
    warninglist = {
        'type': "string",
        'matching_attributes': ["hostname", "domain", "ip-dst", "ip-src", "url", "domain|ip"],
        'name': "CRL Warninglist",
        'version': get_version(),
        'description': "CRL Warninglist from threatstop (https://github.com/threatstop/crl-ocsp-whitelist/)",
        'list': []
    }

    for file in files:
        with open(get_abspath_source_file(file), 'r') as f:
            ips = f.readlines()
        for ip in ips:
            warninglist['list'].append(ip.strip())

    write_to_file(warninglist, dst)
def process(file, dst):
    warninglist = {
        'name': 'List of known Microsoft Azure Datacenter IP Ranges',
        'version': get_version(),
        'description': 'Microsoft Azure Datacenter IP Ranges',
        'list': [],
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr'
    }

    with open(file, 'r') as json_file:
        ms_azure_ip_list = json.load(json_file)

    for value in ms_azure_ip_list['values']:
        warninglist['list'] += value['properties']['addressPrefixes']

    write_to_file(warninglist, dst)
def process(files, dst): warninglist = { 'name': "List of known Cloudflare IP ranges", 'version': get_version(), 'description': "List of known Cloudflare IP ranges (https://www.cloudflare.com/ips/)", 'type': "cidr", 'list': [], 'matching_attributes': ["ip-dst", "ip-src", "domain|ip"] } for file in files: with open(file, 'r') as f: ips = f.readlines() for ip in ips: warninglist['list'].append(ip.strip()) write_to_file(warninglist, dst)
def process(files, dst):
    warninglist = {
        'description': "Event contains one or more entries from the top 500 of the most used domains (Moz).",
        'version': get_version(),
        'name': "Top 500 domains and pages from https://moz.com/top500",
        'type': 'string',
        'list': [],
        'matching_attributes': ['hostname', 'domain', 'uri', 'url']
    }

    for file in files:
        with open(get_abspath_source_file(file)) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            next(csv_reader, None)  # skip the CSV header row
            for row in csv_reader:
                v = row[1]
                warninglist['list'].append(v.rstrip().rstrip('/'))

    write_to_file(warninglist, dst)
def process(file, dst):
    with open(get_abspath_source_file(file), newline='\n', encoding='utf-8', errors='replace') as csv_file:
        # Skip the CSV header row, then keep the top 10,000 entries.
        sites = csv_file.readlines()[1:10001]

    warninglist = {
        'name': 'Top 10K websites from Majestic Million',
        'version': get_version(),
        'description': 'Event contains one or more entries from the top 10K of the most used websites (Majestic Million).',
        'matching_attributes': ['hostname', 'domain'],
        'type': 'hostname',
        'list': []
    }

    for site in sites:
        v = site.split(',')[2]
        warninglist['list'].append(v.rstrip())

    write_to_file(warninglist, dst)
def process(file, dst, name: str, description: str):
    warninglist = {
        'name': name,
        'version': get_version(),
        'description': description,
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr'
    }

    with open(get_abspath_source_file(file), 'r') as json_file:
        ms_azure_ip_list = json.load(json_file)

    values = []
    for value in ms_azure_ip_list['values']:
        values += value['properties']['addressPrefixes']

    warninglist['list'] = consolidate_networks(values)

    write_to_file(warninglist, dst)
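# For reference, a minimal sketch of the Service Tags JSON shape the Azure functions
# above parse. The field names come from the parsing code itself
# ('values' -> 'properties' -> 'addressPrefixes'); the prefix values are illustrative
# assumptions, not real Azure data.
example_service_tags = {
    "values": [
        {"properties": {"addressPrefixes": ["20.38.98.0/24", "2603:1020::/47"]}},
    ]
}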
def process(url, dst):
    warninglist = {
        'name': 'List of disposable email domains',
        'version': get_version(),
        'description': 'List of disposable email domains',
        'list': process_stream(url),
        'type': 'substring',
        'matching_attributes': [
            "email-src", "email-dst", "whois-registrant-email", "domain|ip", "dns-soa-email"
        ]
    }

    write_to_file(warninglist, dst)
def process(url, dst):
    university_list = download(url).json()

    warninglist = {
        'type': "string",
        'name': "University domains",
        'matching_attributes': ['hostname', 'domain', 'url', 'domain|ip'],
        'version': get_version(),
        'description': "List of University domains from https://raw.githubusercontent.com/Hipo/university-domains-list/master/world_universities_and_domains.json",
        'list': []
    }

    for university in university_list:
        # Default to an empty list so entries without a 'domains' key are skipped.
        for domain in university.get('domains', []):
            if domain not in warninglist['list']:
                warninglist['list'].append(domain)

    write_to_file(warninglist, dst)
def process(url, dst):
    warninglist = {
        'name': 'List of known Wikimedia address ranges',
        'version': get_version(),
        'description': 'Wikimedia address ranges (http://noc.wikimedia.org/conf/reverse-proxy.php.txt)',
        'type': 'cidr',
        'list': [],
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
    }

    matched = re.findall(r'\'(.*?)\'', codecs.decode(download(url).content, 'UTF-8'))
    for ip in matched:
        try:
            ipaddress.ip_network(ip)
            warninglist['list'].append(ip)
        except ValueError:
            pass

    write_to_file(warninglist, dst)
def process(file, dst):
    with open(get_abspath_source_file(file), 'r') as json_file:
        amazon_aws_ip_list = json.load(json_file)

    ranges = []
    for prefix in amazon_aws_ip_list['prefixes']:
        ranges.append(prefix['ip_prefix'])
    for prefix in amazon_aws_ip_list['ipv6_prefixes']:
        ranges.append(prefix['ipv6_prefix'])

    warninglist = {
        'name': 'List of known Amazon AWS IP address ranges',
        'version': get_version(),
        'description': 'Amazon AWS IP address ranges (https://ip-ranges.amazonaws.com/ip-ranges.json)',
        'type': 'cidr',
        'list': ranges,
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
    }

    write_to_file(warninglist, dst)
def process(files, dst): warninglist = { 'name': "List of known Stackpath CDN IP ranges", 'version': get_version(), 'description': "List of known Stackpath (Highwinds) CDN IP ranges (https://support.stackpath.com/hc/en-us/articles/360001091666-Whitelist-CDN-WAF-IP-Blocks)", 'type': "cidr", 'list': [], 'matching_attributes': ["ip-dst", "ip-src", "domain|ip"] } for file in files: with open(get_abspath_source_file(file), 'r') as f: ips = f.readlines() for ip in ips: iptoadd = ip.strip() try: ipaddress.ip_network(ip.strip()) except ValueError as err:# if it's host given strip to the subnet iptoadd = str(ipaddress.IPv6Interface(ip.strip()).ip) warninglist['list'].append(iptoadd) write_to_file(warninglist, dst)
def process(file, dst):
    with zipfile.ZipFile(file, 'r') as alexa_lists:
        for name in alexa_lists.namelist():
            if name != "top-1m.csv":
                continue
            with alexa_lists.open(name) as top:
                top1000 = top.readlines()[:1000]

    warninglist = {
        'description': "Event contains one or more entries from the top 1000 of the most used websites (Alexa).",
        'version': get_version(),
        'name': "Top 1000 websites from Alexa",
        'type': 'hostname',
        'list': [],
        'matching_attributes': ['hostname', 'domain', 'url', 'domain|ip']
    }

    for site in top1000:
        v = site.decode('UTF-8').split(',')[1]
        warninglist['list'].append(v.rstrip())

    write_to_file(warninglist, dst)
def walk_through(folder, errfile=sys.stdout):
    for elem in os.listdir(folder):
        path = os.path.join(folder, elem)
        if os.path.isfile(path) and check_file(path):
            generator.write_to_file(path)
        elif os.path.isdir(path):
            walk_through(path, errfile)
networks = set()
asn_to_fetch = []

for asn in search_result["data"]["asns"]:
    if is_akamai(asn):
        asn_to_fetch.append(asn["asn"])

for prefix in search_result["data"]["ipv4_prefixes"]:
    if is_akamai(prefix):
        networks.add(prefix["prefix"])

for prefix in search_result["data"]["ipv6_prefixes"]:
    if is_akamai(prefix):
        networks.add(prefix["prefix"])

for asn in asn_to_fetch:
    try:
        networks.update(get_networks_for_asn(asn))
    except Exception as e:
        print(str(e))

warninglist = {
    'name': 'List of known Akamai IP ranges',
    'version': get_version(),
    'description': 'Akamai IP ranges from BGP search',
    'type': 'cidr',
    'list': consolidate_networks(networks),
    'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
}

write_to_file(warninglist, "akamai")
if __name__ == '__main__':
    source_url = 'https://publicsuffix.org/list/public_suffix_list.dat'
    destination_folder = 'second-level-tlds'

    data = download(source_url).text
    lines = data.split("\n")

    # Filter out comments and empty lines
    domains = [
        line.strip() for line in lines
        if len(line) != 0 and not line.startswith('//')
    ]

    # Convert IDN domains to their xn-- (Punycode) form
    domains = [domain.encode('idna').decode('utf-8') for domain in domains]

    # Drop exception rules ("!...") and strip leading wildcard labels ("*.")
    domains = [
        domain.lstrip('*.') for domain in domains
        if not domain.startswith('!')
    ]

    warninglist = {
        'name': 'Second level TLDs as known by Mozilla Foundation',
        'description': 'Event contains one or more second level TLDs as attribute with an IDS flag set.',
        'matching_attributes': ['hostname', 'domain', 'domain|ip'],
        'type': 'string',
        'version': get_version(),
        'list': domains,
    }

    write_to_file(warninglist, destination_folder)
def generate(data_list, dst, warninglist):
    warninglist['version'] = get_version()
    warninglist['list'] = data_list
    write_to_file(warninglist, dst)
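# Minimal usage sketch for generate() above. The skeleton dict, data and destination
# name are illustrative assumptions, not taken from an actual generator.
example_skeleton = {
    'name': 'Example warninglist',
    'description': 'Illustrative skeleton passed to generate()',
    'type': 'string',
    'matching_attributes': ['hostname', 'domain'],
}
# generate(['example.org', 'example.net'], 'example-destination', example_skeleton)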
dns = Dns(create_resolver())

spf_ranges = []
p = multiprocessing.dummy.Pool(40)
for domain_ranges in p.map(lambda d: dns.get_ip_ranges_from_spf(d), domains):
    spf_ranges.extend(domain_ranges)

warninglist = {
    'name': "List of known SMTP sending IP ranges",
    'version': get_version(),
    'description': "List of IP ranges for known SMTP servers.",
    'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
    'type': 'cidr',
    'list': consolidate_networks(spf_ranges),
}
write_to_file(warninglist, "smtp-sending-ips")

mx_ips = []
for domain_ranges in p.map(lambda d: dns.get_mx_ips_for_domain(d), domains):
    mx_ips.extend(domain_ranges)

warninglist = {
    'name': "List of known SMTP receiving IP addresses",
    'version': get_version(),
    'description': "List of IP addresses for known SMTP servers.",
    'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
    'type': 'cidr',
    # Materialize as a list of strings; a bare map object would not serialize to JSON.
    'list': [str(ip) for ip in mx_ips],
}
write_to_file(warninglist, "smtp-receiving-ips")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from generator import get_version, write_to_file, Dns, consolidate_networks, create_resolver

if __name__ == '__main__':
    spf = Dns(create_resolver())

    warninglist = {
        'name': "List of known Gmail sending IP ranges",
        'version': get_version(),
        'description': "List of known Gmail sending IP ranges (https://support.google.com/a/answer/27642?hl=en)",
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr',
        'list': consolidate_networks(spf.get_ip_ranges_from_spf("gmail.com")),
    }

    write_to_file(warninglist, "google-gmail-sending-ips")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json

from generator import download, get_version, write_to_file, consolidate_networks

if __name__ == '__main__':
    cloud = download("https://www.gstatic.com/ipranges/cloud.json")
    parsed = json.loads(cloud.text)

    ranges = [
        p["ipv4Prefix"] if "ipv4Prefix" in p else p["ipv6Prefix"]
        for p in parsed["prefixes"]
    ]

    warninglist = {
        'name': "List of known GCP (Google Cloud Platform) IP address ranges",
        'version': get_version(),
        'description': "GCP (Google Cloud Platform) IP address ranges (https://www.gstatic.com/ipranges/cloud.json)",
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr',
        'list': consolidate_networks(ranges),
    }

    write_to_file(warninglist, "google-gcp")